linux/drivers/mmc/core/core.c
   1/*
   2 *  linux/drivers/mmc/core/core.c
   3 *
   4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   5 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   6 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   7 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/completion.h>
  17#include <linux/device.h>
  18#include <linux/delay.h>
  19#include <linux/pagemap.h>
  20#include <linux/err.h>
  21#include <linux/leds.h>
  22#include <linux/scatterlist.h>
  23#include <linux/log2.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/suspend.h>
  28#include <linux/fault-inject.h>
  29#include <linux/random.h>
  30#include <linux/slab.h>
  31#include <linux/of.h>
  32
  33#include <linux/mmc/card.h>
  34#include <linux/mmc/host.h>
  35#include <linux/mmc/mmc.h>
  36#include <linux/mmc/sd.h>
  37#include <linux/mmc/slot-gpio.h>
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/mmc.h>
  41
  42#include "core.h"
  43#include "card.h"
  44#include "bus.h"
  45#include "host.h"
  46#include "sdio_bus.h"
  47#include "pwrseq.h"
  48
  49#include "mmc_ops.h"
  50#include "sd_ops.h"
  51#include "sdio_ops.h"
  52
  53/* If the device is not responding */
  54#define MMC_CORE_TIMEOUT_MS     (10 * 60 * 1000) /* 10 minute timeout */
  55
  56/* The max erase timeout, used when host->max_busy_timeout isn't specified */
  57#define MMC_ERASE_TIMEOUT_MS    (60 * 1000) /* 60 s */
  58
  59static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  60
  61/*
  62 * Enabling software CRCs on the data blocks can be a significant (30%)
  63 * performance cost, and for other reasons may not always be desired.
   64 * So we allow it to be disabled.
  65 */
  66bool use_spi_crc = 1;
  67module_param(use_spi_crc, bool, 0);
  68
  69static int mmc_schedule_delayed_work(struct delayed_work *work,
  70                                     unsigned long delay)
  71{
  72        /*
   73         * We use the system_freezable_wq for two reasons.
  74         * First, it allows several works (not the same work item) to be
  75         * executed simultaneously. Second, the queue becomes frozen when
  76         * userspace becomes frozen during system PM.
  77         */
  78        return queue_delayed_work(system_freezable_wq, work, delay);
  79}
  80
  81#ifdef CONFIG_FAIL_MMC_REQUEST
  82
  83/*
  84 * Internal function. Inject random data errors.
  85 * If mmc_data is NULL no errors are injected.
  86 */
  87static void mmc_should_fail_request(struct mmc_host *host,
  88                                    struct mmc_request *mrq)
  89{
  90        struct mmc_command *cmd = mrq->cmd;
  91        struct mmc_data *data = mrq->data;
  92        static const int data_errors[] = {
  93                -ETIMEDOUT,
  94                -EILSEQ,
  95                -EIO,
  96        };
  97
  98        if (!data)
  99                return;
 100
 101        if (cmd->error || data->error ||
 102            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 103                return;
 104
 105        data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 106        data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 107}
 108
 109#else /* CONFIG_FAIL_MMC_REQUEST */
 110
 111static inline void mmc_should_fail_request(struct mmc_host *host,
 112                                           struct mmc_request *mrq)
 113{
 114}
 115
 116#endif /* CONFIG_FAIL_MMC_REQUEST */
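/*
 * Example (illustrative; the exact debugfs path depends on the host index
 * and mount point): with CONFIG_FAIL_MMC_REQUEST enabled, the standard
 * fault-injection attributes are exposed under the host's debugfs
 * directory and can be tuned from userspace, e.g.:
 *
 *	echo 10 > /sys/kernel/debug/mmc0/fail_mmc_request/probability
 *	echo -1 > /sys/kernel/debug/mmc0/fail_mmc_request/times
 */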
 117
 118static inline void mmc_complete_cmd(struct mmc_request *mrq)
 119{
 120        if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
 121                complete_all(&mrq->cmd_completion);
 122}
 123
 124void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
 125{
 126        if (!mrq->cap_cmd_during_tfr)
 127                return;
 128
 129        mmc_complete_cmd(mrq);
 130
 131        pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
 132                 mmc_hostname(host), mrq->cmd->opcode);
 133}
 134EXPORT_SYMBOL(mmc_command_done);
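/*
 * Example (illustrative sketch, not part of the original file): a host
 * controller driver advertising MMC_CAP_CMD_DURING_TFR would typically call
 * mmc_command_done() from its command-complete interrupt and defer
 * mmc_request_done() until the data transfer has also finished; 'mmc' here
 * stands for the driver's struct mmc_host pointer:
 *
 *	if (mrq->cap_cmd_during_tfr)
 *		mmc_command_done(mmc, mrq);
 */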
 135
 136/**
 137 *      mmc_request_done - finish processing an MMC request
 138 *      @host: MMC host which completed request
  139 *      @mrq: MMC request which completed
 140 *
 141 *      MMC drivers should call this function when they have completed
 142 *      their processing of a request.
 143 */
 144void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 145{
 146        struct mmc_command *cmd = mrq->cmd;
 147        int err = cmd->error;
 148
 149        /* Flag re-tuning needed on CRC errors */
 150        if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 151            cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
 152            (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 153            (mrq->data && mrq->data->error == -EILSEQ) ||
 154            (mrq->stop && mrq->stop->error == -EILSEQ)))
 155                mmc_retune_needed(host);
 156
 157        if (err && cmd->retries && mmc_host_is_spi(host)) {
 158                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 159                        cmd->retries = 0;
 160        }
 161
 162        if (host->ongoing_mrq == mrq)
 163                host->ongoing_mrq = NULL;
 164
 165        mmc_complete_cmd(mrq);
 166
 167        trace_mmc_request_done(host, mrq);
 168
 169        /*
 170         * We list various conditions for the command to be considered
 171         * properly done:
 172         *
 173         * - There was no error, OK fine then
 174         * - We are not doing some kind of retry
 175         * - The card was removed (...so just complete everything no matter
 176         *   if there are errors or retries)
 177         */
 178        if (!err || !cmd->retries || mmc_card_removed(host->card)) {
 179                mmc_should_fail_request(host, mrq);
 180
 181                if (!host->ongoing_mrq)
 182                        led_trigger_event(host->led, LED_OFF);
 183
 184                if (mrq->sbc) {
 185                        pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 186                                mmc_hostname(host), mrq->sbc->opcode,
 187                                mrq->sbc->error,
 188                                mrq->sbc->resp[0], mrq->sbc->resp[1],
 189                                mrq->sbc->resp[2], mrq->sbc->resp[3]);
 190                }
 191
 192                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 193                        mmc_hostname(host), cmd->opcode, err,
 194                        cmd->resp[0], cmd->resp[1],
 195                        cmd->resp[2], cmd->resp[3]);
 196
 197                if (mrq->data) {
 198                        pr_debug("%s:     %d bytes transferred: %d\n",
 199                                mmc_hostname(host),
 200                                mrq->data->bytes_xfered, mrq->data->error);
 201                }
 202
 203                if (mrq->stop) {
 204                        pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 205                                mmc_hostname(host), mrq->stop->opcode,
 206                                mrq->stop->error,
 207                                mrq->stop->resp[0], mrq->stop->resp[1],
 208                                mrq->stop->resp[2], mrq->stop->resp[3]);
 209                }
 210        }
 211        /*
 212         * Request starter must handle retries - see
 213         * mmc_wait_for_req_done().
 214         */
 215        if (mrq->done)
 216                mrq->done(mrq);
 217}
 218
 219EXPORT_SYMBOL(mmc_request_done);
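/*
 * Example (illustrative sketch): a host controller driver normally calls
 * mmc_request_done() from interrupt context once the command and any data
 * phase have finished. The my_host structure, MY_RESP0 register and
 * cmd_error flag below are hypothetical:
 *
 *	static void my_host_finish_request(struct my_host *h)
 *	{
 *		struct mmc_request *mrq = h->mrq;
 *
 *		h->mrq = NULL;
 *		mrq->cmd->resp[0] = readl(h->base + MY_RESP0);
 *		if (h->cmd_error)
 *			mrq->cmd->error = -EILSEQ;
 *		mmc_request_done(h->mmc, mrq);
 *	}
 */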
 220
 221static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 222{
 223        int err;
 224
 225        /* Assumes host controller has been runtime resumed by mmc_claim_host */
 226        err = mmc_retune(host);
 227        if (err) {
 228                mrq->cmd->error = err;
 229                mmc_request_done(host, mrq);
 230                return;
 231        }
 232
 233        /*
 234         * For sdio rw commands we must wait for card busy otherwise some
 235         * sdio devices won't work properly.
 236         * And bypass I/O abort, reset and bus suspend operations.
 237         */
 238        if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
 239            host->ops->card_busy) {
  240                int tries = 500; /* Wait approx 500ms at maximum */
 241
 242                while (host->ops->card_busy(host) && --tries)
 243                        mmc_delay(1);
 244
 245                if (tries == 0) {
 246                        mrq->cmd->error = -EBUSY;
 247                        mmc_request_done(host, mrq);
 248                        return;
 249                }
 250        }
 251
 252        if (mrq->cap_cmd_during_tfr) {
 253                host->ongoing_mrq = mrq;
 254                /*
  255                 * Retry path could come through here without having waited on
 256                 * cmd_completion, so ensure it is reinitialised.
 257                 */
 258                reinit_completion(&mrq->cmd_completion);
 259        }
 260
 261        trace_mmc_request_start(host, mrq);
 262
 263        if (host->cqe_on)
 264                host->cqe_ops->cqe_off(host);
 265
 266        host->ops->request(host, mrq);
 267}
 268
 269static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
 270                             bool cqe)
 271{
 272        if (mrq->sbc) {
 273                pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 274                         mmc_hostname(host), mrq->sbc->opcode,
 275                         mrq->sbc->arg, mrq->sbc->flags);
 276        }
 277
 278        if (mrq->cmd) {
 279                pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
 280                         mmc_hostname(host), cqe ? "CQE direct " : "",
 281                         mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 282        } else if (cqe) {
 283                pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
 284                         mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
 285        }
 286
 287        if (mrq->data) {
 288                pr_debug("%s:     blksz %d blocks %d flags %08x "
 289                        "tsac %d ms nsac %d\n",
 290                        mmc_hostname(host), mrq->data->blksz,
 291                        mrq->data->blocks, mrq->data->flags,
 292                        mrq->data->timeout_ns / 1000000,
 293                        mrq->data->timeout_clks);
 294        }
 295
 296        if (mrq->stop) {
 297                pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 298                         mmc_hostname(host), mrq->stop->opcode,
 299                         mrq->stop->arg, mrq->stop->flags);
 300        }
 301}
 302
 303static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
 304{
 305        unsigned int i, sz = 0;
 306        struct scatterlist *sg;
 307
 308        if (mrq->cmd) {
 309                mrq->cmd->error = 0;
 310                mrq->cmd->mrq = mrq;
 311                mrq->cmd->data = mrq->data;
 312        }
 313        if (mrq->sbc) {
 314                mrq->sbc->error = 0;
 315                mrq->sbc->mrq = mrq;
 316        }
 317        if (mrq->data) {
 318                if (mrq->data->blksz > host->max_blk_size ||
 319                    mrq->data->blocks > host->max_blk_count ||
 320                    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
 321                        return -EINVAL;
 322
 323                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 324                        sz += sg->length;
 325                if (sz != mrq->data->blocks * mrq->data->blksz)
 326                        return -EINVAL;
 327
 328                mrq->data->error = 0;
 329                mrq->data->mrq = mrq;
 330                if (mrq->stop) {
 331                        mrq->data->stop = mrq->stop;
 332                        mrq->stop->error = 0;
 333                        mrq->stop->mrq = mrq;
 334                }
 335        }
 336
 337        return 0;
 338}
 339
 340int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 341{
 342        int err;
 343
 344        mmc_retune_hold(host);
 345
 346        if (mmc_card_removed(host->card))
 347                return -ENOMEDIUM;
 348
 349        mmc_mrq_pr_debug(host, mrq, false);
 350
 351        WARN_ON(!host->claimed);
 352
 353        err = mmc_mrq_prep(host, mrq);
 354        if (err)
 355                return err;
 356
 357        led_trigger_event(host->led, LED_FULL);
 358        __mmc_start_request(host, mrq);
 359
 360        return 0;
 361}
 362EXPORT_SYMBOL(mmc_start_request);
 363
 364/*
 365 * mmc_wait_data_done() - done callback for data request
 366 * @mrq: done data request
 367 *
 368 * Wakes up mmc context, passed as a callback to host controller driver
 369 */
 370static void mmc_wait_data_done(struct mmc_request *mrq)
 371{
 372        struct mmc_context_info *context_info = &mrq->host->context_info;
 373
 374        context_info->is_done_rcv = true;
 375        wake_up_interruptible(&context_info->wait);
 376}
 377
 378static void mmc_wait_done(struct mmc_request *mrq)
 379{
 380        complete(&mrq->completion);
 381}
 382
 383static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
 384{
 385        struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
 386
 387        /*
 388         * If there is an ongoing transfer, wait for the command line to become
 389         * available.
 390         */
 391        if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
 392                wait_for_completion(&ongoing_mrq->cmd_completion);
 393}
 394
 395/*
  396 * __mmc_start_data_req() - starts data request
 397 * @host: MMC host to start the request
 398 * @mrq: data request to start
 399 *
  400 * Sets the done callback to be called when the request is completed by the card.
  401 * Starts the data mmc request execution.
 402 * If an ongoing transfer is already in progress, wait for the command line
 403 * to become available before sending another command.
 404 */
 405static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
 406{
 407        int err;
 408
 409        mmc_wait_ongoing_tfr_cmd(host);
 410
 411        mrq->done = mmc_wait_data_done;
 412        mrq->host = host;
 413
 414        init_completion(&mrq->cmd_completion);
 415
 416        err = mmc_start_request(host, mrq);
 417        if (err) {
 418                mrq->cmd->error = err;
 419                mmc_complete_cmd(mrq);
 420                mmc_wait_data_done(mrq);
 421        }
 422
 423        return err;
 424}
 425
 426static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 427{
 428        int err;
 429
 430        mmc_wait_ongoing_tfr_cmd(host);
 431
 432        init_completion(&mrq->completion);
 433        mrq->done = mmc_wait_done;
 434
 435        init_completion(&mrq->cmd_completion);
 436
 437        err = mmc_start_request(host, mrq);
 438        if (err) {
 439                mrq->cmd->error = err;
 440                mmc_complete_cmd(mrq);
 441                complete(&mrq->completion);
 442        }
 443
 444        return err;
 445}
 446
 447void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 448{
 449        struct mmc_command *cmd;
 450
 451        while (1) {
 452                wait_for_completion(&mrq->completion);
 453
 454                cmd = mrq->cmd;
 455
 456                /*
 457                 * If host has timed out waiting for the sanitize
 458                 * to complete, card might be still in programming state
 459                 * so let's try to bring the card out of programming
 460                 * state.
 461                 */
 462                if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 463                        if (!mmc_interrupt_hpi(host->card)) {
 464                                pr_warn("%s: %s: Interrupted sanitize\n",
 465                                        mmc_hostname(host), __func__);
 466                                cmd->error = 0;
 467                                break;
 468                        } else {
 469                                pr_err("%s: %s: Failed to interrupt sanitize\n",
 470                                       mmc_hostname(host), __func__);
 471                        }
 472                }
 473                if (!cmd->error || !cmd->retries ||
 474                    mmc_card_removed(host->card))
 475                        break;
 476
 477                mmc_retune_recheck(host);
 478
 479                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 480                         mmc_hostname(host), cmd->opcode, cmd->error);
 481                cmd->retries--;
 482                cmd->error = 0;
 483                __mmc_start_request(host, mrq);
 484        }
 485
 486        mmc_retune_release(host);
 487}
 488EXPORT_SYMBOL(mmc_wait_for_req_done);
 489
 490/*
 491 * mmc_cqe_start_req - Start a CQE request.
 492 * @host: MMC host to start the request
 493 * @mrq: request to start
 494 *
 495 * Start the request, re-tuning if needed and it is possible. Returns an error
 496 * code if the request fails to start or -EBUSY if CQE is busy.
 497 */
 498int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
 499{
 500        int err;
 501
 502        /*
 503         * CQE cannot process re-tuning commands. Caller must hold retuning
 504         * while CQE is in use.  Re-tuning can happen here only when CQE has no
 505         * active requests i.e. this is the first.  Note, re-tuning will call
 506         * ->cqe_off().
 507         */
 508        err = mmc_retune(host);
 509        if (err)
 510                goto out_err;
 511
 512        mrq->host = host;
 513
 514        mmc_mrq_pr_debug(host, mrq, true);
 515
 516        err = mmc_mrq_prep(host, mrq);
 517        if (err)
 518                goto out_err;
 519
 520        err = host->cqe_ops->cqe_request(host, mrq);
 521        if (err)
 522                goto out_err;
 523
 524        trace_mmc_request_start(host, mrq);
 525
 526        return 0;
 527
 528out_err:
 529        if (mrq->cmd) {
 530                pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
 531                         mmc_hostname(host), mrq->cmd->opcode, err);
 532        } else {
 533                pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
 534                         mmc_hostname(host), mrq->tag, err);
 535        }
 536        return err;
 537}
 538EXPORT_SYMBOL(mmc_cqe_start_req);
 539
 540/**
 541 *      mmc_cqe_request_done - CQE has finished processing an MMC request
 542 *      @host: MMC host which completed request
 543 *      @mrq: MMC request which completed
 544 *
 545 *      CQE drivers should call this function when they have completed
 546 *      their processing of a request.
 547 */
 548void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
 549{
 550        mmc_should_fail_request(host, mrq);
 551
 552        /* Flag re-tuning needed on CRC errors */
 553        if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
 554            (mrq->data && mrq->data->error == -EILSEQ))
 555                mmc_retune_needed(host);
 556
 557        trace_mmc_request_done(host, mrq);
 558
 559        if (mrq->cmd) {
 560                pr_debug("%s: CQE req done (direct CMD%u): %d\n",
 561                         mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
 562        } else {
 563                pr_debug("%s: CQE transfer done tag %d\n",
 564                         mmc_hostname(host), mrq->tag);
 565        }
 566
 567        if (mrq->data) {
 568                pr_debug("%s:     %d bytes transferred: %d\n",
 569                         mmc_hostname(host),
 570                         mrq->data->bytes_xfered, mrq->data->error);
 571        }
 572
 573        mrq->done(mrq);
 574}
 575EXPORT_SYMBOL(mmc_cqe_request_done);
 576
 577/**
 578 *      mmc_cqe_post_req - CQE post process of a completed MMC request
 579 *      @host: MMC host
 580 *      @mrq: MMC request to be processed
 581 */
 582void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
 583{
 584        if (host->cqe_ops->cqe_post_req)
 585                host->cqe_ops->cqe_post_req(host, mrq);
 586}
 587EXPORT_SYMBOL(mmc_cqe_post_req);
 588
 589/* Arbitrary 1 second timeout */
 590#define MMC_CQE_RECOVERY_TIMEOUT        1000
 591
 592/*
 593 * mmc_cqe_recovery - Recover from CQE errors.
 594 * @host: MMC host to recover
 595 *
 596 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in
  597 * eMMC, and discarding the queue in CQE. CQE must call
 598 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 599 * fails to discard its queue.
 600 */
 601int mmc_cqe_recovery(struct mmc_host *host)
 602{
 603        struct mmc_command cmd;
 604        int err;
 605
 606        mmc_retune_hold_now(host);
 607
 608        /*
 609         * Recovery is expected seldom, if at all, but it reduces performance,
 610         * so make sure it is not completely silent.
 611         */
 612        pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
 613
 614        host->cqe_ops->cqe_recovery_start(host);
 615
 616        memset(&cmd, 0, sizeof(cmd));
  617        cmd.opcode       = MMC_STOP_TRANSMISSION;
  618        cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
  619        cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
  620        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 621        mmc_wait_for_cmd(host, &cmd, 0);
 622
 623        memset(&cmd, 0, sizeof(cmd));
 624        cmd.opcode       = MMC_CMDQ_TASK_MGMT;
 625        cmd.arg          = 1; /* Discard entire queue */
 626        cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 627        cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
  628        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 629        err = mmc_wait_for_cmd(host, &cmd, 0);
 630
 631        host->cqe_ops->cqe_recovery_finish(host);
 632
 633        mmc_retune_release(host);
 634
 635        return err;
 636}
 637EXPORT_SYMBOL(mmc_cqe_recovery);
 638
 639/**
 640 *      mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 641 *      @host: MMC host
 642 *      @mrq: MMC request
 643 *
 644 *      mmc_is_req_done() is used with requests that have
 645 *      mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 646 *      starting a request and before waiting for it to complete. That is,
 647 *      either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 648 *      and before mmc_wait_for_req_done(). If it is called at other times the
 649 *      result is not meaningful.
 650 */
 651bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
 652{
 653        if (host->areq)
 654                return host->context_info.is_done_rcv;
 655        else
 656                return completion_done(&mrq->completion);
 657}
 658EXPORT_SYMBOL(mmc_is_req_done);
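/*
 * Example (illustrative sketch): the calling pattern described above for a
 * 'cap_cmd_during_tfr' request. The caller starts the request, may issue
 * further non-data commands while the transfer is ongoing, and finally
 * waits for the transfer itself; issue_non_data_command() is a placeholder:
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);
 *	while (!mmc_is_req_done(host, mrq))
 *		issue_non_data_command(host);
 *	mmc_wait_for_req_done(host, mrq);
 */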
 659
 660/**
 661 *      mmc_pre_req - Prepare for a new request
 662 *      @host: MMC host to prepare command
 663 *      @mrq: MMC request to prepare for
 664 *
  665 *      mmc_pre_req() is called prior to mmc_start_req() to let the
 666 *      host prepare for the new request. Preparation of a request may be
 667 *      performed while another request is running on the host.
 668 */
 669static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
 670{
 671        if (host->ops->pre_req)
 672                host->ops->pre_req(host, mrq);
 673}
 674
 675/**
 676 *      mmc_post_req - Post process a completed request
 677 *      @host: MMC host to post process command
 678 *      @mrq: MMC request to post process for
 679 *      @err: Error, if non zero, clean up any resources made in pre_req
 680 *
 681 *      Let the host post process a completed request. Post processing of
  682 *      a request may be performed while another request is running.
 683 */
 684static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 685                         int err)
 686{
 687        if (host->ops->post_req)
 688                host->ops->post_req(host, mrq, err);
 689}
 690
 691/**
 692 * mmc_finalize_areq() - finalize an asynchronous request
 693 * @host: MMC host to finalize any ongoing request on
 694 *
 695 * Returns the status of the ongoing asynchronous request, but
 696 * MMC_BLK_SUCCESS if no request was going on.
 697 */
 698static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
 699{
 700        struct mmc_context_info *context_info = &host->context_info;
 701        enum mmc_blk_status status;
 702
 703        if (!host->areq)
 704                return MMC_BLK_SUCCESS;
 705
 706        while (1) {
 707                wait_event_interruptible(context_info->wait,
 708                                (context_info->is_done_rcv ||
 709                                 context_info->is_new_req));
 710
 711                if (context_info->is_done_rcv) {
 712                        struct mmc_command *cmd;
 713
 714                        context_info->is_done_rcv = false;
 715                        cmd = host->areq->mrq->cmd;
 716
 717                        if (!cmd->error || !cmd->retries ||
 718                            mmc_card_removed(host->card)) {
 719                                status = host->areq->err_check(host->card,
 720                                                               host->areq);
 721                                break; /* return status */
 722                        } else {
 723                                mmc_retune_recheck(host);
 724                                pr_info("%s: req failed (CMD%u): %d, retrying...\n",
 725                                        mmc_hostname(host),
 726                                        cmd->opcode, cmd->error);
 727                                cmd->retries--;
 728                                cmd->error = 0;
 729                                __mmc_start_request(host, host->areq->mrq);
 730                                continue; /* wait for done/new event again */
 731                        }
 732                }
 733
 734                return MMC_BLK_NEW_REQUEST;
 735        }
 736
 737        mmc_retune_release(host);
 738
 739        /*
 740         * Check BKOPS urgency for each R1 response
 741         */
 742        if (host->card && mmc_card_mmc(host->card) &&
 743            ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
 744             (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
 745            (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
 746                mmc_start_bkops(host->card, true);
 747        }
 748
 749        return status;
 750}
 751
 752/**
 753 *      mmc_start_areq - start an asynchronous request
 754 *      @host: MMC host to start command
 755 *      @areq: asynchronous request to start
 756 *      @ret_stat: out parameter for status
 757 *
 758 *      Start a new MMC custom command request for a host.
  759 *      If there is an ongoing async request, wait for completion
  760 *      of that request, then start the new one and return.
 761 *      Does not wait for the new request to complete.
 762 *
  763 *      Returns the completed request, or NULL if none completed.
  764 *      Wait for an ongoing request (previously started) to complete and
 765 *      return the completed request. If there is no ongoing request, NULL
 766 *      is returned without waiting. NULL is not an error condition.
 767 */
 768struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
 769                                     struct mmc_async_req *areq,
 770                                     enum mmc_blk_status *ret_stat)
 771{
 772        enum mmc_blk_status status;
 773        int start_err = 0;
 774        struct mmc_async_req *previous = host->areq;
 775
 776        /* Prepare a new request */
 777        if (areq)
 778                mmc_pre_req(host, areq->mrq);
 779
 780        /* Finalize previous request */
 781        status = mmc_finalize_areq(host);
 782        if (ret_stat)
 783                *ret_stat = status;
 784
 785        /* The previous request is still going on... */
 786        if (status == MMC_BLK_NEW_REQUEST)
 787                return NULL;
 788
 789        /* Fine so far, start the new request! */
 790        if (status == MMC_BLK_SUCCESS && areq)
 791                start_err = __mmc_start_data_req(host, areq->mrq);
 792
 793        /* Postprocess the old request at this point */
 794        if (host->areq)
 795                mmc_post_req(host, host->areq->mrq, 0);
 796
 797        /* Cancel a prepared request if it was not started. */
 798        if ((status != MMC_BLK_SUCCESS || start_err) && areq)
 799                mmc_post_req(host, areq->mrq, -EINVAL);
 800
 801        if (status != MMC_BLK_SUCCESS)
 802                host->areq = NULL;
 803        else
 804                host->areq = areq;
 805
 806        return previous;
 807}
 808EXPORT_SYMBOL(mmc_start_areq);
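/*
 * Example (illustrative sketch): the double-buffered pattern mmc_start_areq()
 * is built for, roughly as used by the block driver. 'next_areq', 'prev_areq'
 * and handle_error() are placeholders:
 *
 *	prev_areq = mmc_start_areq(host, next_areq, &status);
 *	if (status == MMC_BLK_NEW_REQUEST)
 *		return;		(previous request is still running)
 *	if (status != MMC_BLK_SUCCESS)
 *		handle_error(prev_areq, status);
 */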
 809
 810/**
 811 *      mmc_wait_for_req - start a request and wait for completion
 812 *      @host: MMC host to start command
 813 *      @mrq: MMC request to start
 814 *
 815 *      Start a new MMC custom command request for a host, and wait
 816 *      for the command to complete. In the case of 'cap_cmd_during_tfr'
 817 *      requests, the transfer is ongoing and the caller can issue further
 818 *      commands that do not use the data lines, and then wait by calling
 819 *      mmc_wait_for_req_done().
 820 *      Does not attempt to parse the response.
 821 */
 822void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 823{
 824        __mmc_start_req(host, mrq);
 825
 826        if (!mrq->cap_cmd_during_tfr)
 827                mmc_wait_for_req_done(host, mrq);
 828}
 829EXPORT_SYMBOL(mmc_wait_for_req);
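/*
 * Example (illustrative sketch): a synchronous single-block read built on
 * mmc_wait_for_req(). The caller is assumed to have claimed the host; buf
 * and blk_addr are placeholders:
 *
 *	struct mmc_request mrq = {};
 *	struct mmc_command cmd = {};
 *	struct mmc_data data = {};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */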
 830
 831/**
 832 *      mmc_wait_for_cmd - start a command and wait for completion
 833 *      @host: MMC host to start command
 834 *      @cmd: MMC command to start
 835 *      @retries: maximum number of retries
 836 *
 837 *      Start a new MMC command for a host, and wait for the command
 838 *      to complete.  Return any error that occurred while the command
 839 *      was executing.  Do not attempt to parse the response.
 840 */
 841int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 842{
 843        struct mmc_request mrq = {};
 844
 845        WARN_ON(!host->claimed);
 846
 847        memset(cmd->resp, 0, sizeof(cmd->resp));
 848        cmd->retries = retries;
 849
 850        mrq.cmd = cmd;
 851        cmd->data = NULL;
 852
 853        mmc_wait_for_req(host, &mrq);
 854
 855        return cmd->error;
 856}
 857
 858EXPORT_SYMBOL(mmc_wait_for_cmd);
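/*
 * Example (illustrative sketch): sending a bare CMD13 (SEND_STATUS) with up
 * to three retries; the host must already be claimed:
 *
 *	struct mmc_command cmd = {};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */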
 859
 860/**
 861 *      mmc_set_data_timeout - set the timeout for a data command
 862 *      @data: data phase for command
 863 *      @card: the MMC card associated with the data transfer
 864 *
 865 *      Computes the data timeout parameters according to the
 866 *      correct algorithm given the card type.
 867 */
 868void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 869{
 870        unsigned int mult;
 871
 872        /*
 873         * SDIO cards only define an upper 1 s limit on access.
 874         */
 875        if (mmc_card_sdio(card)) {
 876                data->timeout_ns = 1000000000;
 877                data->timeout_clks = 0;
 878                return;
 879        }
 880
 881        /*
 882         * SD cards use a 100 multiplier rather than 10
 883         */
 884        mult = mmc_card_sd(card) ? 100 : 10;
 885
 886        /*
 887         * Scale up the multiplier (and therefore the timeout) by
 888         * the r2w factor for writes.
 889         */
 890        if (data->flags & MMC_DATA_WRITE)
 891                mult <<= card->csd.r2w_factor;
 892
 893        data->timeout_ns = card->csd.taac_ns * mult;
 894        data->timeout_clks = card->csd.taac_clks * mult;
 895
 896        /*
 897         * SD cards also have an upper limit on the timeout.
 898         */
 899        if (mmc_card_sd(card)) {
 900                unsigned int timeout_us, limit_us;
 901
 902                timeout_us = data->timeout_ns / 1000;
 903                if (card->host->ios.clock)
 904                        timeout_us += data->timeout_clks * 1000 /
 905                                (card->host->ios.clock / 1000);
 906
 907                if (data->flags & MMC_DATA_WRITE)
 908                        /*
  909                         * The MMC spec says "It is strongly recommended
 910                         * for hosts to implement more than 500ms
 911                         * timeout value even if the card indicates
 912                         * the 250ms maximum busy length."  Even the
 913                         * previous value of 300ms is known to be
 914                         * insufficient for some cards.
 915                         */
 916                        limit_us = 3000000;
 917                else
 918                        limit_us = 100000;
 919
 920                /*
 921                 * SDHC cards always use these fixed values.
 922                 */
 923                if (timeout_us > limit_us) {
 924                        data->timeout_ns = limit_us * 1000;
 925                        data->timeout_clks = 0;
 926                }
 927
 928                /* assign limit value if invalid */
 929                if (timeout_us == 0)
 930                        data->timeout_ns = limit_us * 1000;
 931        }
 932
 933        /*
 934         * Some cards require longer data read timeout than indicated in CSD.
 935         * Address this by setting the read timeout to a "reasonably high"
 936         * value. For the cards tested, 600ms has proven enough. If necessary,
 937         * this value can be increased if other problematic cards require this.
 938         */
 939        if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 940                data->timeout_ns = 600000000;
 941                data->timeout_clks = 0;
 942        }
 943
 944        /*
 945         * Some cards need very high timeouts if driven in SPI mode.
 946         * The worst observed timeout was 900ms after writing a
 947         * continuous stream of data until the internal logic
 948         * overflowed.
 949         */
 950        if (mmc_host_is_spi(card->host)) {
 951                if (data->flags & MMC_DATA_WRITE) {
 952                        if (data->timeout_ns < 1000000000)
 953                                data->timeout_ns = 1000000000;  /* 1s */
 954                } else {
 955                        if (data->timeout_ns < 100000000)
 956                                data->timeout_ns =  100000000;  /* 100ms */
 957                }
 958        }
 959}
 960EXPORT_SYMBOL(mmc_set_data_timeout);
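/*
 * Worked example (illustrative, card parameters assumed): for an SD card
 * with taac = 1 ms and r2w_factor = 2, a write uses mult = 100 << 2 = 400,
 * giving timeout_ns = 400 ms, which is below the 3 s write limit and is
 * kept. A read with the same taac gives 100 ms, matching the 100 ms read
 * limit, so it is also kept.
 */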
 961
 962/**
 963 *      mmc_align_data_size - pads a transfer size to a more optimal value
 964 *      @card: the MMC card associated with the data transfer
 965 *      @sz: original transfer size
 966 *
 967 *      Pads the original data size with a number of extra bytes in
 968 *      order to avoid controller bugs and/or performance hits
 969 *      (e.g. some controllers revert to PIO for certain sizes).
 970 *
 971 *      Returns the improved size, which might be unmodified.
 972 *
 973 *      Note that this function is only relevant when issuing a
 974 *      single scatter gather entry.
 975 */
 976unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 977{
 978        /*
 979         * FIXME: We don't have a system for the controller to tell
 980         * the core about its problems yet, so for now we just 32-bit
 981         * align the size.
 982         */
 983        sz = ((sz + 3) / 4) * 4;
 984
 985        return sz;
 986}
 987EXPORT_SYMBOL(mmc_align_data_size);
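/*
 * Example (illustrative): mmc_align_data_size(card, 1000) returns 1000,
 * which is already a multiple of 4, while mmc_align_data_size(card, 1001)
 * is rounded up to 1004.
 */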
 988
 989/*
 990 * Allow claiming an already claimed host if the context is the same or there is
 991 * no context but the task is the same.
 992 */
 993static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
 994                                   struct task_struct *task)
 995{
 996        return host->claimer == ctx ||
 997               (!ctx && task && host->claimer->task == task);
 998}
 999
1000static inline void mmc_ctx_set_claimer(struct mmc_host *host,
1001                                       struct mmc_ctx *ctx,
1002                                       struct task_struct *task)
1003{
1004        if (!host->claimer) {
1005                if (ctx)
1006                        host->claimer = ctx;
1007                else
1008                        host->claimer = &host->default_ctx;
1009        }
1010        if (task)
1011                host->claimer->task = task;
1012}
1013
1014/**
1015 *      __mmc_claim_host - exclusively claim a host
1016 *      @host: mmc host to claim
1017 *      @ctx: context that claims the host or NULL in which case the default
1018 *      context will be used
1019 *      @abort: whether or not the operation should be aborted
1020 *
 1021 *      Claim a host for a set of operations.  If @abort is non-null and
 1022 *      dereferences a non-zero value then this will return prematurely with
1023 *      that non-zero value without acquiring the lock.  Returns zero
1024 *      with the lock held otherwise.
1025 */
1026int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
1027                     atomic_t *abort)
1028{
1029        struct task_struct *task = ctx ? NULL : current;
1030        DECLARE_WAITQUEUE(wait, current);
1031        unsigned long flags;
1032        int stop;
1033        bool pm = false;
1034
1035        might_sleep();
1036
1037        add_wait_queue(&host->wq, &wait);
1038        spin_lock_irqsave(&host->lock, flags);
1039        while (1) {
1040                set_current_state(TASK_UNINTERRUPTIBLE);
1041                stop = abort ? atomic_read(abort) : 0;
1042                if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
1043                        break;
1044                spin_unlock_irqrestore(&host->lock, flags);
1045                schedule();
1046                spin_lock_irqsave(&host->lock, flags);
1047        }
1048        set_current_state(TASK_RUNNING);
1049        if (!stop) {
1050                host->claimed = 1;
1051                mmc_ctx_set_claimer(host, ctx, task);
1052                host->claim_cnt += 1;
1053                if (host->claim_cnt == 1)
1054                        pm = true;
1055        } else
1056                wake_up(&host->wq);
1057        spin_unlock_irqrestore(&host->lock, flags);
1058        remove_wait_queue(&host->wq, &wait);
1059
1060        if (pm)
1061                pm_runtime_get_sync(mmc_dev(host));
1062
1063        return stop;
1064}
1065EXPORT_SYMBOL(__mmc_claim_host);
1066
1067/**
1068 *      mmc_release_host - release a host
1069 *      @host: mmc host to release
1070 *
1071 *      Release a MMC host, allowing others to claim the host
1072 *      for their operations.
1073 */
1074void mmc_release_host(struct mmc_host *host)
1075{
1076        unsigned long flags;
1077
1078        WARN_ON(!host->claimed);
1079
1080        spin_lock_irqsave(&host->lock, flags);
1081        if (--host->claim_cnt) {
1082                /* Release for nested claim */
1083                spin_unlock_irqrestore(&host->lock, flags);
1084        } else {
1085                host->claimed = 0;
1086                host->claimer->task = NULL;
1087                host->claimer = NULL;
1088                spin_unlock_irqrestore(&host->lock, flags);
1089                wake_up(&host->wq);
1090                pm_runtime_mark_last_busy(mmc_dev(host));
1091                pm_runtime_put_autosuspend(mmc_dev(host));
1092        }
1093}
1094EXPORT_SYMBOL(mmc_release_host);
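/*
 * Example (illustrative sketch): the usual claim/release pairing around a
 * sequence of card operations. mmc_claim_host() is the core.h wrapper that
 * calls __mmc_claim_host() with a NULL context and no abort flag;
 * do_card_ops() is a placeholder:
 *
 *	mmc_claim_host(card->host);
 *	err = do_card_ops(card);
 *	mmc_release_host(card->host);
 */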
1095
1096/*
1097 * This is a helper function, which fetches a runtime pm reference for the
1098 * card device and also claims the host.
1099 */
1100void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
1101{
1102        pm_runtime_get_sync(&card->dev);
1103        __mmc_claim_host(card->host, ctx, NULL);
1104}
1105EXPORT_SYMBOL(mmc_get_card);
1106
1107/*
1108 * This is a helper function, which releases the host and drops the runtime
1109 * pm reference for the card device.
1110 */
1111void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
1112{
1113        struct mmc_host *host = card->host;
1114
1115        WARN_ON(ctx && host->claimer != ctx);
1116
1117        mmc_release_host(host);
1118        pm_runtime_mark_last_busy(&card->dev);
1119        pm_runtime_put_autosuspend(&card->dev);
1120}
1121EXPORT_SYMBOL(mmc_put_card);
1122
1123/*
1124 * Internal function that does the actual ios call to the host driver,
1125 * optionally printing some debug output.
1126 */
1127static inline void mmc_set_ios(struct mmc_host *host)
1128{
1129        struct mmc_ios *ios = &host->ios;
1130
1131        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1132                "width %u timing %u\n",
1133                 mmc_hostname(host), ios->clock, ios->bus_mode,
1134                 ios->power_mode, ios->chip_select, ios->vdd,
1135                 1 << ios->bus_width, ios->timing);
1136
1137        host->ops->set_ios(host, ios);
1138}
1139
1140/*
1141 * Control chip select pin on a host.
1142 */
1143void mmc_set_chip_select(struct mmc_host *host, int mode)
1144{
1145        host->ios.chip_select = mode;
1146        mmc_set_ios(host);
1147}
1148
1149/*
1150 * Sets the host clock to the highest possible frequency that
1151 * is below "hz".
1152 */
1153void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1154{
1155        WARN_ON(hz && hz < host->f_min);
1156
1157        if (hz > host->f_max)
1158                hz = host->f_max;
1159
1160        host->ios.clock = hz;
1161        mmc_set_ios(host);
1162}
1163
1164int mmc_execute_tuning(struct mmc_card *card)
1165{
1166        struct mmc_host *host = card->host;
1167        u32 opcode;
1168        int err;
1169
1170        if (!host->ops->execute_tuning)
1171                return 0;
1172
1173        if (host->cqe_on)
1174                host->cqe_ops->cqe_off(host);
1175
1176        if (mmc_card_mmc(card))
1177                opcode = MMC_SEND_TUNING_BLOCK_HS200;
1178        else
1179                opcode = MMC_SEND_TUNING_BLOCK;
1180
1181        err = host->ops->execute_tuning(host, opcode);
1182
1183        if (err)
1184                pr_err("%s: tuning execution failed: %d\n",
1185                        mmc_hostname(host), err);
1186        else
1187                mmc_retune_enable(host);
1188
1189        return err;
1190}
1191
1192/*
1193 * Change the bus mode (open drain/push-pull) of a host.
1194 */
1195void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1196{
1197        host->ios.bus_mode = mode;
1198        mmc_set_ios(host);
1199}
1200
1201/*
1202 * Change data bus width of a host.
1203 */
1204void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1205{
1206        host->ios.bus_width = width;
1207        mmc_set_ios(host);
1208}
1209
1210/*
1211 * Set initial state after a power cycle or a hw_reset.
1212 */
1213void mmc_set_initial_state(struct mmc_host *host)
1214{
1215        if (host->cqe_on)
1216                host->cqe_ops->cqe_off(host);
1217
1218        mmc_retune_disable(host);
1219
1220        if (mmc_host_is_spi(host))
1221                host->ios.chip_select = MMC_CS_HIGH;
1222        else
1223                host->ios.chip_select = MMC_CS_DONTCARE;
1224        host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1225        host->ios.bus_width = MMC_BUS_WIDTH_1;
1226        host->ios.timing = MMC_TIMING_LEGACY;
1227        host->ios.drv_type = 0;
1228        host->ios.enhanced_strobe = false;
1229
1230        /*
1231         * Make sure we are in non-enhanced strobe mode before we
1232         * actually enable it in ext_csd.
1233         */
1234        if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1235             host->ops->hs400_enhanced_strobe)
1236                host->ops->hs400_enhanced_strobe(host, &host->ios);
1237
1238        mmc_set_ios(host);
1239}
1240
1241/**
1242 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1243 * @vdd:        voltage (mV)
1244 * @low_bits:   prefer low bits in boundary cases
1245 *
1246 * This function returns the OCR bit number according to the provided @vdd
 1247 * value. If conversion is not possible, a negative errno value is returned.
1248 *
1249 * Depending on the @low_bits flag the function prefers low or high OCR bits
1250 * on boundary voltages. For example,
1251 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1252 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1253 *
1254 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1255 */
1256static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1257{
1258        const int max_bit = ilog2(MMC_VDD_35_36);
1259        int bit;
1260
1261        if (vdd < 1650 || vdd > 3600)
1262                return -EINVAL;
1263
1264        if (vdd >= 1650 && vdd <= 1950)
1265                return ilog2(MMC_VDD_165_195);
1266
1267        if (low_bits)
1268                vdd -= 1;
1269
1270        /* Base 2000 mV, step 100 mV, bit's base 8. */
1271        bit = (vdd - 2000) / 100 + 8;
1272        if (bit > max_bit)
1273                return max_bit;
1274        return bit;
1275}
1276
1277/**
1278 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1279 * @vdd_min:    minimum voltage value (mV)
1280 * @vdd_max:    maximum voltage value (mV)
1281 *
1282 * This function returns the OCR mask bits according to the provided @vdd_min
1283 * and @vdd_max values. If conversion is not possible the function returns 0.
1284 *
1285 * Notes wrt boundary cases:
1286 * This function sets the OCR bits for all boundary voltages, for example
1287 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1288 * MMC_VDD_34_35 mask.
1289 */
1290u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1291{
1292        u32 mask = 0;
1293
1294        if (vdd_max < vdd_min)
1295                return 0;
1296
1297        /* Prefer high bits for the boundary vdd_max values. */
1298        vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1299        if (vdd_max < 0)
1300                return 0;
1301
1302        /* Prefer low bits for the boundary vdd_min values. */
1303        vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1304        if (vdd_min < 0)
1305                return 0;
1306
1307        /* Fill the mask, from max bit to min bit. */
1308        while (vdd_max >= vdd_min)
1309                mask |= 1 << vdd_max--;
1310
1311        return mask;
1312}
1313EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
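/*
 * Example (illustrative sketch): a host driver with a fixed 3.3 V supply
 * might derive its OCR mask in probe(); per the boundary rules above, a
 * [3300:3400] range yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35:
 *
 *	mmc->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 */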
1314
1315#ifdef CONFIG_OF
1316
1317/**
1318 * mmc_of_parse_voltage - return mask of supported voltages
 1319 * @np: The device node to be parsed.
1320 * @mask: mask of voltages available for MMC/SD/SDIO
1321 *
1322 * Parse the "voltage-ranges" DT property, returning zero if it is not
1323 * found, negative errno if the voltage-range specification is invalid,
1324 * or one if the voltage-range is specified and successfully parsed.
1325 */
1326int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1327{
1328        const u32 *voltage_ranges;
1329        int num_ranges, i;
1330
 1331        voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
 1332        if (!voltage_ranges) {
 1333                pr_debug("%pOF: voltage-ranges unspecified\n", np);
 1334                return 0;
 1335        }
 1336        num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
 1337        if (!num_ranges) {
1338                pr_err("%pOF: voltage-ranges empty\n", np);
1339                return -EINVAL;
1340        }
1341
1342        for (i = 0; i < num_ranges; i++) {
1343                const int j = i * 2;
1344                u32 ocr_mask;
1345
1346                ocr_mask = mmc_vddrange_to_ocrmask(
1347                                be32_to_cpu(voltage_ranges[j]),
1348                                be32_to_cpu(voltage_ranges[j + 1]));
1349                if (!ocr_mask) {
1350                        pr_err("%pOF: voltage-range #%d is invalid\n",
1351                                np, i);
1352                        return -EINVAL;
1353                }
1354                *mask |= ocr_mask;
1355        }
1356
1357        return 1;
1358}
1359EXPORT_SYMBOL(mmc_of_parse_voltage);
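/*
 * Example (illustrative): a controller node carrying a single voltage range
 * (values are <min max> pairs in millivolts), and the matching call from a
 * host driver's probe; the node layout is only a sketch:
 *
 *	mmc-controller {
 *		voltage-ranges = <3300 3400>;
 *	};
 *
 *	ret = mmc_of_parse_voltage(np, &mmc->ocr_avail);
 */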
1360
1361#endif /* CONFIG_OF */
1362
1363static int mmc_of_get_func_num(struct device_node *node)
1364{
1365        u32 reg;
1366        int ret;
1367
1368        ret = of_property_read_u32(node, "reg", &reg);
1369        if (ret < 0)
1370                return ret;
1371
1372        return reg;
1373}
1374
1375struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1376                unsigned func_num)
1377{
1378        struct device_node *node;
1379
1380        if (!host->parent || !host->parent->of_node)
1381                return NULL;
1382
1383        for_each_child_of_node(host->parent->of_node, node) {
1384                if (mmc_of_get_func_num(node) == func_num)
1385                        return node;
1386        }
1387
1388        return NULL;
1389}
1390
1391#ifdef CONFIG_REGULATOR
1392
1393/**
 1394 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
1395 * @vdd_bit:    OCR bit number
 1396 * @min_uV:     minimum voltage value (uV)
 1397 * @max_uV:     maximum voltage value (uV)
1398 *
1399 * This function returns the voltage range according to the provided OCR
 1400 * bit number. If conversion is not possible, a negative errno value is returned.
1401 */
1402static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1403{
1404        int             tmp;
1405
1406        if (!vdd_bit)
1407                return -EINVAL;
1408
1409        /*
1410         * REVISIT mmc_vddrange_to_ocrmask() may have set some
1411         * bits this regulator doesn't quite support ... don't
1412         * be too picky, most cards and regulators are OK with
1413         * a 0.1V range goof (it's a small error percentage).
1414         */
1415        tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1416        if (tmp == 0) {
1417                *min_uV = 1650 * 1000;
1418                *max_uV = 1950 * 1000;
1419        } else {
1420                *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1421                *max_uV = *min_uV + 100 * 1000;
1422        }
1423
1424        return 0;
1425}
1426
1427/**
1428 * mmc_regulator_get_ocrmask - return mask of supported voltages
1429 * @supply: regulator to use
1430 *
1431 * This returns either a negative errno, or a mask of voltages that
1432 * can be provided to MMC/SD/SDIO devices using the specified voltage
1433 * regulator.  This would normally be called before registering the
1434 * MMC host adapter.
1435 */
1436int mmc_regulator_get_ocrmask(struct regulator *supply)
1437{
1438        int                     result = 0;
1439        int                     count;
1440        int                     i;
1441        int                     vdd_uV;
1442        int                     vdd_mV;
1443
1444        count = regulator_count_voltages(supply);
1445        if (count < 0)
1446                return count;
1447
1448        for (i = 0; i < count; i++) {
1449                vdd_uV = regulator_list_voltage(supply, i);
1450                if (vdd_uV <= 0)
1451                        continue;
1452
1453                vdd_mV = vdd_uV / 1000;
1454                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1455        }
1456
1457        if (!result) {
1458                vdd_uV = regulator_get_voltage(supply);
1459                if (vdd_uV <= 0)
1460                        return vdd_uV;
1461
1462                vdd_mV = vdd_uV / 1000;
1463                result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1464        }
1465
1466        return result;
1467}
1468EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1469
1470/**
1471 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1472 * @mmc: the host to regulate
1473 * @supply: regulator to use
1474 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1475 *
1476 * Returns zero on success, else negative errno.
1477 *
1478 * MMC host drivers may use this to enable or disable a regulator using
1479 * a particular supply voltage.  This would normally be called from the
1480 * set_ios() method.
1481 */
1482int mmc_regulator_set_ocr(struct mmc_host *mmc,
1483                        struct regulator *supply,
1484                        unsigned short vdd_bit)
1485{
1486        int                     result = 0;
1487        int                     min_uV, max_uV;
1488
1489        if (vdd_bit) {
1490                mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1491
1492                result = regulator_set_voltage(supply, min_uV, max_uV);
1493                if (result == 0 && !mmc->regulator_enabled) {
1494                        result = regulator_enable(supply);
1495                        if (!result)
1496                                mmc->regulator_enabled = true;
1497                }
1498        } else if (mmc->regulator_enabled) {
1499                result = regulator_disable(supply);
1500                if (result == 0)
1501                        mmc->regulator_enabled = false;
1502        }
1503
1504        if (result)
1505                dev_err(mmc_dev(mmc),
1506                        "could not set regulator OCR (%d)\n", result);
1507        return result;
1508}
1509EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
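/*
 * Example (illustrative sketch): a host driver's ->set_ios() callback can
 * forward the requested VDD bit to its vmmc regulator; my_set_ios() and the
 * surrounding driver are hypothetical:
 *
 *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 *	}
 */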
1510
1511static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1512                                                  int min_uV, int target_uV,
1513                                                  int max_uV)
1514{
1515        /*
1516         * Check if supported first to avoid errors since we may try several
1517         * signal levels during power up and don't want to show errors.
1518         */
1519        if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1520                return -EINVAL;
1521
1522        return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1523                                             max_uV);
1524}
1525
1526/**
1527 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1528 *
1529 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1530 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1531 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1532 * SD card spec also define VQMMC in terms of VMMC.
1533 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1534 *
1535 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1536 * requested voltage.  This is definitely a good idea for UHS where there's a
1537 * separate regulator on the card that's trying to make 1.8V and it's best if
1538 * we match.
1539 *
1540 * This function is expected to be used by a controller's
1541 * start_signal_voltage_switch() function.
1542 */
1543int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1544{
1545        struct device *dev = mmc_dev(mmc);
1546        int ret, volt, min_uV, max_uV;
1547
1548        /* If no vqmmc supply then we can't change the voltage */
1549        if (IS_ERR(mmc->supply.vqmmc))
1550                return -EINVAL;
1551
1552        switch (ios->signal_voltage) {
1553        case MMC_SIGNAL_VOLTAGE_120:
1554                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1555                                                1100000, 1200000, 1300000);
1556        case MMC_SIGNAL_VOLTAGE_180:
1557                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1558                                                1700000, 1800000, 1950000);
1559        case MMC_SIGNAL_VOLTAGE_330:
1560                ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1561                if (ret < 0)
1562                        return ret;
1563
1564                dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1565                        __func__, volt, max_uV);
1566
1567                min_uV = max(volt - 300000, 2700000);
1568                max_uV = min(max_uV + 200000, 3600000);
1569
1570                /*
1571                 * Due to a limitation in the current implementation of
1572                 * regulator_set_voltage_triplet(), which picks the lowest
1573                 * possible voltage if it is below the target, search for a suitable
1574                 * voltage in two steps and try to stay close to vmmc
1575                 * with a 0.3V tolerance at first.
1576                 */
1577                if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1578                                                min_uV, volt, max_uV))
1579                        return 0;
1580
1581                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1582                                                2700000, volt, 3600000);
1583        default:
1584                return -EINVAL;
1585        }
1586}
1587EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
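/*
 * As noted above, this helper is meant to back a controller's
 * ->start_signal_voltage_switch() callback. A hypothetical sketch, where
 * foo_set_pad_voltage() stands in for whatever pad/IO-domain switching the
 * controller needs (it is not a real API):
 *
 *	static int foo_start_signal_voltage_switch(struct mmc_host *mmc,
 *						   struct mmc_ios *ios)
 *	{
 *		int ret = mmc_regulator_set_vqmmc(mmc, ios);
 *
 *		if (ret)
 *			return ret;
 *
 *		return foo_set_pad_voltage(mmc, ios->signal_voltage);
 *	}
 */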
1588
1589#endif /* CONFIG_REGULATOR */
1590
1591/**
1592 * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
1593 * @mmc: the host to regulate
1594 *
1595 * Returns 0 or a negative errno. An errno must be handled; it is either a
1596 * critical error or -EPROBE_DEFER. 0 means no critical error, but it does not
1597 * mean that all regulators were found, because they are all optional. If you
1598 * require particular regulators, check separately in your driver whether they
1599 * were populated after calling this function.
1600 */
1601int mmc_regulator_get_supply(struct mmc_host *mmc)
1602{
1603        struct device *dev = mmc_dev(mmc);
1604        int ret;
1605
1606        mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1607        mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1608
1609        if (IS_ERR(mmc->supply.vmmc)) {
1610                if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1611                        return -EPROBE_DEFER;
1612                dev_dbg(dev, "No vmmc regulator found\n");
1613        } else {
1614                ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1615                if (ret > 0)
1616                        mmc->ocr_avail = ret;
1617                else
1618                        dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1619        }
1620
1621        if (IS_ERR(mmc->supply.vqmmc)) {
1622                if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1623                        return -EPROBE_DEFER;
1624                dev_dbg(dev, "No vqmmc regulator found\n");
1625        }
1626
1627        return 0;
1628}
1629EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
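/*
 * A hypothetical probe-time usage sketch: fetch the supplies, propagate real
 * errors (including -EPROBE_DEFER), and only then check whether the optional
 * vqmmc supply was actually found. "dev" is assumed to be the host's device:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 *
 *	if (IS_ERR(mmc->supply.vqmmc))
 *		dev_dbg(dev, "no vqmmc regulator, no UHS signal switching\n");
 */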
1630
1631/*
1632 * Mask off any voltages we don't support and select
1633 * the lowest voltage
1634 */
1635u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1636{
1637        int bit;
1638
1639        /*
1640         * Sanity check the voltages that the card claims to
1641         * support.
1642         */
1643        if (ocr & 0x7F) {
1644                dev_warn(mmc_dev(host),
1645                "card claims to support voltages below defined range\n");
1646                ocr &= ~0x7F;
1647        }
1648
1649        ocr &= host->ocr_avail;
1650        if (!ocr) {
1651                dev_warn(mmc_dev(host), "no support for card's volts\n");
1652                return 0;
1653        }
1654
1655        if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1656                bit = ffs(ocr) - 1;
1657                ocr &= 3 << bit;
1658                mmc_power_cycle(host, ocr);
1659        } else {
1660                bit = fls(ocr) - 1;
1661                ocr &= 3 << bit;
1662                if (bit != host->ios.vdd)
1663                        dev_warn(mmc_dev(host), "exceeding card's volts\n");
1664        }
1665
1666        return ocr;
1667}
1668
1669int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1670{
1671        int err = 0;
1672        int old_signal_voltage = host->ios.signal_voltage;
1673
1674        host->ios.signal_voltage = signal_voltage;
1675        if (host->ops->start_signal_voltage_switch)
1676                err = host->ops->start_signal_voltage_switch(host, &host->ios);
1677
1678        if (err)
1679                host->ios.signal_voltage = old_signal_voltage;
1680
1681        return err;
1682
1683}
1684
1685int mmc_host_set_uhs_voltage(struct mmc_host *host)
1686{
1687        u32 clock;
1688
1689        /*
1690         * During a signal voltage level switch, the clock must be gated
1691         * for 5 ms according to the SD spec
1692         */
1693        clock = host->ios.clock;
1694        host->ios.clock = 0;
1695        mmc_set_ios(host);
1696
1697        if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1698                return -EAGAIN;
1699
1700        /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1701        mmc_delay(10);
1702        host->ios.clock = clock;
1703        mmc_set_ios(host);
1704
1705        return 0;
1706}
1707
1708int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1709{
1710        struct mmc_command cmd = {};
1711        int err = 0;
1712
1713        /*
1714         * If we cannot switch voltages, return failure so the caller
1715         * can continue without UHS mode
1716         */
1717        if (!host->ops->start_signal_voltage_switch)
1718                return -EPERM;
1719        if (!host->ops->card_busy)
1720                pr_warn("%s: cannot verify signal voltage switch\n",
1721                        mmc_hostname(host));
1722
1723        cmd.opcode = SD_SWITCH_VOLTAGE;
1724        cmd.arg = 0;
1725        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1726
1727        err = mmc_wait_for_cmd(host, &cmd, 0);
1728        if (err)
1729                return err;
1730
1731        if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1732                return -EIO;
1733
1734        /*
1735         * The card should drive cmd and dat[0:3] low immediately
1736         * after the response of cmd11, but wait 1 ms to be sure
1737         */
1738        mmc_delay(1);
1739        if (host->ops->card_busy && !host->ops->card_busy(host)) {
1740                err = -EAGAIN;
1741                goto power_cycle;
1742        }
1743
1744        if (mmc_host_set_uhs_voltage(host)) {
1745                /*
1746                 * Voltages may not have been switched, but we've already
1747                 * sent CMD11, so a power cycle is required anyway
1748                 */
1749                err = -EAGAIN;
1750                goto power_cycle;
1751        }
1752
1753        /* Wait for at least 1 ms according to spec */
1754        mmc_delay(1);
1755
1756        /*
1757         * Failure to switch is indicated by the card holding
1758         * dat[0:3] low
1759         */
1760        if (host->ops->card_busy && host->ops->card_busy(host))
1761                err = -EAGAIN;
1762
1763power_cycle:
1764        if (err) {
1765                pr_debug("%s: Signal voltage switch failed, "
1766                        "power cycling card\n", mmc_hostname(host));
1767                mmc_power_cycle(host, ocr);
1768        }
1769
1770        return err;
1771}
1772
1773/*
1774 * Select timing parameters for host.
1775 */
1776void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1777{
1778        host->ios.timing = timing;
1779        mmc_set_ios(host);
1780}
1781
1782/*
1783 * Select appropriate driver type for host.
1784 */
1785void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1786{
1787        host->ios.drv_type = drv_type;
1788        mmc_set_ios(host);
1789}
1790
1791int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1792                              int card_drv_type, int *drv_type)
1793{
1794        struct mmc_host *host = card->host;
1795        int host_drv_type = SD_DRIVER_TYPE_B;
1796
1797        *drv_type = 0;
1798
1799        if (!host->ops->select_drive_strength)
1800                return 0;
1801
1802        /* Use SD definition of driver strength for hosts */
1803        if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1804                host_drv_type |= SD_DRIVER_TYPE_A;
1805
1806        if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1807                host_drv_type |= SD_DRIVER_TYPE_C;
1808
1809        if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1810                host_drv_type |= SD_DRIVER_TYPE_D;
1811
1812        /*
1813         * The drive strength that the hardware can support
1814         * depends on the board design.  Pass the appropriate
1815         * information and let the hardware-specific code
1816         * return what is possible given the options.
1817         */
1818        return host->ops->select_drive_strength(card, max_dtr,
1819                                                host_drv_type,
1820                                                card_drv_type,
1821                                                drv_type);
1822}
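/*
 * A hypothetical ->select_drive_strength() callback could pick the strongest
 * drive type supported by both sides and leave the card at its default (the
 * return value is the drive strength to program into the card, 0 meaning
 * type B). This is only an illustration of the masks passed above, not a
 * recommendation for any particular board:
 *
 *	static int foo_select_drive_strength(struct mmc_card *card,
 *					     unsigned int max_dtr, int host_drv,
 *					     int card_drv, int *drv_type)
 *	{
 *		if (host_drv & card_drv & SD_DRIVER_TYPE_A)
 *			*drv_type = MMC_SET_DRIVER_TYPE_A;
 *		else
 *			*drv_type = MMC_SET_DRIVER_TYPE_B;
 *
 *		return 0;
 *	}
 */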
1823
1824/*
1825 * Apply power to the MMC stack.  This is a two-stage process.
1826 * First, we enable power to the card without the clock running.
1827 * We then wait a bit for the power to stabilise.  Finally,
1828 * enable the bus drivers and clock to the card.
1829 *
1830 * We must _NOT_ enable the clock prior to power stabilising.
1831 *
1832 * If a host does all the power sequencing itself, ignore the
1833 * initial MMC_POWER_UP stage.
1834 */
1835void mmc_power_up(struct mmc_host *host, u32 ocr)
1836{
1837        if (host->ios.power_mode == MMC_POWER_ON)
1838                return;
1839
1840        mmc_pwrseq_pre_power_on(host);
1841
1842        host->ios.vdd = fls(ocr) - 1;
1843        host->ios.power_mode = MMC_POWER_UP;
1844        /* Set initial state and call mmc_set_ios */
1845        mmc_set_initial_state(host);
1846
1847        /* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
1848        if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1849                dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1850        else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1851                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1852        else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1853                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1854
1855        /*
1856         * This delay should be sufficient to allow the power supply
1857         * to reach the minimum voltage.
1858         */
1859        mmc_delay(10);
1860
1861        mmc_pwrseq_post_power_on(host);
1862
1863        host->ios.clock = host->f_init;
1864
1865        host->ios.power_mode = MMC_POWER_ON;
1866        mmc_set_ios(host);
1867
1868        /*
1869         * This delay must be at least 74 clock cycles, or 1 ms, or the
1870         * time required to reach a stable voltage.
1871         */
1872        mmc_delay(10);
1873}
1874
1875void mmc_power_off(struct mmc_host *host)
1876{
1877        if (host->ios.power_mode == MMC_POWER_OFF)
1878                return;
1879
1880        mmc_pwrseq_power_off(host);
1881
1882        host->ios.clock = 0;
1883        host->ios.vdd = 0;
1884
1885        host->ios.power_mode = MMC_POWER_OFF;
1886        /* Set initial state and call mmc_set_ios */
1887        mmc_set_initial_state(host);
1888
1889        /*
1890         * Some configurations, such as the 802.11 SDIO card in the OLPC
1891         * XO-1.5, require a short delay after poweroff before the card
1892         * can be successfully turned on again.
1893         */
1894        mmc_delay(1);
1895}
1896
1897void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1898{
1899        mmc_power_off(host);
1900        /* Wait at least 1 ms according to SD spec */
1901        mmc_delay(1);
1902        mmc_power_up(host, ocr);
1903}
1904
1905/*
1906 * Cleanup when the last reference to the bus operator is dropped.
1907 */
1908static void __mmc_release_bus(struct mmc_host *host)
1909{
1910        WARN_ON(!host->bus_dead);
1911
1912        host->bus_ops = NULL;
1913}
1914
1915/*
1916 * Increase reference count of bus operator
1917 */
1918static inline void mmc_bus_get(struct mmc_host *host)
1919{
1920        unsigned long flags;
1921
1922        spin_lock_irqsave(&host->lock, flags);
1923        host->bus_refs++;
1924        spin_unlock_irqrestore(&host->lock, flags);
1925}
1926
1927/*
1928 * Decrease reference count of bus operator and free it if
1929 * it is the last reference.
1930 */
1931static inline void mmc_bus_put(struct mmc_host *host)
1932{
1933        unsigned long flags;
1934
1935        spin_lock_irqsave(&host->lock, flags);
1936        host->bus_refs--;
1937        if ((host->bus_refs == 0) && host->bus_ops)
1938                __mmc_release_bus(host);
1939        spin_unlock_irqrestore(&host->lock, flags);
1940}
1941
1942/*
1943 * Assign a mmc bus handler to a host. Only one bus handler may control a
1944 * host at any given time.
1945 */
1946void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1947{
1948        unsigned long flags;
1949
1950        WARN_ON(!host->claimed);
1951
1952        spin_lock_irqsave(&host->lock, flags);
1953
1954        WARN_ON(host->bus_ops);
1955        WARN_ON(host->bus_refs);
1956
1957        host->bus_ops = ops;
1958        host->bus_refs = 1;
1959        host->bus_dead = 0;
1960
1961        spin_unlock_irqrestore(&host->lock, flags);
1962}
1963
1964/*
1965 * Remove the current bus handler from a host.
1966 */
1967void mmc_detach_bus(struct mmc_host *host)
1968{
1969        unsigned long flags;
1970
1971        WARN_ON(!host->claimed);
1972        WARN_ON(!host->bus_ops);
1973
1974        spin_lock_irqsave(&host->lock, flags);
1975
1976        host->bus_dead = 1;
1977
1978        spin_unlock_irqrestore(&host->lock, flags);
1979
1980        mmc_bus_put(host);
1981}
1982
1983static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1984                                bool cd_irq)
1985{
1986        /*
1987         * If the device is configured as a wakeup source, we prevent a new
1988         * sleep for 5 s to give userspace time to consume the event.
1989         */
1990        if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1991                device_can_wakeup(mmc_dev(host)))
1992                pm_wakeup_event(mmc_dev(host), 5000);
1993
1994        host->detect_change = 1;
1995        mmc_schedule_delayed_work(&host->detect, delay);
1996}
1997
1998/**
1999 *      mmc_detect_change - process change of state on an MMC socket
2000 *      @host: host which changed state.
2001 *      @delay: optional delay to wait before detection (jiffies)
2002 *
2003 *      MMC drivers should call this when they detect a card has been
2004 *      inserted or removed. The MMC layer will confirm that any
2005 *      present card is still functional, and initialize any newly
2006 *      inserted card.
2007 */
2008void mmc_detect_change(struct mmc_host *host, unsigned long delay)
2009{
2010        _mmc_detect_change(host, delay, true);
2011}
2012EXPORT_SYMBOL(mmc_detect_change);
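/*
 * The typical caller is a card-detect interrupt handler in a host driver.
 * A hypothetical sketch (names and the 200 ms debounce are illustrative):
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */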
2013
2014void mmc_init_erase(struct mmc_card *card)
2015{
2016        unsigned int sz;
2017
2018        if (is_power_of_2(card->erase_size))
2019                card->erase_shift = ffs(card->erase_size) - 1;
2020        else
2021                card->erase_shift = 0;
2022
2023        /*
2024         * It is possible to erase an arbitrarily large area of an SD or MMC
2025         * card.  That is not desirable because it can take a long time
2026         * (minutes), potentially delaying more important I/O, and because the
2027         * timeout calculations become increasingly over-estimated.
2028         * Consequently, 'pref_erase' is defined as a guide to limit erases
2029         * to that size and alignment.
2030         *
2031         * For SD cards that define Allocation Unit size, limit erases to one
2032         * Allocation Unit at a time.
2033         * For MMC, have a stab at a good value and for modern cards it will
2034         * end up being 4MiB. Note that if the value is too small, it can end
2035         * up taking longer to erase. Also note, erase_size is already set to
2036         * High Capacity Erase Size if available when this function is called.
2037         */
2038        if (mmc_card_sd(card) && card->ssr.au) {
2039                card->pref_erase = card->ssr.au;
2040                card->erase_shift = ffs(card->ssr.au) - 1;
2041        } else if (card->erase_size) {
2042                sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2043                if (sz < 128)
2044                        card->pref_erase = 512 * 1024 / 512;
2045                else if (sz < 512)
2046                        card->pref_erase = 1024 * 1024 / 512;
2047                else if (sz < 1024)
2048                        card->pref_erase = 2 * 1024 * 1024 / 512;
2049                else
2050                        card->pref_erase = 4 * 1024 * 1024 / 512;
2051                if (card->pref_erase < card->erase_size)
2052                        card->pref_erase = card->erase_size;
2053                else {
2054                        sz = card->pref_erase % card->erase_size;
2055                        if (sz)
2056                                card->pref_erase += card->erase_size - sz;
2057                }
2058        } else
2059                card->pref_erase = 0;
2060}
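/*
 * Worked example of the heuristic above, with illustrative numbers: for an
 * 8 GiB eMMC, sz evaluates to roughly 8192 (MiB), so pref_erase becomes
 * 4 * 1024 * 1024 / 512 = 8192 sectors, and is then rounded up to a multiple
 * of erase_size if it is not one already.
 */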
2061
2062static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2063                                          unsigned int arg, unsigned int qty)
2064{
2065        unsigned int erase_timeout;
2066
2067        if (arg == MMC_DISCARD_ARG ||
2068            (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2069                erase_timeout = card->ext_csd.trim_timeout;
2070        } else if (card->ext_csd.erase_group_def & 1) {
2071                /* High Capacity Erase Group Size uses HC timeouts */
2072                if (arg == MMC_TRIM_ARG)
2073                        erase_timeout = card->ext_csd.trim_timeout;
2074                else
2075                        erase_timeout = card->ext_csd.hc_erase_timeout;
2076        } else {
2077                /* CSD Erase Group Size uses write timeout */
2078                unsigned int mult = (10 << card->csd.r2w_factor);
2079                unsigned int timeout_clks = card->csd.taac_clks * mult;
2080                unsigned int timeout_us;
2081
2082                /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
2083                if (card->csd.taac_ns < 1000000)
2084                        timeout_us = (card->csd.taac_ns * mult) / 1000;
2085                else
2086                        timeout_us = (card->csd.taac_ns / 1000) * mult;
2087
2088                /*
2089                 * ios.clock is only a target.  The real clock rate might be
2090                 * less but not that much less, so fudge it by multiplying by 2.
2091                 */
2092                timeout_clks <<= 1;
2093                timeout_us += (timeout_clks * 1000) /
2094                              (card->host->ios.clock / 1000);
2095
2096                erase_timeout = timeout_us / 1000;
2097
2098                /*
2099                 * Theoretically, the calculation could underflow so round up
2100                 * to 1ms in that case.
2101                 */
2102                if (!erase_timeout)
2103                        erase_timeout = 1;
2104        }
2105
2106        /* Multiplier for secure operations */
2107        if (arg & MMC_SECURE_ARGS) {
2108                if (arg == MMC_SECURE_ERASE_ARG)
2109                        erase_timeout *= card->ext_csd.sec_erase_mult;
2110                else
2111                        erase_timeout *= card->ext_csd.sec_trim_mult;
2112        }
2113
2114        erase_timeout *= qty;
2115
2116        /*
2117         * Ensure at least a 1 second timeout for SPI as per
2118         * 'mmc_set_data_timeout()'
2119         */
2120        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2121                erase_timeout = 1000;
2122
2123        return erase_timeout;
2124}
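/*
 * Worked example of the CSD-based branch above, with illustrative CSD
 * values: taac_ns = 1000000 (1 ms), taac_clks = 0 and r2w_factor = 4 give
 * mult = 10 << 4 = 160 and timeout_us = (1000000 / 1000) * 160 = 160000,
 * i.e. a 160 ms timeout per erase group before the qty multiplication.
 */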
2125
2126static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2127                                         unsigned int arg,
2128                                         unsigned int qty)
2129{
2130        unsigned int erase_timeout;
2131
2132        if (card->ssr.erase_timeout) {
2133                /* Erase timeout specified in SD Status Register (SSR) */
2134                erase_timeout = card->ssr.erase_timeout * qty +
2135                                card->ssr.erase_offset;
2136        } else {
2137                /*
2138                 * Erase timeout not specified in SD Status Register (SSR) so
2139                 * use 250ms per write block.
2140                 */
2141                erase_timeout = 250 * qty;
2142        }
2143
2144        /* Must not be less than 1 second */
2145        if (erase_timeout < 1000)
2146                erase_timeout = 1000;
2147
2148        return erase_timeout;
2149}
2150
2151static unsigned int mmc_erase_timeout(struct mmc_card *card,
2152                                      unsigned int arg,
2153                                      unsigned int qty)
2154{
2155        if (mmc_card_sd(card))
2156                return mmc_sd_erase_timeout(card, arg, qty);
2157        else
2158                return mmc_mmc_erase_timeout(card, arg, qty);
2159}
2160
2161static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2162                        unsigned int to, unsigned int arg)
2163{
2164        struct mmc_command cmd = {};
2165        unsigned int qty = 0, busy_timeout = 0;
2166        bool use_r1b_resp = false;
2167        unsigned long timeout;
2168        int err;
2169
2170        mmc_retune_hold(card->host);
2171
2172        /*
2173         * qty is used to calculate the erase timeout which depends on how many
2174         * erase groups (or allocation units in SD terminology) are affected.
2175         * We count erasing part of an erase group as one erase group.
2176         * For SD, the allocation units are always a power of 2.  For MMC, the
2177         * erase group size is almost certainly also a power of 2, but the JEDEC
2178         * standard does not seem to insist on that, so we fall back to
2179         * division in that case.  SD may not specify an allocation unit size,
2180         * in which case the timeout is based on the number of write blocks.
2181         *
2182         * Note that the timeout for secure trim 2 will only be correct if the
2183         * number of erase groups specified is the same as the total of all
2184         * preceding secure trim 1 commands.  Since the power may have been
2185         * lost since the secure trim 1 commands occurred, it is generally
2186         * impossible to calculate the secure trim 2 timeout correctly.
2187         */
2188        if (card->erase_shift)
2189                qty += ((to >> card->erase_shift) -
2190                        (from >> card->erase_shift)) + 1;
2191        else if (mmc_card_sd(card))
2192                qty += to - from + 1;
2193        else
2194                qty += ((to / card->erase_size) -
2195                        (from / card->erase_size)) + 1;
2196
2197        if (!mmc_card_blockaddr(card)) {
2198                from <<= 9;
2199                to <<= 9;
2200        }
2201
2202        if (mmc_card_sd(card))
2203                cmd.opcode = SD_ERASE_WR_BLK_START;
2204        else
2205                cmd.opcode = MMC_ERASE_GROUP_START;
2206        cmd.arg = from;
2207        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2208        err = mmc_wait_for_cmd(card->host, &cmd, 0);
2209        if (err) {
2210                pr_err("mmc_erase: group start error %d, "
2211                       "status %#x\n", err, cmd.resp[0]);
2212                err = -EIO;
2213                goto out;
2214        }
2215
2216        memset(&cmd, 0, sizeof(struct mmc_command));
2217        if (mmc_card_sd(card))
2218                cmd.opcode = SD_ERASE_WR_BLK_END;
2219        else
2220                cmd.opcode = MMC_ERASE_GROUP_END;
2221        cmd.arg = to;
2222        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2223        err = mmc_wait_for_cmd(card->host, &cmd, 0);
2224        if (err) {
2225                pr_err("mmc_erase: group end error %d, status %#x\n",
2226                       err, cmd.resp[0]);
2227                err = -EIO;
2228                goto out;
2229        }
2230
2231        memset(&cmd, 0, sizeof(struct mmc_command));
2232        cmd.opcode = MMC_ERASE;
2233        cmd.arg = arg;
2234        busy_timeout = mmc_erase_timeout(card, arg, qty);
2235        /*
2236         * If the host controller supports busy signalling and the timeout for
2237         * the erase operation does not exceed the max_busy_timeout, we should
2238         * use an R1B response. Otherwise, prevent the host from doing hw busy
2239         * detection by converting to an R1 response instead.
2240         */
2241        if (card->host->max_busy_timeout &&
2242            busy_timeout > card->host->max_busy_timeout) {
2243                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2244        } else {
2245                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2246                cmd.busy_timeout = busy_timeout;
2247                use_r1b_resp = true;
2248        }
2249
2250        err = mmc_wait_for_cmd(card->host, &cmd, 0);
2251        if (err) {
2252                pr_err("mmc_erase: erase error %d, status %#x\n",
2253                       err, cmd.resp[0]);
2254                err = -EIO;
2255                goto out;
2256        }
2257
2258        if (mmc_host_is_spi(card->host))
2259                goto out;
2260
2261        /*
2262         * When an R1B response is used and MMC_CAP_WAIT_WHILE_BUSY is set,
2263         * polling shall be avoided.
2264         */
2265        if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2266                goto out;
2267
2268        timeout = jiffies + msecs_to_jiffies(busy_timeout);
2269        do {
2270                memset(&cmd, 0, sizeof(struct mmc_command));
2271                cmd.opcode = MMC_SEND_STATUS;
2272                cmd.arg = card->rca << 16;
2273                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2274                /* Do not retry else we can't see errors */
2275                err = mmc_wait_for_cmd(card->host, &cmd, 0);
2276                if (err || (cmd.resp[0] & 0xFDF92000)) {
2277                        pr_err("error %d requesting status %#x\n",
2278                                err, cmd.resp[0]);
2279                        err = -EIO;
2280                        goto out;
2281                }
2282
2283                /* Timeout if the device never becomes ready for data and
2284                 * never leaves the program state.
2285                 */
2286                if (time_after(jiffies, timeout)) {
2287                        pr_err("%s: Card stuck in programming state! %s\n",
2288                                mmc_hostname(card->host), __func__);
2289                        err =  -EIO;
2290                        goto out;
2291                }
2292
2293        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2294                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2295out:
2296        mmc_retune_release(card->host);
2297        return err;
2298}
2299
2300static unsigned int mmc_align_erase_size(struct mmc_card *card,
2301                                         unsigned int *from,
2302                                         unsigned int *to,
2303                                         unsigned int nr)
2304{
2305        unsigned int from_new = *from, nr_new = nr, rem;
2306
2307        /*
2308         * When the 'card->erase_size' is power of 2, we can use round_up/down()
2309         * to align the erase size efficiently.
2310         */
2311        if (is_power_of_2(card->erase_size)) {
2312                unsigned int temp = from_new;
2313
2314                from_new = round_up(temp, card->erase_size);
2315                rem = from_new - temp;
2316
2317                if (nr_new > rem)
2318                        nr_new -= rem;
2319                else
2320                        return 0;
2321
2322                nr_new = round_down(nr_new, card->erase_size);
2323        } else {
2324                rem = from_new % card->erase_size;
2325                if (rem) {
2326                        rem = card->erase_size - rem;
2327                        from_new += rem;
2328                        if (nr_new > rem)
2329                                nr_new -= rem;
2330                        else
2331                                return 0;
2332                }
2333
2334                rem = nr_new % card->erase_size;
2335                if (rem)
2336                        nr_new -= rem;
2337        }
2338
2339        if (nr_new == 0)
2340                return 0;
2341
2342        *to = from_new + nr_new;
2343        *from = from_new;
2344
2345        return nr_new;
2346}
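/*
 * Worked example with illustrative numbers: for erase_size = 1024 sectors,
 * *from = 1500 and nr = 3000, the power-of-2 path gives from_new = 2048,
 * rem = 548 and nr_new = 3000 - 548 = 2452, rounded down to 2048. The
 * function then returns 2048 with *from = 2048 and *to = 4096.
 */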
2347
2348/**
2349 * mmc_erase - erase sectors.
2350 * @card: card to erase
2351 * @from: first sector to erase
2352 * @nr: number of sectors to erase
2353 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2354 *
2355 * Caller must claim host before calling this function.
2356 */
2357int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2358              unsigned int arg)
2359{
2360        unsigned int rem, to = from + nr;
2361        int err;
2362
2363        if (!(card->host->caps & MMC_CAP_ERASE) ||
2364            !(card->csd.cmdclass & CCC_ERASE))
2365                return -EOPNOTSUPP;
2366
2367        if (!card->erase_size)
2368                return -EOPNOTSUPP;
2369
2370        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2371                return -EOPNOTSUPP;
2372
2373        if ((arg & MMC_SECURE_ARGS) &&
2374            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2375                return -EOPNOTSUPP;
2376
2377        if ((arg & MMC_TRIM_ARGS) &&
2378            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2379                return -EOPNOTSUPP;
2380
2381        if (arg == MMC_SECURE_ERASE_ARG) {
2382                if (from % card->erase_size || nr % card->erase_size)
2383                        return -EINVAL;
2384        }
2385
2386        if (arg == MMC_ERASE_ARG)
2387                nr = mmc_align_erase_size(card, &from, &to, nr);
2388
2389        if (nr == 0)
2390                return 0;
2391
2392        if (to <= from)
2393                return -EINVAL;
2394
2395        /* 'from' and 'to' are inclusive */
2396        to -= 1;
2397
2398        /*
2399         * Special case where only one erase-group fits in the timeout budget:
2400         * If the region crosses an erase-group boundary in this particular
2401         * case, we will be trimming more than one erase-group, which does not
2402         * fit in the timeout budget of the controller, so we need to split it
2403         * and call mmc_do_erase() twice if necessary. This special case is
2404         * identified by the card->eg_boundary flag.
2405         */
2406        rem = card->erase_size - (from % card->erase_size);
2407        if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2408                err = mmc_do_erase(card, from, from + rem - 1, arg);
2409                from += rem;
2410                if ((err) || (to <= from))
2411                        return err;
2412        }
2413
2414        return mmc_do_erase(card, from, to, arg);
2415}
2416EXPORT_SYMBOL(mmc_erase);
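/*
 * A hedged sketch of a typical caller (e.g. a block driver handling a
 * discard request): pick an argument with the helpers below and issue the
 * erase with the host claimed. Only the mmc_* calls are the real API; the
 * surrounding variables are assumed:
 *
 *	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, from, nr, arg);
 *	mmc_release_host(card->host);
 */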
2417
2418int mmc_can_erase(struct mmc_card *card)
2419{
2420        if ((card->host->caps & MMC_CAP_ERASE) &&
2421            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2422                return 1;
2423        return 0;
2424}
2425EXPORT_SYMBOL(mmc_can_erase);
2426
2427int mmc_can_trim(struct mmc_card *card)
2428{
2429        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2430            (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2431                return 1;
2432        return 0;
2433}
2434EXPORT_SYMBOL(mmc_can_trim);
2435
2436int mmc_can_discard(struct mmc_card *card)
2437{
2438        /*
2439         * As there's no way to detect the discard support bit at v4.5,
2440         * use the s/w feature support field.
2441         */
2442        if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2443                return 1;
2444        return 0;
2445}
2446EXPORT_SYMBOL(mmc_can_discard);
2447
2448int mmc_can_sanitize(struct mmc_card *card)
2449{
2450        if (!mmc_can_trim(card) && !mmc_can_erase(card))
2451                return 0;
2452        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2453                return 1;
2454        return 0;
2455}
2456EXPORT_SYMBOL(mmc_can_sanitize);
2457
2458int mmc_can_secure_erase_trim(struct mmc_card *card)
2459{
2460        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2461            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2462                return 1;
2463        return 0;
2464}
2465EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2466
2467int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2468                            unsigned int nr)
2469{
2470        if (!card->erase_size)
2471                return 0;
2472        if (from % card->erase_size || nr % card->erase_size)
2473                return 0;
2474        return 1;
2475}
2476EXPORT_SYMBOL(mmc_erase_group_aligned);
2477
2478static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2479                                            unsigned int arg)
2480{
2481        struct mmc_host *host = card->host;
2482        unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2483        unsigned int last_timeout = 0;
2484        unsigned int max_busy_timeout = host->max_busy_timeout ?
2485                        host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2486
2487        if (card->erase_shift) {
2488                max_qty = UINT_MAX >> card->erase_shift;
2489                min_qty = card->pref_erase >> card->erase_shift;
2490        } else if (mmc_card_sd(card)) {
2491                max_qty = UINT_MAX;
2492                min_qty = card->pref_erase;
2493        } else {
2494                max_qty = UINT_MAX / card->erase_size;
2495                min_qty = card->pref_erase / card->erase_size;
2496        }
2497
2498        /*
2499         * We should not use 'host->max_busy_timeout' as the only limit when
2500         * deciding the max discard sectors. We should strike a balance that
2501         * improves the erase speed without producing an excessively long
2502         * timeout at the same time.
2503         *
2504         * Here we take 'card->pref_erase' as the minimum number of discard
2505         * sectors regardless of the size of 'host->max_busy_timeout', but if
2506         * 'host->max_busy_timeout' is large enough for more discard sectors,
2507         * we continue to increase the max discard sectors until we reach a
2508         * balanced value. In cases where 'host->max_busy_timeout' isn't
2509         * specified, use the default max erase timeout.
2510         */
2511        do {
2512                y = 0;
2513                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2514                        timeout = mmc_erase_timeout(card, arg, qty + x);
2515
2516                        if (qty + x > min_qty && timeout > max_busy_timeout)
2517                                break;
2518
2519                        if (timeout < last_timeout)
2520                                break;
2521                        last_timeout = timeout;
2522                        y = x;
2523                }
2524                qty += y;
2525        } while (y);
2526
2527        if (!qty)
2528                return 0;
2529
2530        /*
2531         * When specifying a sector range to trim, chances are we might cross
2532         * an erase-group boundary even if the amount of sectors is less than
2533         * one erase-group.
2534         * If we can only fit one erase-group in the controller timeout budget,
2535         * we have to care that erase-group boundaries are not crossed by a
2536         * single trim operation. We flag that special case with "eg_boundary".
2537         * In all other cases we can just decrement qty and pretend that we
2538         * always touch (qty + 1) erase-groups as a simple optimization.
2539         */
2540        if (qty == 1)
2541                card->eg_boundary = 1;
2542        else
2543                qty--;
2544
2545        /* Convert qty to sectors */
2546        if (card->erase_shift)
2547                max_discard = qty << card->erase_shift;
2548        else if (mmc_card_sd(card))
2549                max_discard = qty + 1;
2550        else
2551                max_discard = qty * card->erase_size;
2552
2553        return max_discard;
2554}
2555
2556unsigned int mmc_calc_max_discard(struct mmc_card *card)
2557{
2558        struct mmc_host *host = card->host;
2559        unsigned int max_discard, max_trim;
2560
2561        /*
2562         * Without erase_group_def set, MMC erase timeout depends on the clock
2563         * frequency, which can change.  In that case, the best choice is
2564         * just the preferred erase size.
2565         */
2566        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2567                return card->pref_erase;
2568
2569        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2570        if (mmc_can_trim(card)) {
2571                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2572                if (max_trim < max_discard)
2573                        max_discard = max_trim;
2574        } else if (max_discard < card->erase_size) {
2575                max_discard = 0;
2576        }
2577        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2578                mmc_hostname(host), max_discard, host->max_busy_timeout ?
2579                host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2580        return max_discard;
2581}
2582EXPORT_SYMBOL(mmc_calc_max_discard);
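/*
 * The block layer glue is the expected consumer of this value. A hedged
 * sketch of configuring a request queue from it, assuming "q" is the card's
 * request queue and the legacy queue-flag helpers are available:
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard) {
 *		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 *		blk_queue_max_discard_sectors(q, max_discard);
 *	}
 */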
2583
2584bool mmc_card_is_blockaddr(struct mmc_card *card)
2585{
2586        return card ? mmc_card_blockaddr(card) : false;
2587}
2588EXPORT_SYMBOL(mmc_card_is_blockaddr);
2589
2590int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2591{
2592        struct mmc_command cmd = {};
2593
2594        if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2595            mmc_card_hs400(card) || mmc_card_hs400es(card))
2596                return 0;
2597
2598        cmd.opcode = MMC_SET_BLOCKLEN;
2599        cmd.arg = blocklen;
2600        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2601        return mmc_wait_for_cmd(card->host, &cmd, 5);
2602}
2603EXPORT_SYMBOL(mmc_set_blocklen);
2604
2605int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2606                        bool is_rel_write)
2607{
2608        struct mmc_command cmd = {};
2609
2610        cmd.opcode = MMC_SET_BLOCK_COUNT;
2611        cmd.arg = blockcount & 0x0000FFFF;
2612        if (is_rel_write)
2613                cmd.arg |= 1 << 31;
2614        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2615        return mmc_wait_for_cmd(card->host, &cmd, 5);
2616}
2617EXPORT_SYMBOL(mmc_set_blockcount);
2618
2619static void mmc_hw_reset_for_init(struct mmc_host *host)
2620{
2621        mmc_pwrseq_reset(host);
2622
2623        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2624                return;
2625        host->ops->hw_reset(host);
2626}
2627
2628int mmc_hw_reset(struct mmc_host *host)
2629{
2630        int ret;
2631
2632        if (!host->card)
2633                return -EINVAL;
2634
2635        mmc_bus_get(host);
2636        if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2637                mmc_bus_put(host);
2638                return -EOPNOTSUPP;
2639        }
2640
2641        ret = host->bus_ops->reset(host);
2642        mmc_bus_put(host);
2643
2644        if (ret)
2645                pr_warn("%s: tried to reset card, got error %d\n",
2646                        mmc_hostname(host), ret);
2647
2648        return ret;
2649}
2650EXPORT_SYMBOL(mmc_hw_reset);
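/*
 * A hypothetical error-recovery path in an upper layer might use this as a
 * last resort once softer retries have failed; the policy around the call is
 * illustrative only:
 *
 *	if (mmc_hw_reset(host) == 0)
 *		pr_info("%s: card was reset\n", mmc_hostname(host));
 */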
2651
2652static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2653{
2654        host->f_init = freq;
2655
2656        pr_debug("%s: %s: trying to init card at %u Hz\n",
2657                mmc_hostname(host), __func__, host->f_init);
2658
2659        mmc_power_up(host, host->ocr_avail);
2660
2661        /*
2662         * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2663         * do a hardware reset if possible.
2664         */
2665        mmc_hw_reset_for_init(host);
2666
2667        /*
2668         * sdio_reset sends CMD52 to reset card.  Since we do not know
2669         * if the card is being re-initialized, just send it.  CMD52
2670         * should be ignored by SD/eMMC cards.
2671         * Skip it if we already know that we do not support SDIO commands
2672         */
2673        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2674                sdio_reset(host);
2675
2676        mmc_go_idle(host);
2677
2678        if (!(host->caps2 & MMC_CAP2_NO_SD))
2679                mmc_send_if_cond(host, host->ocr_avail);
2680
2681        /* Order's important: probe SDIO, then SD, then MMC */
2682        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2683                if (!mmc_attach_sdio(host))
2684                        return 0;
2685
2686        if (!(host->caps2 & MMC_CAP2_NO_SD))
2687                if (!mmc_attach_sd(host))
2688                        return 0;
2689
2690        if (!(host->caps2 & MMC_CAP2_NO_MMC))
2691                if (!mmc_attach_mmc(host))
2692                        return 0;
2693
2694        mmc_power_off(host);
2695        return -EIO;
2696}
2697
2698int _mmc_detect_card_removed(struct mmc_host *host)
2699{
2700        int ret;
2701
2702        if (!host->card || mmc_card_removed(host->card))
2703                return 1;
2704
2705        ret = host->bus_ops->alive(host);
2706
2707        /*
2708         * Card detect status and alive check may be out of sync if card is
2709         * removed slowly, when card detect switch changes while card/slot
2710         * pads are still contacted in hardware (refer to "SD Card Mechanical
2711         * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2712         * detect work 200ms later for this case.
2713         */
2714        if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2715                mmc_detect_change(host, msecs_to_jiffies(200));
2716                pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2717        }
2718
2719        if (ret) {
2720                mmc_card_set_removed(host->card);
2721                pr_debug("%s: card remove detected\n", mmc_hostname(host));
2722        }
2723
2724        return ret;
2725}
2726
2727int mmc_detect_card_removed(struct mmc_host *host)
2728{
2729        struct mmc_card *card = host->card;
2730        int ret;
2731
2732        WARN_ON(!host->claimed);
2733
2734        if (!card)
2735                return 1;
2736
2737        if (!mmc_card_is_removable(host))
2738                return 0;
2739
2740        ret = mmc_card_removed(card);
2741        /*
2742         * The card will be considered unchanged unless we have been asked to
2743         * detect a change or the host requires polling for card detection.
2744         */
2745        if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2746                return ret;
2747
2748        host->detect_change = 0;
2749        if (!ret) {
2750                ret = _mmc_detect_card_removed(host);
2751                if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2752                        /*
2753                         * Schedule a detect work as soon as possible to let a
2754                         * rescan handle the card removal.
2755                         */
2756                        cancel_delayed_work(&host->detect);
2757                        _mmc_detect_change(host, 0, false);
2758                }
2759        }
2760
2761        return ret;
2762}
2763EXPORT_SYMBOL(mmc_detect_card_removed);
2764
2765void mmc_rescan(struct work_struct *work)
2766{
2767        struct mmc_host *host =
2768                container_of(work, struct mmc_host, detect.work);
2769        int i;
2770
2771        if (host->rescan_disable)
2772                return;
2773
2774        /* If there is a non-removable card registered, only scan once */
2775        if (!mmc_card_is_removable(host) && host->rescan_entered)
2776                return;
2777        host->rescan_entered = 1;
2778
2779        if (host->trigger_card_event && host->ops->card_event) {
2780                mmc_claim_host(host);
2781                host->ops->card_event(host);
2782                mmc_release_host(host);
2783                host->trigger_card_event = false;
2784        }
2785
2786        mmc_bus_get(host);
2787
2788        /*
2789         * if there is a _removable_ card registered, check whether it is
2790         * still present
2791         */
2792        if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2793                host->bus_ops->detect(host);
2794
2795        host->detect_change = 0;
2796
2797        /*
2798         * Let mmc_bus_put() free the bus/bus_ops if we've found that
2799         * the card is no longer present.
2800         */
2801        mmc_bus_put(host);
2802        mmc_bus_get(host);
2803
2804        /* if there still is a card present, stop here */
2805        if (host->bus_ops != NULL) {
2806                mmc_bus_put(host);
2807                goto out;
2808        }
2809
2810        /*
2811         * Only we can add a new handler, so it's safe to
2812         * release the lock here.
2813         */
2814        mmc_bus_put(host);
2815
2816        mmc_claim_host(host);
2817        if (mmc_card_is_removable(host) && host->ops->get_cd &&
2818                        host->ops->get_cd(host) == 0) {
2819                mmc_power_off(host);
2820                mmc_release_host(host);
2821                goto out;
2822        }
2823
2824        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2825                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2826                        break;
2827                if (freqs[i] <= host->f_min)
2828                        break;
2829        }
2830        mmc_release_host(host);
2831
2832 out:
2833        if (host->caps & MMC_CAP_NEEDS_POLL)
2834                mmc_schedule_delayed_work(&host->detect, HZ);
2835}
2836
2837void mmc_start_host(struct mmc_host *host)
2838{
2839        host->f_init = max(freqs[0], host->f_min);
2840        host->rescan_disable = 0;
2841        host->ios.power_mode = MMC_POWER_UNDEFINED;
2842
2843        if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2844                mmc_claim_host(host);
2845                mmc_power_up(host, host->ocr_avail);
2846                mmc_release_host(host);
2847        }
2848
2849        mmc_gpiod_request_cd_irq(host);
2850        _mmc_detect_change(host, 0, false);
2851}
2852
2853void mmc_stop_host(struct mmc_host *host)
2854{
2855        if (host->slot.cd_irq >= 0) {
2856                if (host->slot.cd_wake_enabled)
2857                        disable_irq_wake(host->slot.cd_irq);
2858                disable_irq(host->slot.cd_irq);
2859        }
2860
2861        host->rescan_disable = 1;
2862        cancel_delayed_work_sync(&host->detect);
2863
2864        /* clear pm flags now and let card drivers set them as needed */
2865        host->pm_flags = 0;
2866
2867        mmc_bus_get(host);
2868        if (host->bus_ops && !host->bus_dead) {
2869                /* Calling bus_ops->remove() with a claimed host can deadlock */
2870                host->bus_ops->remove(host);
2871                mmc_claim_host(host);
2872                mmc_detach_bus(host);
2873                mmc_power_off(host);
2874                mmc_release_host(host);
2875                mmc_bus_put(host);
2876                return;
2877        }
2878        mmc_bus_put(host);
2879
2880        mmc_claim_host(host);
2881        mmc_power_off(host);
2882        mmc_release_host(host);
2883}
2884
2885int mmc_power_save_host(struct mmc_host *host)
2886{
2887        int ret = 0;
2888
2889        pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);
2890
2891        mmc_bus_get(host);
2892
2893        if (!host->bus_ops || host->bus_dead) {
2894                mmc_bus_put(host);
2895                return -EINVAL;
2896        }
2897
2898        if (host->bus_ops->power_save)
2899                ret = host->bus_ops->power_save(host);
2900
2901        mmc_bus_put(host);
2902
2903        mmc_power_off(host);
2904
2905        return ret;
2906}
2907EXPORT_SYMBOL(mmc_power_save_host);
2908
2909int mmc_power_restore_host(struct mmc_host *host)
2910{
2911        int ret;
2912
2913        pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);
2914
2915        mmc_bus_get(host);
2916
2917        if (!host->bus_ops || host->bus_dead) {
2918                mmc_bus_put(host);
2919                return -EINVAL;
2920        }
2921
2922        mmc_power_up(host, host->card->ocr);
2923        ret = host->bus_ops->power_restore(host);
2924
2925        mmc_bus_put(host);
2926
2927        return ret;
2928}
2929EXPORT_SYMBOL(mmc_power_restore_host);
2930
2931#ifdef CONFIG_PM_SLEEP
2932/* Do the card removal on suspend if the card is assumed removable.
2933 * Do that in a pm notifier while userspace isn't yet frozen, so we will
2934 * be able to sync the card.
2935 */
2936static int mmc_pm_notify(struct notifier_block *notify_block,
2937                        unsigned long mode, void *unused)
2938{
2939        struct mmc_host *host = container_of(
2940                notify_block, struct mmc_host, pm_notify);
2941        unsigned long flags;
2942        int err = 0;
2943
2944        switch (mode) {
2945        case PM_HIBERNATION_PREPARE:
2946        case PM_SUSPEND_PREPARE:
2947        case PM_RESTORE_PREPARE:
2948                spin_lock_irqsave(&host->lock, flags);
2949                host->rescan_disable = 1;
2950                spin_unlock_irqrestore(&host->lock, flags);
2951                cancel_delayed_work_sync(&host->detect);
2952
2953                if (!host->bus_ops)
2954                        break;
2955
2956                /* Validate prerequisites for suspend */
2957                if (host->bus_ops->pre_suspend)
2958                        err = host->bus_ops->pre_suspend(host);
2959                if (!err)
2960                        break;
2961
2962                /* Calling bus_ops->remove() with a claimed host can deadlock */
2963                host->bus_ops->remove(host);
2964                mmc_claim_host(host);
2965                mmc_detach_bus(host);
2966                mmc_power_off(host);
2967                mmc_release_host(host);
2968                host->pm_flags = 0;
2969                break;
2970
2971        case PM_POST_SUSPEND:
2972        case PM_POST_HIBERNATION:
2973        case PM_POST_RESTORE:
2974
2975                spin_lock_irqsave(&host->lock, flags);
2976                host->rescan_disable = 0;
2977                spin_unlock_irqrestore(&host->lock, flags);
2978                _mmc_detect_change(host, 0, false);
2979
2980        }
2981
2982        return 0;
2983}
2984
2985void mmc_register_pm_notifier(struct mmc_host *host)
2986{
2987        host->pm_notify.notifier_call = mmc_pm_notify;
2988        register_pm_notifier(&host->pm_notify);
2989}
2990
2991void mmc_unregister_pm_notifier(struct mmc_host *host)
2992{
2993        unregister_pm_notifier(&host->pm_notify);
2994}
2995#endif
2996
2997/**
2998 * mmc_init_context_info() - init synchronization context
2999 * @host: mmc host
3000 *
3001 * Init the struct context_info needed to implement the asynchronous
3002 * request mechanism used by the mmc core, host drivers and mmc request
3003 * suppliers.
3004 */
3005void mmc_init_context_info(struct mmc_host *host)
3006{
3007        host->context_info.is_new_req = false;
3008        host->context_info.is_done_rcv = false;
3009        host->context_info.is_waiting_last_req = false;
3010        init_waitqueue_head(&host->context_info.wait);
3011}
3012
3013static int __init mmc_init(void)
3014{
3015        int ret;
3016
3017        ret = mmc_register_bus();
3018        if (ret)
3019                return ret;
3020
3021        ret = mmc_register_host_class();
3022        if (ret)
3023                goto unregister_bus;
3024
3025        ret = sdio_register_bus();
3026        if (ret)
3027                goto unregister_host_class;
3028
3029        return 0;
3030
3031unregister_host_class:
3032        mmc_unregister_host_class();
3033unregister_bus:
3034        mmc_unregister_bus();
3035        return ret;
3036}
3037
3038static void __exit mmc_exit(void)
3039{
3040        sdio_unregister_bus();
3041        mmc_unregister_host_class();
3042        mmc_unregister_bus();
3043}
3044
3045subsys_initcall(mmc_init);
3046module_exit(mmc_exit);
3047
3048MODULE_LICENSE("GPL");
3049