linux/drivers/mmc/card/block.c
   1/*
   2 * Block driver for media (i.e., flash cards)
   3 *
   4 * Copyright 2002 Hewlett-Packard Company
   5 * Copyright 2005-2008 Pierre Ossman
   6 *
   7 * Use consistent with the GNU GPL is permitted,
   8 * provided that this copyright notice is
   9 * preserved in its entirety in all copies and derived works.
  10 *
  11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
  12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
  13 * FITNESS FOR ANY PARTICULAR PURPOSE.
  14 *
  15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
  16 *
  17 * Author:  Andrew Christian
  18 *          28 May 2002
  19 */
  20#include <linux/moduleparam.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23
  24#include <linux/kernel.h>
  25#include <linux/fs.h>
  26#include <linux/slab.h>
  27#include <linux/errno.h>
  28#include <linux/hdreg.h>
  29#include <linux/kdev_t.h>
  30#include <linux/blkdev.h>
  31#include <linux/mutex.h>
  32#include <linux/scatterlist.h>
  33#include <linux/string_helpers.h>
  34#include <linux/delay.h>
  35#include <linux/capability.h>
  36#include <linux/compat.h>
  37#include <linux/pm_runtime.h>
  38
  39#include <linux/mmc/ioctl.h>
  40#include <linux/mmc/card.h>
  41#include <linux/mmc/host.h>
  42#include <linux/mmc/mmc.h>
  43#include <linux/mmc/sd.h>
  44
  45#include <asm/uaccess.h>
  46
  47#include "queue.h"
  48
  49MODULE_ALIAS("mmc:block");
  50
  51#ifdef KERNEL
  52#ifdef MODULE_PARAM_PREFIX
  53#undef MODULE_PARAM_PREFIX
  54#endif
  55#define MODULE_PARAM_PREFIX "mmcblk."
  56#endif
  57
  58#define INAND_CMD38_ARG_EXT_CSD  113
  59#define INAND_CMD38_ARG_ERASE    0x00
  60#define INAND_CMD38_ARG_TRIM     0x01
  61#define INAND_CMD38_ARG_SECERASE 0x80
  62#define INAND_CMD38_ARG_SECTRIM1 0x81
  63#define INAND_CMD38_ARG_SECTRIM2 0x88
  64#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
  65#define MMC_SANITIZE_REQ_TIMEOUT 240000
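/*
 * A SWITCH (CMD6) argument carries the EXT_CSD byte index in bits 23:16;
 * this pulls the index back out so the ioctl path below can spot a
 * SANITIZE_START switch and route it through ioctl_do_sanitize().
 */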
  66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
  67
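/*
 * True for write requests flagged REQ_FUA or REQ_META; callers combine
 * this with MMC_BLK_REL_WR before actually issuing a reliable write.
 */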
  68#define mmc_req_rel_wr(req)     (((req->cmd_flags & REQ_FUA) || \
  69                                  (req->cmd_flags & REQ_META)) && \
  70                                  (rq_data_dir(req) == WRITE))
  71#define PACKED_CMD_VER  0x01
  72#define PACKED_CMD_WR   0x02
  73
  74static DEFINE_MUTEX(block_mutex);
  75
  76/*
   77 * The defaults come from config options but can be overridden by module
  78 * or bootarg options.
  79 */
  80static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
  81
  82/*
  83 * We've only got one major, so number of mmcblk devices is
  84 * limited to (1 << 20) / number of minors per device.  It is also
  85 * currently limited by the size of the static bitmaps below.
  86 */
  87static int max_devices;
  88
  89#define MAX_DEVICES 256
  90
  91/* TODO: Replace these with struct ida */
  92static DECLARE_BITMAP(dev_use, MAX_DEVICES);
  93static DECLARE_BITMAP(name_use, MAX_DEVICES);
  94
  95/*
  96 * There is one mmc_blk_data per slot.
  97 */
  98struct mmc_blk_data {
  99        spinlock_t      lock;
 100        struct gendisk  *disk;
 101        struct mmc_queue queue;
 102        struct list_head part;
 103
 104        unsigned int    flags;
 105#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 106#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
 107#define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */
 108
 109        unsigned int    usage;
 110        unsigned int    read_only;
 111        unsigned int    part_type;
 112        unsigned int    name_idx;
 113        unsigned int    reset_done;
 114#define MMC_BLK_READ            BIT(0)
 115#define MMC_BLK_WRITE           BIT(1)
 116#define MMC_BLK_DISCARD         BIT(2)
 117#define MMC_BLK_SECDISCARD      BIT(3)
 118
 119        /*
 120         * Only set in main mmc_blk_data associated
 121         * with mmc_card with dev_set_drvdata, and keeps
  122         * track of the currently selected device partition.
 123         */
 124        unsigned int    part_curr;
 125        struct device_attribute force_ro;
 126        struct device_attribute power_ro_lock;
 127        int     area_type;
 128};
 129
 130static DEFINE_MUTEX(open_lock);
 131
 132enum {
 133        MMC_PACKED_NR_IDX = -1,
 134        MMC_PACKED_NR_ZERO,
 135        MMC_PACKED_NR_SINGLE,
 136};
 137
 138module_param(perdev_minors, int, 0444);
  139MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
 140
 141static inline int mmc_blk_part_switch(struct mmc_card *card,
 142                                      struct mmc_blk_data *md);
 143static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 144
 145static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
 146{
 147        struct mmc_packed *packed = mqrq->packed;
 148
 149        BUG_ON(!packed);
 150
 151        mqrq->cmd_type = MMC_PACKED_NONE;
 152        packed->nr_entries = MMC_PACKED_NR_ZERO;
 153        packed->idx_failure = MMC_PACKED_NR_IDX;
 154        packed->retries = 0;
 155        packed->blocks = 0;
 156}
 157
 158static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 159{
 160        struct mmc_blk_data *md;
 161
 162        mutex_lock(&open_lock);
 163        md = disk->private_data;
 164        if (md && md->usage == 0)
 165                md = NULL;
 166        if (md)
 167                md->usage++;
 168        mutex_unlock(&open_lock);
 169
 170        return md;
 171}
 172
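/*
 * Map a gendisk back to its device index.  If no devt has been assigned
 * yet (devmaj == 0), derive the index from first_minor instead.
 */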
 173static inline int mmc_get_devidx(struct gendisk *disk)
 174{
 175        int devmaj = MAJOR(disk_devt(disk));
 176        int devidx = MINOR(disk_devt(disk)) / perdev_minors;
 177
 178        if (!devmaj)
 179                devidx = disk->first_minor / perdev_minors;
 180        return devidx;
 181}
 182
 183static void mmc_blk_put(struct mmc_blk_data *md)
 184{
 185        mutex_lock(&open_lock);
 186        md->usage--;
 187        if (md->usage == 0) {
 188                int devidx = mmc_get_devidx(md->disk);
 189                blk_cleanup_queue(md->queue.queue);
 190
 191                __clear_bit(devidx, dev_use);
 192
 193                put_disk(md->disk);
 194                kfree(md);
 195        }
 196        mutex_unlock(&open_lock);
 197}
 198
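/*
 * Boot-area write-protect status as seen through sysfs: 0 = unlocked,
 * 1 = locked until the next power cycle, 2 = permanently locked.  The
 * store handler below can only set the power-on lock (value 1).
 */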
 199static ssize_t power_ro_lock_show(struct device *dev,
 200                struct device_attribute *attr, char *buf)
 201{
 202        int ret;
 203        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 204        struct mmc_card *card = md->queue.card;
 205        int locked = 0;
 206
 207        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
 208                locked = 2;
 209        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
 210                locked = 1;
 211
 212        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 213
 214        mmc_blk_put(md);
 215
 216        return ret;
 217}
 218
 219static ssize_t power_ro_lock_store(struct device *dev,
 220                struct device_attribute *attr, const char *buf, size_t count)
 221{
 222        int ret;
 223        struct mmc_blk_data *md, *part_md;
 224        struct mmc_card *card;
 225        unsigned long set;
 226
 227        if (kstrtoul(buf, 0, &set))
 228                return -EINVAL;
 229
 230        if (set != 1)
 231                return count;
 232
 233        md = mmc_blk_get(dev_to_disk(dev));
 234        card = md->queue.card;
 235
 236        mmc_get_card(card);
 237
 238        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
 239                                card->ext_csd.boot_ro_lock |
 240                                EXT_CSD_BOOT_WP_B_PWR_WP_EN,
 241                                card->ext_csd.part_time);
 242        if (ret)
 243                pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
 244        else
 245                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
 246
 247        mmc_put_card(card);
 248
 249        if (!ret) {
 250                pr_info("%s: Locking boot partition ro until next power on\n",
 251                        md->disk->disk_name);
 252                set_disk_ro(md->disk, 1);
 253
 254                list_for_each_entry(part_md, &md->part, part)
 255                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
 256                                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
 257                                set_disk_ro(part_md->disk, 1);
 258                        }
 259        }
 260
 261        mmc_blk_put(md);
 262        return count;
 263}
 264
 265static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
 266                             char *buf)
 267{
 268        int ret;
 269        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 270
 271        ret = snprintf(buf, PAGE_SIZE, "%d\n",
 272                       get_disk_ro(dev_to_disk(dev)) ^
 273                       md->read_only);
 274        mmc_blk_put(md);
 275        return ret;
 276}
 277
 278static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
 279                              const char *buf, size_t count)
 280{
 281        int ret;
 282        char *end;
 283        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 284        unsigned long set = simple_strtoul(buf, &end, 0);
 285        if (end == buf) {
 286                ret = -EINVAL;
 287                goto out;
 288        }
 289
 290        set_disk_ro(dev_to_disk(dev), set || md->read_only);
 291        ret = count;
 292out:
 293        mmc_blk_put(md);
 294        return ret;
 295}
 296
 297static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 298{
 299        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
 300        int ret = -ENXIO;
 301
 302        mutex_lock(&block_mutex);
 303        if (md) {
 304                if (md->usage == 2)
 305                        check_disk_change(bdev);
 306                ret = 0;
 307
 308                if ((mode & FMODE_WRITE) && md->read_only) {
 309                        mmc_blk_put(md);
 310                        ret = -EROFS;
 311                }
 312        }
 313        mutex_unlock(&block_mutex);
 314
 315        return ret;
 316}
 317
 318static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
 319{
 320        struct mmc_blk_data *md = disk->private_data;
 321
 322        mutex_lock(&block_mutex);
 323        mmc_blk_put(md);
 324        mutex_unlock(&block_mutex);
 325}
 326
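/*
 * Cards have no real CHS geometry, so fake a fixed 4 heads x 16 sectors
 * layout for the benefit of legacy HDIO_GETGEO users.
 */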
 327static int
 328mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 329{
 330        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
 331        geo->heads = 4;
 332        geo->sectors = 16;
 333        return 0;
 334}
 335
 336struct mmc_blk_ioc_data {
 337        struct mmc_ioc_cmd ic;
 338        unsigned char *buf;
 339        u64 buf_bytes;
 340};
 341
 342static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
 343        struct mmc_ioc_cmd __user *user)
 344{
 345        struct mmc_blk_ioc_data *idata;
 346        int err;
 347
 348        idata = kzalloc(sizeof(*idata), GFP_KERNEL);
 349        if (!idata) {
 350                err = -ENOMEM;
 351                goto out;
 352        }
 353
 354        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
 355                err = -EFAULT;
 356                goto idata_err;
 357        }
 358
 359        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
 360        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
 361                err = -EOVERFLOW;
 362                goto idata_err;
 363        }
 364
 365        if (!idata->buf_bytes)
 366                return idata;
 367
 368        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
 369        if (!idata->buf) {
 370                err = -ENOMEM;
 371                goto idata_err;
 372        }
 373
 374        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
 375                                        idata->ic.data_ptr, idata->buf_bytes)) {
 376                err = -EFAULT;
 377                goto copy_err;
 378        }
 379
 380        return idata;
 381
 382copy_err:
 383        kfree(idata->buf);
 384idata_err:
 385        kfree(idata);
 386out:
 387        return ERR_PTR(err);
 388}
 389
 390static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
 391                                       u32 retries_max)
 392{
 393        int err;
 394        u32 retry_count = 0;
 395
 396        if (!status || !retries_max)
 397                return -EINVAL;
 398
 399        do {
 400                err = get_card_status(card, status, 5);
 401                if (err)
 402                        break;
 403
 404                if (!R1_STATUS(*status) &&
 405                                (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
 406                        break; /* RPMB programming operation complete */
 407
 408                /*
  409                 * Reschedule to give the MMC device a chance to continue
 410                 * processing the previous command without being polled too
 411                 * frequently.
 412                 */
 413                usleep_range(1000, 5000);
 414        } while (++retry_count < retries_max);
 415
 416        if (retry_count == retries_max)
 417                err = -EPERM;
 418
 419        return err;
 420}
 421
 422static int ioctl_do_sanitize(struct mmc_card *card)
 423{
 424        int err;
 425
 426        if (!mmc_can_sanitize(card)) {
 427                        pr_warn("%s: %s - SANITIZE is not supported\n",
 428                                mmc_hostname(card->host), __func__);
 429                        err = -EOPNOTSUPP;
 430                        goto out;
 431        }
 432
 433        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
 434                mmc_hostname(card->host), __func__);
 435
 436        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 437                                        EXT_CSD_SANITIZE_START, 1,
 438                                        MMC_SANITIZE_REQ_TIMEOUT);
 439
 440        if (err)
 441                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
 442                       mmc_hostname(card->host), __func__, err);
 443
 444        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
 445                                             __func__);
 446out:
 447        return err;
 448}
 449
 450static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 451        struct mmc_ioc_cmd __user *ic_ptr)
 452{
 453        struct mmc_blk_ioc_data *idata;
 454        struct mmc_blk_data *md;
 455        struct mmc_card *card;
 456        struct mmc_command cmd = {0};
 457        struct mmc_data data = {0};
 458        struct mmc_request mrq = {NULL};
 459        struct scatterlist sg;
 460        int err;
 461        int is_rpmb = false;
 462        u32 status = 0;
 463
 464        /*
 465         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
 466         * whole block device, not on a partition.  This prevents overspray
 467         * between sibling partitions.
 468         */
 469        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 470                return -EPERM;
 471
 472        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
 473        if (IS_ERR(idata))
 474                return PTR_ERR(idata);
 475
 476        md = mmc_blk_get(bdev->bd_disk);
 477        if (!md) {
 478                err = -EINVAL;
 479                goto cmd_err;
 480        }
 481
 482        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
 483                is_rpmb = true;
 484
 485        card = md->queue.card;
 486        if (IS_ERR(card)) {
 487                err = PTR_ERR(card);
 488                goto cmd_done;
 489        }
 490
 491        cmd.opcode = idata->ic.opcode;
 492        cmd.arg = idata->ic.arg;
 493        cmd.flags = idata->ic.flags;
 494
 495        if (idata->buf_bytes) {
 496                data.sg = &sg;
 497                data.sg_len = 1;
 498                data.blksz = idata->ic.blksz;
 499                data.blocks = idata->ic.blocks;
 500
 501                sg_init_one(data.sg, idata->buf, idata->buf_bytes);
 502
 503                if (idata->ic.write_flag)
 504                        data.flags = MMC_DATA_WRITE;
 505                else
 506                        data.flags = MMC_DATA_READ;
 507
 508                /* data.flags must already be set before doing this. */
 509                mmc_set_data_timeout(&data, card);
 510
 511                /* Allow overriding the timeout_ns for empirical tuning. */
 512                if (idata->ic.data_timeout_ns)
 513                        data.timeout_ns = idata->ic.data_timeout_ns;
 514
 515                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
 516                        /*
 517                         * Pretend this is a data transfer and rely on the
 518                         * host driver to compute timeout.  When all host
 519                         * drivers support cmd.cmd_timeout for R1B, this
 520                         * can be changed to:
 521                         *
 522                         *     mrq.data = NULL;
 523                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
 524                         */
 525                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
 526                }
 527
 528                mrq.data = &data;
 529        }
 530
 531        mrq.cmd = &cmd;
 532
 533        mmc_get_card(card);
 534
 535        err = mmc_blk_part_switch(card, md);
 536        if (err)
 537                goto cmd_rel_host;
 538
 539        if (idata->ic.is_acmd) {
 540                err = mmc_app_cmd(card->host, card);
 541                if (err)
 542                        goto cmd_rel_host;
 543        }
 544
 545        if (is_rpmb) {
 546                err = mmc_set_blockcount(card, data.blocks,
 547                        idata->ic.write_flag & (1 << 31));
 548                if (err)
 549                        goto cmd_rel_host;
 550        }
 551
 552        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
 553            (cmd.opcode == MMC_SWITCH)) {
 554                err = ioctl_do_sanitize(card);
 555
 556                if (err)
 557                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
 558                               __func__, err);
 559
 560                goto cmd_rel_host;
 561        }
 562
 563        mmc_wait_for_req(card->host, &mrq);
 564
 565        if (cmd.error) {
 566                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
 567                                                __func__, cmd.error);
 568                err = cmd.error;
 569                goto cmd_rel_host;
 570        }
 571        if (data.error) {
 572                dev_err(mmc_dev(card->host), "%s: data error %d\n",
 573                                                __func__, data.error);
 574                err = data.error;
 575                goto cmd_rel_host;
 576        }
 577
 578        /*
 579         * According to the SD specs, some commands require a delay after
 580         * issuing the command.
 581         */
 582        if (idata->ic.postsleep_min_us)
 583                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 584
 585        if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
 586                err = -EFAULT;
 587                goto cmd_rel_host;
 588        }
 589
 590        if (!idata->ic.write_flag) {
 591                if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
 592                                                idata->buf, idata->buf_bytes)) {
 593                        err = -EFAULT;
 594                        goto cmd_rel_host;
 595                }
 596        }
 597
 598        if (is_rpmb) {
 599                /*
 600                 * Ensure RPMB command has completed by polling CMD13
 601                 * "Send Status".
 602                 */
 603                err = ioctl_rpmb_card_status_poll(card, &status, 5);
 604                if (err)
 605                        dev_err(mmc_dev(card->host),
 606                                        "%s: Card Status=0x%08X, error %d\n",
 607                                        __func__, status, err);
 608        }
 609
 610cmd_rel_host:
 611        mmc_put_card(card);
 612
 613cmd_done:
 614        mmc_blk_put(md);
 615cmd_err:
 616        kfree(idata->buf);
 617        kfree(idata);
 618        return err;
 619}
 620
 621static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
 622        unsigned int cmd, unsigned long arg)
 623{
 624        int ret = -EINVAL;
 625        if (cmd == MMC_IOC_CMD)
 626                ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
 627        return ret;
 628}
 629
 630#ifdef CONFIG_COMPAT
 631static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
 632        unsigned int cmd, unsigned long arg)
 633{
 634        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
 635}
 636#endif
 637
 638static const struct block_device_operations mmc_bdops = {
 639        .open                   = mmc_blk_open,
 640        .release                = mmc_blk_release,
 641        .getgeo                 = mmc_blk_getgeo,
 642        .owner                  = THIS_MODULE,
 643        .ioctl                  = mmc_blk_ioctl,
 644#ifdef CONFIG_COMPAT
 645        .compat_ioctl           = mmc_blk_compat_ioctl,
 646#endif
 647};
 648
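/*
 * Select the physical partition (user area, boot or RPMB) that @md maps
 * to by rewriting the access bits of EXT_CSD PART_CONFIG.  The currently
 * selected partition is cached in the main device's part_curr so that
 * redundant switches are skipped.
 */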
 649static inline int mmc_blk_part_switch(struct mmc_card *card,
 650                                      struct mmc_blk_data *md)
 651{
 652        int ret;
 653        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 654
 655        if (main_md->part_curr == md->part_type)
 656                return 0;
 657
 658        if (mmc_card_mmc(card)) {
 659                u8 part_config = card->ext_csd.part_config;
 660
 661                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 662                part_config |= md->part_type;
 663
 664                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 665                                 EXT_CSD_PART_CONFIG, part_config,
 666                                 card->ext_csd.part_time);
 667                if (ret)
 668                        return ret;
 669
 670                card->ext_csd.part_config = part_config;
 671        }
 672
 673        main_md->part_curr = md->part_type;
 674        return 0;
 675}
 676
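/*
 * Ask an SD card how many blocks of the last write actually reached the
 * medium (SEND_NUM_WR_BLKS, ACMD22).  Returns (u32)-1 on any error.
 */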
 677static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 678{
 679        int err;
 680        u32 result;
 681        __be32 *blocks;
 682
 683        struct mmc_request mrq = {NULL};
 684        struct mmc_command cmd = {0};
 685        struct mmc_data data = {0};
 686
 687        struct scatterlist sg;
 688
 689        cmd.opcode = MMC_APP_CMD;
 690        cmd.arg = card->rca << 16;
 691        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 692
 693        err = mmc_wait_for_cmd(card->host, &cmd, 0);
 694        if (err)
 695                return (u32)-1;
 696        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
 697                return (u32)-1;
 698
 699        memset(&cmd, 0, sizeof(struct mmc_command));
 700
 701        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
 702        cmd.arg = 0;
 703        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 704
 705        data.blksz = 4;
 706        data.blocks = 1;
 707        data.flags = MMC_DATA_READ;
 708        data.sg = &sg;
 709        data.sg_len = 1;
 710        mmc_set_data_timeout(&data, card);
 711
 712        mrq.cmd = &cmd;
 713        mrq.data = &data;
 714
 715        blocks = kmalloc(4, GFP_KERNEL);
 716        if (!blocks)
 717                return (u32)-1;
 718
 719        sg_init_one(&sg, blocks, 4);
 720
 721        mmc_wait_for_req(card->host, &mrq);
 722
 723        result = ntohl(*blocks);
 724        kfree(blocks);
 725
 726        if (cmd.error || data.error)
 727                result = (u32)-1;
 728
 729        return result;
 730}
 731
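/* Issue CMD13 (SEND_STATUS) and return the card's R1 status word. */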
 732static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 733{
 734        struct mmc_command cmd = {0};
 735        int err;
 736
 737        cmd.opcode = MMC_SEND_STATUS;
 738        if (!mmc_host_is_spi(card->host))
 739                cmd.arg = card->rca << 16;
 740        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 741        err = mmc_wait_for_cmd(card->host, &cmd, retries);
 742        if (err == 0)
 743                *status = cmd.resp[0];
 744        return err;
 745}
 746
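/*
 * Poll CMD13 until the card signals READY_FOR_DATA and has left the
 * programming state, or until @timeout_ms expires.  When the host does
 * hardware busy detection we can stop polling after the first status.
 * Any R1_ERROR seen along the way is reported through @gen_err.
 */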
 747static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 748                bool hw_busy_detect, struct request *req, int *gen_err)
 749{
 750        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 751        int err = 0;
 752        u32 status;
 753
 754        do {
 755                err = get_card_status(card, &status, 5);
 756                if (err) {
 757                        pr_err("%s: error %d requesting status\n",
 758                               req->rq_disk->disk_name, err);
 759                        return err;
 760                }
 761
 762                if (status & R1_ERROR) {
 763                        pr_err("%s: %s: error sending status cmd, status %#x\n",
 764                                req->rq_disk->disk_name, __func__, status);
 765                        *gen_err = 1;
 766                }
 767
  768                /* We may rely on the host hw to handle busy detection. */
 769                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
 770                        hw_busy_detect)
 771                        break;
 772
 773                /*
 774                 * Timeout if the device never becomes ready for data and never
 775                 * leaves the program state.
 776                 */
 777                if (time_after(jiffies, timeout)) {
 778                        pr_err("%s: Card stuck in programming state! %s %s\n",
 779                                mmc_hostname(card->host),
 780                                req->rq_disk->disk_name, __func__);
 781                        return -ETIMEDOUT;
 782                }
 783
 784                /*
 785                 * Some cards mishandle the status bits,
 786                 * so make sure to check both the busy
 787                 * indication and the card state.
 788                 */
 789        } while (!(status & R1_READY_FOR_DATA) ||
 790                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
 791
 792        return err;
 793}
 794
 795static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
 796                struct request *req, int *gen_err, u32 *stop_status)
 797{
 798        struct mmc_host *host = card->host;
 799        struct mmc_command cmd = {0};
 800        int err;
 801        bool use_r1b_resp = rq_data_dir(req) == WRITE;
 802
 803        /*
 804         * Normally we use R1B responses for WRITE, but in cases where the host
 805         * has specified a max_busy_timeout we need to validate it. A failure
 806         * means we need to prevent the host from doing hw busy detection, which
  807 * is done by converting to an R1 response instead.
 808         */
 809        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
 810                use_r1b_resp = false;
 811
 812        cmd.opcode = MMC_STOP_TRANSMISSION;
 813        if (use_r1b_resp) {
 814                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 815                cmd.busy_timeout = timeout_ms;
 816        } else {
 817                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 818        }
 819
 820        err = mmc_wait_for_cmd(host, &cmd, 5);
 821        if (err)
 822                return err;
 823
 824        *stop_status = cmd.resp[0];
 825
 826        /* No need to check card status in case of READ. */
 827        if (rq_data_dir(req) == READ)
 828                return 0;
 829
 830        if (!mmc_host_is_spi(host) &&
 831                (*stop_status & R1_ERROR)) {
 832                pr_err("%s: %s: general error sending stop command, resp %#x\n",
 833                        req->rq_disk->disk_name, __func__, *stop_status);
 834                *gen_err = 1;
 835        }
 836
 837        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
 838}
 839
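/*
 * Dispositions returned by the command error helpers below: give up
 * because the medium is gone, retry the r/w command, abort the request,
 * or continue and let the data error handling sort it out.
 */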
 840#define ERR_NOMEDIUM    3
 841#define ERR_RETRY       2
 842#define ERR_ABORT       1
 843#define ERR_CONTINUE    0
 844
 845static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
 846        bool status_valid, u32 status)
 847{
 848        switch (error) {
 849        case -EILSEQ:
 850                /* response crc error, retry the r/w cmd */
 851                pr_err("%s: %s sending %s command, card status %#x\n",
 852                        req->rq_disk->disk_name, "response CRC error",
 853                        name, status);
 854                return ERR_RETRY;
 855
 856        case -ETIMEDOUT:
 857                pr_err("%s: %s sending %s command, card status %#x\n",
 858                        req->rq_disk->disk_name, "timed out", name, status);
 859
 860                /* If the status cmd initially failed, retry the r/w cmd */
 861                if (!status_valid)
 862                        return ERR_RETRY;
 863
 864                /*
 865                 * If it was a r/w cmd crc error, or illegal command
 866                 * (eg, issued in wrong state) then retry - we should
 867                 * have corrected the state problem above.
 868                 */
 869                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
 870                        return ERR_RETRY;
 871
 872                /* Otherwise abort the command */
 873                return ERR_ABORT;
 874
 875        default:
 876                /* We don't understand the error code the driver gave us */
 877                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
 878                       req->rq_disk->disk_name, error, status);
 879                return ERR_ABORT;
 880        }
 881}
 882
 883/*
 884 * Initial r/w and stop cmd error recovery.
 885 * We don't know whether the card received the r/w cmd or not, so try to
 886 * restore things back to a sane state.  Essentially, we do this as follows:
 887 * - Obtain card status.  If the first attempt to obtain card status fails,
 888 *   the status word will reflect the failed status cmd, not the failed
 889 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 890 *   longer communicate with the card.
 891 * - Check the card state.  If the card received the cmd but there was a
 892 *   transient problem with the response, it might still be in a data transfer
 893 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 894 * - If the r/w cmd failed due to a response CRC error, it was probably
 895 *   transient, so retry the cmd.
 896 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 897 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 898 *   illegal cmd, retry.
 899 * Otherwise we don't understand what happened, so abort.
 900 */
 901static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 902        struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
 903{
 904        bool prev_cmd_status_valid = true;
 905        u32 status, stop_status = 0;
 906        int err, retry;
 907
 908        if (mmc_card_removed(card))
 909                return ERR_NOMEDIUM;
 910
 911        /*
 912         * Try to get card status which indicates both the card state
 913         * and why there was no response.  If the first attempt fails,
 914         * we can't be sure the returned status is for the r/w command.
 915         */
 916        for (retry = 2; retry >= 0; retry--) {
 917                err = get_card_status(card, &status, 0);
 918                if (!err)
 919                        break;
 920
 921                /* Re-tune if needed */
 922                mmc_retune_recheck(card->host);
 923
 924                prev_cmd_status_valid = false;
 925                pr_err("%s: error %d sending status command, %sing\n",
 926                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
 927        }
 928
 929        /* We couldn't get a response from the card.  Give up. */
 930        if (err) {
 931                /* Check if the card is removed */
 932                if (mmc_detect_card_removed(card->host))
 933                        return ERR_NOMEDIUM;
 934                return ERR_ABORT;
 935        }
 936
 937        /* Flag ECC errors */
 938        if ((status & R1_CARD_ECC_FAILED) ||
 939            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
 940            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
 941                *ecc_err = 1;
 942
 943        /* Flag General errors */
 944        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
 945                if ((status & R1_ERROR) ||
 946                        (brq->stop.resp[0] & R1_ERROR)) {
 947                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
 948                               req->rq_disk->disk_name, __func__,
 949                               brq->stop.resp[0], status);
 950                        *gen_err = 1;
 951                }
 952
 953        /*
 954         * Check the current card state.  If it is in some data transfer
 955         * mode, tell it to stop (and hopefully transition back to TRAN.)
 956         */
 957        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
 958            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
 959                err = send_stop(card,
 960                        DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
 961                        req, gen_err, &stop_status);
 962                if (err) {
 963                        pr_err("%s: error %d sending stop command\n",
 964                               req->rq_disk->disk_name, err);
 965                        /*
 966                         * If the stop cmd also timed out, the card is probably
 967                         * not present, so abort. Other errors are bad news too.
 968                         */
 969                        return ERR_ABORT;
 970                }
 971
 972                if (stop_status & R1_CARD_ECC_FAILED)
 973                        *ecc_err = 1;
 974        }
 975
 976        /* Check for set block count errors */
 977        if (brq->sbc.error)
 978                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
 979                                prev_cmd_status_valid, status);
 980
 981        /* Check for r/w command errors */
 982        if (brq->cmd.error)
 983                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
 984                                prev_cmd_status_valid, status);
 985
 986        /* Data errors */
 987        if (!brq->stop.error)
 988                return ERR_CONTINUE;
 989
 990        /* Now for stop errors.  These aren't fatal to the transfer. */
 991        pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
 992               req->rq_disk->disk_name, brq->stop.error,
 993               brq->cmd.resp[0], status);
 994
 995        /*
  996         * Substitute in our own stop status as this will give the error
 997         * state which happened during the execution of the r/w command.
 998         */
 999        if (stop_status) {
1000                brq->stop.resp[0] = stop_status;
1001                brq->stop.error = 0;
1002        }
1003        return ERR_CONTINUE;
1004}
1005
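/*
 * Reset the card/host once per request type; reset_done remembers that a
 * reset was already tried so we do not loop forever, and is cleared again
 * by mmc_blk_reset_success() once a request of that type completes.
 */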
1006static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1007                         int type)
1008{
1009        int err;
1010
1011        if (md->reset_done & type)
1012                return -EEXIST;
1013
1014        md->reset_done |= type;
1015        err = mmc_hw_reset(host);
1016        /* Ensure we switch back to the correct partition */
1017        if (err != -EOPNOTSUPP) {
1018                struct mmc_blk_data *main_md =
1019                        dev_get_drvdata(&host->card->dev);
1020                int part_err;
1021
1022                main_md->part_curr = main_md->part_type;
1023                part_err = mmc_blk_part_switch(host->card, md);
1024                if (part_err) {
1025                        /*
1026                         * We have failed to get back into the correct
1027                         * partition, so we need to abort the whole request.
1028                         */
1029                        return -ENODEV;
1030                }
1031        }
1032        return err;
1033}
1034
1035static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1036{
1037        md->reset_done &= ~type;
1038}
1039
1040int mmc_access_rpmb(struct mmc_queue *mq)
1041{
1042        struct mmc_blk_data *md = mq->data;
1043        /*
 1044         * If this is an RPMB partition access, return true
1045         */
1046        if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1047                return true;
1048
1049        return false;
1050}
1051
1052static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1053{
1054        struct mmc_blk_data *md = mq->data;
1055        struct mmc_card *card = md->queue.card;
1056        unsigned int from, nr, arg;
1057        int err = 0, type = MMC_BLK_DISCARD;
1058
1059        if (!mmc_can_erase(card)) {
1060                err = -EOPNOTSUPP;
1061                goto out;
1062        }
1063
1064        from = blk_rq_pos(req);
1065        nr = blk_rq_sectors(req);
1066
1067        if (mmc_can_discard(card))
1068                arg = MMC_DISCARD_ARG;
1069        else if (mmc_can_trim(card))
1070                arg = MMC_TRIM_ARG;
1071        else
1072                arg = MMC_ERASE_ARG;
1073retry:
1074        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1075                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076                                 INAND_CMD38_ARG_EXT_CSD,
1077                                 arg == MMC_TRIM_ARG ?
1078                                 INAND_CMD38_ARG_TRIM :
1079                                 INAND_CMD38_ARG_ERASE,
1080                                 0);
1081                if (err)
1082                        goto out;
1083        }
1084        err = mmc_erase(card, from, nr, arg);
1085out:
1086        if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1087                goto retry;
1088        if (!err)
1089                mmc_blk_reset_success(md, type);
1090        blk_end_request(req, err, blk_rq_bytes(req));
1091
1092        return err ? 0 : 1;
1093}
1094
1095static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1096                                       struct request *req)
1097{
1098        struct mmc_blk_data *md = mq->data;
1099        struct mmc_card *card = md->queue.card;
1100        unsigned int from, nr, arg;
1101        int err = 0, type = MMC_BLK_SECDISCARD;
1102
1103        if (!(mmc_can_secure_erase_trim(card))) {
1104                err = -EOPNOTSUPP;
1105                goto out;
1106        }
1107
1108        from = blk_rq_pos(req);
1109        nr = blk_rq_sectors(req);
1110
1111        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1112                arg = MMC_SECURE_TRIM1_ARG;
1113        else
1114                arg = MMC_SECURE_ERASE_ARG;
1115
1116retry:
1117        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1118                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1119                                 INAND_CMD38_ARG_EXT_CSD,
1120                                 arg == MMC_SECURE_TRIM1_ARG ?
1121                                 INAND_CMD38_ARG_SECTRIM1 :
1122                                 INAND_CMD38_ARG_SECERASE,
1123                                 0);
1124                if (err)
1125                        goto out_retry;
1126        }
1127
1128        err = mmc_erase(card, from, nr, arg);
1129        if (err == -EIO)
1130                goto out_retry;
1131        if (err)
1132                goto out;
1133
1134        if (arg == MMC_SECURE_TRIM1_ARG) {
1135                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1136                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1137                                         INAND_CMD38_ARG_EXT_CSD,
1138                                         INAND_CMD38_ARG_SECTRIM2,
1139                                         0);
1140                        if (err)
1141                                goto out_retry;
1142                }
1143
1144                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1145                if (err == -EIO)
1146                        goto out_retry;
1147                if (err)
1148                        goto out;
1149        }
1150
1151out_retry:
1152        if (err && !mmc_blk_reset(md, card->host, type))
1153                goto retry;
1154        if (!err)
1155                mmc_blk_reset_success(md, type);
1156out:
1157        blk_end_request(req, err, blk_rq_bytes(req));
1158
1159        return err ? 0 : 1;
1160}
1161
1162static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1163{
1164        struct mmc_blk_data *md = mq->data;
1165        struct mmc_card *card = md->queue.card;
1166        int ret = 0;
1167
1168        ret = mmc_flush_cache(card);
1169        if (ret)
1170                ret = -EIO;
1171
1172        blk_end_request_all(req, ret);
1173
1174        return ret ? 0 : 1;
1175}
1176
1177/*
1178 * Reformat current write as a reliable write, supporting
1179 * both legacy and the enhanced reliable write MMC cards.
1180 * In each transfer we'll handle only as much as a single
1181 * reliable write can handle, thus finish the request in
1182 * partial completions.
1183 */
1184static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1185                                    struct mmc_card *card,
1186                                    struct request *req)
1187{
1188        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1189                /* Legacy mode imposes restrictions on transfers. */
1190                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1191                        brq->data.blocks = 1;
1192
1193                if (brq->data.blocks > card->ext_csd.rel_sectors)
1194                        brq->data.blocks = card->ext_csd.rel_sectors;
1195                else if (brq->data.blocks < card->ext_csd.rel_sectors)
1196                        brq->data.blocks = 1;
1197        }
1198}
1199
1200#define CMD_ERRORS                                                      \
1201        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
1202         R1_ADDRESS_ERROR |     /* Misaligned address */                \
1203         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
1204         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
1205         R1_CC_ERROR |          /* Card controller error */             \
1206         R1_ERROR)              /* General/unknown error */
1207
1208static int mmc_blk_err_check(struct mmc_card *card,
1209                             struct mmc_async_req *areq)
1210{
1211        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1212                                                    mmc_active);
1213        struct mmc_blk_request *brq = &mq_mrq->brq;
1214        struct request *req = mq_mrq->req;
1215        int need_retune = card->host->need_retune;
1216        int ecc_err = 0, gen_err = 0;
1217
1218        /*
1219         * sbc.error indicates a problem with the set block count
1220         * command.  No data will have been transferred.
1221         *
1222         * cmd.error indicates a problem with the r/w command.  No
1223         * data will have been transferred.
1224         *
1225         * stop.error indicates a problem with the stop command.  Data
1226         * may have been transferred, or may still be transferring.
1227         */
1228        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1229            brq->data.error) {
1230                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1231                case ERR_RETRY:
1232                        return MMC_BLK_RETRY;
1233                case ERR_ABORT:
1234                        return MMC_BLK_ABORT;
1235                case ERR_NOMEDIUM:
1236                        return MMC_BLK_NOMEDIUM;
1237                case ERR_CONTINUE:
1238                        break;
1239                }
1240        }
1241
1242        /*
1243         * Check for errors relating to the execution of the
1244         * initial command - such as address errors.  No data
1245         * has been transferred.
1246         */
1247        if (brq->cmd.resp[0] & CMD_ERRORS) {
1248                pr_err("%s: r/w command failed, status = %#x\n",
1249                       req->rq_disk->disk_name, brq->cmd.resp[0]);
1250                return MMC_BLK_ABORT;
1251        }
1252
1253        /*
1254         * Everything else is either success, or a data error of some
1255         * kind.  If it was a write, we may have transitioned to
 1256         * program mode, which we have to wait for to complete.
1257         */
1258        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1259                int err;
1260
1261                /* Check stop command response */
1262                if (brq->stop.resp[0] & R1_ERROR) {
1263                        pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1264                               req->rq_disk->disk_name, __func__,
1265                               brq->stop.resp[0]);
1266                        gen_err = 1;
1267                }
1268
1269                err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1270                                        &gen_err);
1271                if (err)
1272                        return MMC_BLK_CMD_ERR;
1273        }
1274
1275        /* if general error occurs, retry the write operation. */
1276        if (gen_err) {
1277                pr_warn("%s: retrying write for general error\n",
1278                                req->rq_disk->disk_name);
1279                return MMC_BLK_RETRY;
1280        }
1281
1282        if (brq->data.error) {
1283                if (need_retune && !brq->retune_retry_done) {
1284                        pr_info("%s: retrying because a re-tune was needed\n",
1285                                req->rq_disk->disk_name);
1286                        brq->retune_retry_done = 1;
1287                        return MMC_BLK_RETRY;
1288                }
1289                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1290                       req->rq_disk->disk_name, brq->data.error,
1291                       (unsigned)blk_rq_pos(req),
1292                       (unsigned)blk_rq_sectors(req),
1293                       brq->cmd.resp[0], brq->stop.resp[0]);
1294
1295                if (rq_data_dir(req) == READ) {
1296                        if (ecc_err)
1297                                return MMC_BLK_ECC_ERR;
1298                        return MMC_BLK_DATA_ERR;
1299                } else {
1300                        return MMC_BLK_CMD_ERR;
1301                }
1302        }
1303
1304        if (!brq->data.bytes_xfered)
1305                return MMC_BLK_RETRY;
1306
1307        if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1308                if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1309                        return MMC_BLK_PARTIAL;
1310                else
1311                        return MMC_BLK_SUCCESS;
1312        }
1313
1314        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1315                return MMC_BLK_PARTIAL;
1316
1317        return MMC_BLK_SUCCESS;
1318}
1319
1320static int mmc_blk_packed_err_check(struct mmc_card *card,
1321                                    struct mmc_async_req *areq)
1322{
1323        struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1324                        mmc_active);
1325        struct request *req = mq_rq->req;
1326        struct mmc_packed *packed = mq_rq->packed;
1327        int err, check, status;
1328        u8 *ext_csd;
1329
1330        BUG_ON(!packed);
1331
1332        packed->retries--;
1333        check = mmc_blk_err_check(card, areq);
1334        err = get_card_status(card, &status, 0);
1335        if (err) {
1336                pr_err("%s: error %d sending status command\n",
1337                       req->rq_disk->disk_name, err);
1338                return MMC_BLK_ABORT;
1339        }
1340
1341        if (status & R1_EXCEPTION_EVENT) {
1342                err = mmc_get_ext_csd(card, &ext_csd);
1343                if (err) {
1344                        pr_err("%s: error %d sending ext_csd\n",
1345                               req->rq_disk->disk_name, err);
1346                        return MMC_BLK_ABORT;
1347                }
1348
1349                if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1350                     EXT_CSD_PACKED_FAILURE) &&
1351                    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1352                     EXT_CSD_PACKED_GENERIC_ERROR)) {
1353                        if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1354                            EXT_CSD_PACKED_INDEXED_ERROR) {
1355                                packed->idx_failure =
1356                                  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1357                                check = MMC_BLK_PARTIAL;
1358                        }
1359                        pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1360                               "failure index: %d\n",
1361                               req->rq_disk->disk_name, packed->nr_entries,
1362                               packed->blocks, packed->idx_failure);
1363                }
1364                kfree(ext_csd);
1365        }
1366
1367        return check;
1368}
1369
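/*
 * Build the mmc_blk_request (sbc/cmd/data/stop) for an ordinary read or
 * write: pick single vs. multi-block opcodes, clamp the block count to
 * what the host can take, and apply reliable-write, data-tag and CMD23
 * handling where the card and host support them.
 */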
1370static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1371                               struct mmc_card *card,
1372                               int disable_multi,
1373                               struct mmc_queue *mq)
1374{
1375        u32 readcmd, writecmd;
1376        struct mmc_blk_request *brq = &mqrq->brq;
1377        struct request *req = mqrq->req;
1378        struct mmc_blk_data *md = mq->data;
1379        bool do_data_tag;
1380
1381        /*
1382         * Reliable writes are used to implement Forced Unit Access and
1383         * REQ_META accesses, and are supported only on MMCs.
1384         *
1385         * XXX: this really needs a good explanation of why REQ_META
 1386         * is treated specially.
1387         */
1388        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1389                          (req->cmd_flags & REQ_META)) &&
1390                (rq_data_dir(req) == WRITE) &&
1391                (md->flags & MMC_BLK_REL_WR);
1392
1393        memset(brq, 0, sizeof(struct mmc_blk_request));
1394        brq->mrq.cmd = &brq->cmd;
1395        brq->mrq.data = &brq->data;
1396
1397        brq->cmd.arg = blk_rq_pos(req);
1398        if (!mmc_card_blockaddr(card))
1399                brq->cmd.arg <<= 9;
1400        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1401        brq->data.blksz = 512;
1402        brq->stop.opcode = MMC_STOP_TRANSMISSION;
1403        brq->stop.arg = 0;
1404        brq->data.blocks = blk_rq_sectors(req);
1405
1406        /*
1407         * The block layer doesn't support all sector count
 1408         * restrictions, so we need to be prepared for
 1409         * requests that are too big.
1410         */
1411        if (brq->data.blocks > card->host->max_blk_count)
1412                brq->data.blocks = card->host->max_blk_count;
1413
1414        if (brq->data.blocks > 1) {
1415                /*
1416                 * After a read error, we redo the request one sector
1417                 * at a time in order to accurately determine which
1418                 * sectors can be read successfully.
1419                 */
1420                if (disable_multi)
1421                        brq->data.blocks = 1;
1422
1423                /*
1424                 * Some controllers have HW issues while operating
1425                 * in multiple I/O mode
1426                 */
1427                if (card->host->ops->multi_io_quirk)
1428                        brq->data.blocks = card->host->ops->multi_io_quirk(card,
1429                                                (rq_data_dir(req) == READ) ?
1430                                                MMC_DATA_READ : MMC_DATA_WRITE,
1431                                                brq->data.blocks);
1432        }
1433
1434        if (brq->data.blocks > 1 || do_rel_wr) {
1435                /* SPI multiblock writes terminate using a special
1436                 * token, not a STOP_TRANSMISSION request.
1437                 */
1438                if (!mmc_host_is_spi(card->host) ||
1439                    rq_data_dir(req) == READ)
1440                        brq->mrq.stop = &brq->stop;
1441                readcmd = MMC_READ_MULTIPLE_BLOCK;
1442                writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1443        } else {
1444                brq->mrq.stop = NULL;
1445                readcmd = MMC_READ_SINGLE_BLOCK;
1446                writecmd = MMC_WRITE_BLOCK;
1447        }
1448        if (rq_data_dir(req) == READ) {
1449                brq->cmd.opcode = readcmd;
1450                brq->data.flags |= MMC_DATA_READ;
1451                if (brq->mrq.stop)
1452                        brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1453                                        MMC_CMD_AC;
1454        } else {
1455                brq->cmd.opcode = writecmd;
1456                brq->data.flags |= MMC_DATA_WRITE;
1457                if (brq->mrq.stop)
1458                        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1459                                        MMC_CMD_AC;
1460        }
1461
1462        if (do_rel_wr)
1463                mmc_apply_rel_rw(brq, card, req);
1464
1465        /*
 1466         * Data tag is used only when writing metadata, to speed
 1467         * up the write and any subsequent read of that metadata
1468         */
1469        do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1470                (req->cmd_flags & REQ_META) &&
1471                (rq_data_dir(req) == WRITE) &&
1472                ((brq->data.blocks * brq->data.blksz) >=
1473                 card->ext_csd.data_tag_unit_size);
1474
1475        /*
1476         * Pre-defined multi-block transfers are preferable to
 1477         * open-ended ones (and necessary for reliable writes).
1478         * However, it is not sufficient to just send CMD23,
1479         * and avoid the final CMD12, as on an error condition
1480         * CMD12 (stop) needs to be sent anyway. This, coupled
1481         * with Auto-CMD23 enhancements provided by some
1482         * hosts, means that the complexity of dealing
1483         * with this is best left to the host. If CMD23 is
1484         * supported by card and host, we'll fill sbc in and let
1485         * the host deal with handling it correctly. This means
1486         * that for hosts that don't expose MMC_CAP_CMD23, no
1487         * change of behavior will be observed.
1488         *
1489         * N.B: Some MMC cards experience perf degradation.
1490         * We'll avoid using CMD23-bounded multiblock writes for
1491         * these, while retaining features like reliable writes.
1492         */
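        /*
         * CMD23 argument layout: bits 15:0 carry the block count, bit 31
         * requests a reliable write and bit 29 marks the transfer as
         * tagged data (see brq->sbc.arg below).
         */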
1493        if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1494            (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1495             do_data_tag)) {
1496                brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1497                brq->sbc.arg = brq->data.blocks |
1498                        (do_rel_wr ? (1 << 31) : 0) |
1499                        (do_data_tag ? (1 << 29) : 0);
1500                brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1501                brq->mrq.sbc = &brq->sbc;
1502        }
1503
1504        mmc_set_data_timeout(&brq->data, card);
1505
1506        brq->data.sg = mqrq->sg;
1507        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1508
1509        /*
1510         * Adjust the sg list so it covers the same number of blocks
1511         * as this (possibly reduced) transfer.
1512         */
1513        if (brq->data.blocks != blk_rq_sectors(req)) {
1514                int i, data_size = brq->data.blocks << 9;
1515                struct scatterlist *sg;
1516
1517                for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1518                        data_size -= sg->length;
1519                        if (data_size <= 0) {
1520                                sg->length += data_size;
1521                                i++;
1522                                break;
1523                        }
1524                }
1525                brq->data.sg_len = i;
1526        }
1527
1528        mqrq->mmc_active.mrq = &brq->mrq;
1529        mqrq->mmc_active.err_check = mmc_blk_err_check;
1530
1531        mmc_queue_bounce_pre(mqrq);
1532}
1533
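    /*
     * Work out how many scatterlist segments are needed to hold the
     * packed command header: one 4KB "large" sector for large-sector
     * cards, otherwise 512 bytes, split into chunks no bigger than the
     * queue's maximum segment size.
     */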
1534static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1535                                          struct mmc_card *card)
1536{
1537        unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1538        unsigned int max_seg_sz = queue_max_segment_size(q);
1539        unsigned int len, nr_segs = 0;
1540
1541        do {
1542                len = min(hdr_sz, max_seg_sz);
1543                hdr_sz -= len;
1544                nr_segs++;
1545        } while (hdr_sz);
1546
1547        return nr_segs;
1548}
1549
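    /*
     * Try to build a packed write: starting from @req, pull further
     * requests off the queue while they remain mergeable (same data
     * direction, no discard/flush, within the block count and segment
     * limits).  Collected requests are chained on mqrq->packed->list;
     * the first unmergeable request fetched is put back on the queue.
     * Returns the number of packed entries, or 0 if packing is not used.
     */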
1550static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1551{
1552        struct request_queue *q = mq->queue;
1553        struct mmc_card *card = mq->card;
1554        struct request *cur = req, *next = NULL;
1555        struct mmc_blk_data *md = mq->data;
1556        struct mmc_queue_req *mqrq = mq->mqrq_cur;
1557        bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1558        unsigned int req_sectors = 0, phys_segments = 0;
1559        unsigned int max_blk_count, max_phys_segs;
1560        bool put_back = true;
1561        u8 max_packed_rw = 0;
1562        u8 reqs = 0;
1563
1564        if (!(md->flags & MMC_BLK_PACKED_CMD))
1565                goto no_packed;
1566
1567        if ((rq_data_dir(cur) == WRITE) &&
1568            mmc_host_packed_wr(card->host))
1569                max_packed_rw = card->ext_csd.max_packed_writes;
1570
1571        if (max_packed_rw == 0)
1572                goto no_packed;
1573
1574        if (mmc_req_rel_wr(cur) &&
1575            (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1576                goto no_packed;
1577
1578        if (mmc_large_sector(card) &&
1579            !IS_ALIGNED(blk_rq_sectors(cur), 8))
1580                goto no_packed;
1581
1582        mmc_blk_clear_packed(mqrq);
1583
1584        max_blk_count = min(card->host->max_blk_count,
1585                            card->host->max_req_size >> 9);
1586        if (unlikely(max_blk_count > 0xffff))
1587                max_blk_count = 0xffff;
1588
1589        max_phys_segs = queue_max_segments(q);
1590        req_sectors += blk_rq_sectors(cur);
1591        phys_segments += cur->nr_phys_segments;
1592
1593        if (rq_data_dir(cur) == WRITE) {
1594                req_sectors += mmc_large_sector(card) ? 8 : 1;
1595                phys_segments += mmc_calc_packed_hdr_segs(q, card);
1596        }
1597
1598        do {
1599                if (reqs >= max_packed_rw - 1) {
1600                        put_back = false;
1601                        break;
1602                }
1603
1604                spin_lock_irq(q->queue_lock);
1605                next = blk_fetch_request(q);
1606                spin_unlock_irq(q->queue_lock);
1607                if (!next) {
1608                        put_back = false;
1609                        break;
1610                }
1611
1612                if (mmc_large_sector(card) &&
1613                    !IS_ALIGNED(blk_rq_sectors(next), 8))
1614                        break;
1615
1616                if (next->cmd_flags & REQ_DISCARD ||
1617                    next->cmd_flags & REQ_FLUSH)
1618                        break;
1619
1620                if (rq_data_dir(cur) != rq_data_dir(next))
1621                        break;
1622
1623                if (mmc_req_rel_wr(next) &&
1624                    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1625                        break;
1626
1627                req_sectors += blk_rq_sectors(next);
1628                if (req_sectors > max_blk_count)
1629                        break;
1630
1631                phys_segments += next->nr_phys_segments;
1632                if (phys_segments > max_phys_segs)
1633                        break;
1634
1635                list_add_tail(&next->queuelist, &mqrq->packed->list);
1636                cur = next;
1637                reqs++;
1638        } while (1);
1639
1640        if (put_back) {
1641                spin_lock_irq(q->queue_lock);
1642                blk_requeue_request(q, next);
1643                spin_unlock_irq(q->queue_lock);
1644        }
1645
1646        if (reqs > 0) {
1647                list_add(&req->queuelist, &mqrq->packed->list);
1648                mqrq->packed->nr_entries = ++reqs;
1649                mqrq->packed->retries = reqs;
1650                return reqs;
1651        }
1652
1653no_packed:
1654        mqrq->cmd_type = MMC_PACKED_NONE;
1655        return 0;
1656}
1657
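    /*
     * Prepare a packed WRITE: build the packed command header (one
     * CMD23/CMD25 argument pair per packed request) and set up a single
     * transfer covering the header block(s) plus the data of every
     * request on the packed list.
     */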
1658static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1659                                        struct mmc_card *card,
1660                                        struct mmc_queue *mq)
1661{
1662        struct mmc_blk_request *brq = &mqrq->brq;
1663        struct request *req = mqrq->req;
1664        struct request *prq;
1665        struct mmc_blk_data *md = mq->data;
1666        struct mmc_packed *packed = mqrq->packed;
1667        bool do_rel_wr, do_data_tag;
1668        u32 *packed_cmd_hdr;
1669        u8 hdr_blocks;
1670        u8 i = 1;
1671
1672        BUG_ON(!packed);
1673
1674        mqrq->cmd_type = MMC_PACKED_WRITE;
1675        packed->blocks = 0;
1676        packed->idx_failure = MMC_PACKED_NR_IDX;
1677
1678        packed_cmd_hdr = packed->cmd_hdr;
1679        memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1680        packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1681                (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1682        hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1683
1684        /*
1685         * Build the CMD23/CMD25 argument pair for each entry of the packed group
1686         */
1687        list_for_each_entry(prq, &packed->list, queuelist) {
1688                do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1689                do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1690                        (prq->cmd_flags & REQ_META) &&
1691                        (rq_data_dir(prq) == WRITE) &&
1692                        ((brq->data.blocks * brq->data.blksz) >=
1693                         card->ext_csd.data_tag_unit_size);
1694                /* Argument of CMD23 */
1695                packed_cmd_hdr[(i * 2)] =
1696                        (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1697                        (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1698                        blk_rq_sectors(prq);
1699                /* Argument of CMD18 or CMD25 */
1700                packed_cmd_hdr[(i * 2) + 1] =
1701                        mmc_card_blockaddr(card) ?
1702                        blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1703                packed->blocks += blk_rq_sectors(prq);
1704                i++;
1705        }
1706
1707        memset(brq, 0, sizeof(struct mmc_blk_request));
1708        brq->mrq.cmd = &brq->cmd;
1709        brq->mrq.data = &brq->data;
1710        brq->mrq.sbc = &brq->sbc;
1711        brq->mrq.stop = &brq->stop;
1712
1713        brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1714        brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1715        brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1716
1717        brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1718        brq->cmd.arg = blk_rq_pos(req);
1719        if (!mmc_card_blockaddr(card))
1720                brq->cmd.arg <<= 9;
1721        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1722
1723        brq->data.blksz = 512;
1724        brq->data.blocks = packed->blocks + hdr_blocks;
1725        brq->data.flags |= MMC_DATA_WRITE;
1726
1727        brq->stop.opcode = MMC_STOP_TRANSMISSION;
1728        brq->stop.arg = 0;
1729        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1730
1731        mmc_set_data_timeout(&brq->data, card);
1732
1733        brq->data.sg = mqrq->sg;
1734        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1735
1736        mqrq->mmc_active.mrq = &brq->mrq;
1737        mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1738
1739        mmc_queue_bounce_pre(mqrq);
1740}
1741
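    /*
     * After a command error on a write, complete the part of the request
     * that the card reports as successfully programmed, so that only the
     * remainder is retried or failed.
     */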
1742static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1743                           struct mmc_blk_request *brq, struct request *req,
1744                           int ret)
1745{
1746        struct mmc_queue_req *mq_rq;
1747        mq_rq = container_of(brq, struct mmc_queue_req, brq);
1748
1749        /*
1750         * If this is an SD card and we're writing, we can first
1751         * mark the known good sectors as ok.
1752         *
1753         * If the card is not SD, we can still ok written sectors
1754         * as reported by the controller (which might be less than
1755         * the real number of written sectors, but never more).
1756         */
1757        if (mmc_card_sd(card)) {
1758                u32 blocks;
1759
1760                blocks = mmc_sd_num_wr_blocks(card);
1761                if (blocks != (u32)-1) {
1762                        ret = blk_end_request(req, 0, blocks << 9);
1763                }
1764        } else {
1765                if (!mmc_packed_cmd(mq_rq->cmd_type))
1766                        ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1767        }
1768        return ret;
1769}
1770
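    /*
     * Complete the requests of a packed group that transferred
     * successfully.  If idx_failure points at a failing entry, stop
     * there, make that entry the current request and return non-zero so
     * that the caller retries from it; otherwise complete everything
     * and return 0.
     */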
1771static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1772{
1773        struct request *prq;
1774        struct mmc_packed *packed = mq_rq->packed;
1775        int idx = packed->idx_failure, i = 0;
1776        int ret = 0;
1777
1778        BUG_ON(!packed);
1779
1780        while (!list_empty(&packed->list)) {
1781                prq = list_entry_rq(packed->list.next);
1782                if (idx == i) {
1783                        /* retry from error index */
1784                        packed->nr_entries -= idx;
1785                        mq_rq->req = prq;
1786                        ret = 1;
1787
1788                        if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1789                                list_del_init(&prq->queuelist);
1790                                mmc_blk_clear_packed(mq_rq);
1791                        }
1792                        return ret;
1793                }
1794                list_del_init(&prq->queuelist);
1795                blk_end_request(prq, 0, blk_rq_bytes(prq));
1796                i++;
1797        }
1798
1799        mmc_blk_clear_packed(mq_rq);
1800        return ret;
1801}
1802
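    /* Fail every request remaining in a packed group with -EIO. */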
1803static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1804{
1805        struct request *prq;
1806        struct mmc_packed *packed = mq_rq->packed;
1807
1808        BUG_ON(!packed);
1809
1810        while (!list_empty(&packed->list)) {
1811                prq = list_entry_rq(packed->list.next);
1812                list_del_init(&prq->queuelist);
1813                blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1814        }
1815
1816        mmc_blk_clear_packed(mq_rq);
1817}
1818
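    /*
     * Put the requests of a packed group back on the block layer queue,
     * working backwards from the tail.  The first entry (the current
     * request) is only unlinked, since the caller re-prepares and
     * reissues it itself.
     */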
1819static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1820                                      struct mmc_queue_req *mq_rq)
1821{
1822        struct request *prq;
1823        struct request_queue *q = mq->queue;
1824        struct mmc_packed *packed = mq_rq->packed;
1825
1826        BUG_ON(!packed);
1827
1828        while (!list_empty(&packed->list)) {
1829                prq = list_entry_rq(packed->list.prev);
1830                if (prq->queuelist.prev != &packed->list) {
1831                        list_del_init(&prq->queuelist);
1832                        spin_lock_irq(q->queue_lock);
1833                        blk_requeue_request(mq->queue, prq);
1834                        spin_unlock_irq(q->queue_lock);
1835                } else {
1836                        list_del_init(&prq->queuelist);
1837                }
1838        }
1839
1840        mmc_blk_clear_packed(mq_rq);
1841}
1842
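    /*
     * Issue a read/write request using the asynchronous request
     * mechanism: prepare the next request (packed or normal) while the
     * previous one completes, then handle the completion status with
     * retries, single block fallback on ECC errors and packed command
     * clean-up as needed.
     */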
1843static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1844{
1845        struct mmc_blk_data *md = mq->data;
1846        struct mmc_card *card = md->queue.card;
1847        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1848        int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1849        enum mmc_blk_status status;
1850        struct mmc_queue_req *mq_rq;
1851        struct request *req = rqc;
1852        struct mmc_async_req *areq;
1853        const u8 packed_nr = 2;
1854        u8 reqs = 0;
1855
1856        if (!rqc && !mq->mqrq_prev->req)
1857                return 0;
1858
1859        if (rqc)
1860                reqs = mmc_blk_prep_packed_list(mq, rqc);
1861
1862        do {
1863                if (rqc) {
1864                        /*
1865                         * When the 4KB native sector size is enabled, only
1866                         * reads and writes of a multiple of 8 blocks are allowed
1867                         */
1868                        if ((brq->data.blocks & 0x07) &&
1869                            (card->ext_csd.data_sector_size == 4096)) {
1870                                pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1871                                        req->rq_disk->disk_name);
1872                                mq_rq = mq->mqrq_cur;
1873                                goto cmd_abort;
1874                        }
1875
1876                        if (reqs >= packed_nr)
1877                                mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1878                                                            card, mq);
1879                        else
1880                                mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1881                        areq = &mq->mqrq_cur->mmc_active;
1882                } else
1883                        areq = NULL;
1884                areq = mmc_start_req(card->host, areq, (int *) &status);
1885                if (!areq) {
1886                        if (status == MMC_BLK_NEW_REQUEST)
1887                                mq->flags |= MMC_QUEUE_NEW_REQUEST;
1888                        return 0;
1889                }
1890
1891                mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1892                brq = &mq_rq->brq;
1893                req = mq_rq->req;
1894                type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1895                mmc_queue_bounce_post(mq_rq);
1896
1897                switch (status) {
1898                case MMC_BLK_SUCCESS:
1899                case MMC_BLK_PARTIAL:
1900                        /*
1901                         * A block was successfully transferred.
1902                         */
1903                        mmc_blk_reset_success(md, type);
1904
1905                        if (mmc_packed_cmd(mq_rq->cmd_type)) {
1906                                ret = mmc_blk_end_packed_req(mq_rq);
1907                                break;
1908                        } else {
1909                                ret = blk_end_request(req, 0,
1910                                                brq->data.bytes_xfered);
1911                        }
1912
1913                        /*
1914                         * If the blk_end_request function returns non-zero even
1915                         * though all data has been transferred and no errors
1916                         * were returned by the host controller, it's a bug.
1917                         */
1918                        if (status == MMC_BLK_SUCCESS && ret) {
1919                                pr_err("%s BUG rq_tot %d d_xfer %d\n",
1920                                       __func__, blk_rq_bytes(req),
1921                                       brq->data.bytes_xfered);
1922                                rqc = NULL;
1923                                goto cmd_abort;
1924                        }
1925                        break;
1926                case MMC_BLK_CMD_ERR:
1927                        ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1928                        if (mmc_blk_reset(md, card->host, type))
1929                                goto cmd_abort;
1930                        if (!ret)
1931                                goto start_new_req;
1932                        break;
1933                case MMC_BLK_RETRY:
1934                        retune_retry_done = brq->retune_retry_done;
1935                        if (retry++ < 5)
1936                                break;
1937                        /* Fall through */
1938                case MMC_BLK_ABORT:
1939                        if (!mmc_blk_reset(md, card->host, type))
1940                                break;
1941                        goto cmd_abort;
1942                case MMC_BLK_DATA_ERR: {
1943                        int err;
1944
1945                        err = mmc_blk_reset(md, card->host, type);
1946                        if (!err)
1947                                break;
1948                        if (err == -ENODEV ||
1949                                mmc_packed_cmd(mq_rq->cmd_type))
1950                                goto cmd_abort;
1951                        /* Fall through */
1952                }
1953                case MMC_BLK_ECC_ERR:
1954                        if (brq->data.blocks > 1) {
1955                                /* Redo read one sector at a time */
1956                                pr_warn("%s: retrying using single block read\n",
1957                                        req->rq_disk->disk_name);
1958                                disable_multi = 1;
1959                                break;
1960                        }
1961                        /*
1962                         * After an error, we redo I/O one sector at a
1963                         * time, so we only reach here after trying to
1964                         * read a single sector.
1965                         */
1966                        ret = blk_end_request(req, -EIO,
1967                                                brq->data.blksz);
1968                        if (!ret)
1969                                goto start_new_req;
1970                        break;
1971                case MMC_BLK_NOMEDIUM:
1972                        goto cmd_abort;
1973                default:
1974                        pr_err("%s: Unhandled return value (%d)",
1975                                        req->rq_disk->disk_name, status);
1976                        goto cmd_abort;
1977                }
1978
1979                if (ret) {
1980                        if (mmc_packed_cmd(mq_rq->cmd_type)) {
1981                                if (!mq_rq->packed->retries)
1982                                        goto cmd_abort;
1983                                mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1984                                mmc_start_req(card->host,
1985                                              &mq_rq->mmc_active, NULL);
1986                        } else {
1987
1988                                /*
1989                                 * In case of an incomplete request,
1990                                 * prepare it again and resend it.
1991                                 */
1992                                mmc_blk_rw_rq_prep(mq_rq, card,
1993                                                disable_multi, mq);
1994                                mmc_start_req(card->host,
1995                                                &mq_rq->mmc_active, NULL);
1996                        }
1997                        mq_rq->brq.retune_retry_done = retune_retry_done;
1998                }
1999        } while (ret);
2000
2001        return 1;
2002
2003 cmd_abort:
2004        if (mmc_packed_cmd(mq_rq->cmd_type)) {
2005                mmc_blk_abort_packed_req(mq_rq);
2006        } else {
2007                if (mmc_card_removed(card))
2008                        req->cmd_flags |= REQ_QUIET;
2009                while (ret)
2010                        ret = blk_end_request(req, -EIO,
2011                                        blk_rq_cur_bytes(req));
2012        }
2013
2014 start_new_req:
2015        if (rqc) {
2016                if (mmc_card_removed(card)) {
2017                        rqc->cmd_flags |= REQ_QUIET;
2018                        blk_end_request_all(rqc, -EIO);
2019                } else {
2020                        /*
2021                         * If the current request is packed, it needs to be put back.
2022                         */
2023                        if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2024                                mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2025
2026                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2027                        mmc_start_req(card->host,
2028                                      &mq->mqrq_cur->mmc_active, NULL);
2029                }
2030        }
2031
2032        return 0;
2033}
2034
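    /*
     * Queue issue callback: claim the host for the first request of a
     * burst, switch to the right partition, then dispatch discard,
     * secure discard, flush or read/write handling.  The host is
     * released once the queue drains or a special request completes.
     */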
2035static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2036{
2037        int ret;
2038        struct mmc_blk_data *md = mq->data;
2039        struct mmc_card *card = md->queue.card;
2040        struct mmc_host *host = card->host;
2041        unsigned long flags;
2042        unsigned int cmd_flags = req ? req->cmd_flags : 0;
2043
2044        if (req && !mq->mqrq_prev->req)
2045                /* claim host only for the first request */
2046                mmc_get_card(card);
2047
2048        ret = mmc_blk_part_switch(card, md);
2049        if (ret) {
2050                if (req) {
2051                        blk_end_request_all(req, -EIO);
2052                }
2053                ret = 0;
2054                goto out;
2055        }
2056
2057        mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2058        if (cmd_flags & REQ_DISCARD) {
2059                /* complete ongoing async transfer before issuing discard */
2060                if (card->host->areq)
2061                        mmc_blk_issue_rw_rq(mq, NULL);
2062                if (req->cmd_flags & REQ_SECURE)
2063                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
2064                else
2065                        ret = mmc_blk_issue_discard_rq(mq, req);
2066        } else if (cmd_flags & REQ_FLUSH) {
2067                /* complete ongoing async transfer before issuing flush */
2068                if (card->host->areq)
2069                        mmc_blk_issue_rw_rq(mq, NULL);
2070                ret = mmc_blk_issue_flush(mq, req);
2071        } else {
2072                if (!req && host->areq) {
2073                        spin_lock_irqsave(&host->context_info.lock, flags);
2074                        host->context_info.is_waiting_last_req = true;
2075                        spin_unlock_irqrestore(&host->context_info.lock, flags);
2076                }
2077                ret = mmc_blk_issue_rw_rq(mq, req);
2078        }
2079
2080out:
2081        if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2082             (cmd_flags & MMC_REQ_SPECIAL_MASK))
2083                /*
2084                 * Release the host when there are no more requests
2085                 * and after a special request (discard, flush) is done.
2086                 * In the special request case there is no reentry to
2087                 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2088                 */
2089                mmc_put_card(card);
2090        return ret;
2091}
2092
2093static inline int mmc_blk_readonly(struct mmc_card *card)
2094{
2095        return mmc_card_readonly(card) ||
2096               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2097}
2098
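    /*
     * Allocate the mmc_blk_data, gendisk and request queue for one card
     * area (main user area, boot or RPMB partition) and set up its
     * block layer parameters (logical block size, CMD23/reliable write
     * and packed command flags).
     */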
2099static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2100                                              struct device *parent,
2101                                              sector_t size,
2102                                              bool default_ro,
2103                                              const char *subname,
2104                                              int area_type)
2105{
2106        struct mmc_blk_data *md;
2107        int devidx, ret;
2108
2109        devidx = find_first_zero_bit(dev_use, max_devices);
2110        if (devidx >= max_devices)
2111                return ERR_PTR(-ENOSPC);
2112        __set_bit(devidx, dev_use);
2113
2114        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2115        if (!md) {
2116                ret = -ENOMEM;
2117                goto out;
2118        }
2119
2120        /*
2121         * !subname implies we are creating the main mmc_blk_data that will be
2122         * associated with the mmc_card via dev_set_drvdata. Due to device
2123         * partitions, devidx no longer coincides with a per-physical-card
2124         * index, so we keep track of a separate name index.
2125         */
2126        if (!subname) {
2127                md->name_idx = find_first_zero_bit(name_use, max_devices);
2128                __set_bit(md->name_idx, name_use);
2129        } else
2130                md->name_idx = ((struct mmc_blk_data *)
2131                                dev_to_disk(parent)->private_data)->name_idx;
2132
2133        md->area_type = area_type;
2134
2135        /*
2136         * Set the read-only status based on the supported commands
2137         * and the write protect switch.
2138         */
2139        md->read_only = mmc_blk_readonly(card);
2140
2141        md->disk = alloc_disk(perdev_minors);
2142        if (md->disk == NULL) {
2143                ret = -ENOMEM;
2144                goto err_kfree;
2145        }
2146
2147        spin_lock_init(&md->lock);
2148        INIT_LIST_HEAD(&md->part);
2149        md->usage = 1;
2150
2151        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2152        if (ret)
2153                goto err_putdisk;
2154
2155        md->queue.issue_fn = mmc_blk_issue_rq;
2156        md->queue.data = md;
2157
2158        md->disk->major = MMC_BLOCK_MAJOR;
2159        md->disk->first_minor = devidx * perdev_minors;
2160        md->disk->fops = &mmc_bdops;
2161        md->disk->private_data = md;
2162        md->disk->queue = md->queue.queue;
2163        md->disk->driverfs_dev = parent;
2164        set_disk_ro(md->disk, md->read_only || default_ro);
2165        if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2166                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2167
2168        /*
2169         * As discussed on lkml, GENHD_FL_REMOVABLE should:
2170         *
2171         * - be set for removable media with permanent block devices
2172         * - be unset for removable block devices with permanent media
2173         *
2174         * Since MMC block devices clearly fall under the second
2175         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2176         * should use the block device creation/destruction hotplug
2177         * messages to tell when the card is present.
2178         */
2179
2180        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2181                 "mmcblk%u%s", md->name_idx, subname ? subname : "");
2182
2183        if (mmc_card_mmc(card))
2184                blk_queue_logical_block_size(md->queue.queue,
2185                                             card->ext_csd.data_sector_size);
2186        else
2187                blk_queue_logical_block_size(md->queue.queue, 512);
2188
2189        set_capacity(md->disk, size);
2190
2191        if (mmc_host_cmd23(card->host)) {
2192                if (mmc_card_mmc(card) ||
2193                    (mmc_card_sd(card) &&
2194                     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2195                        md->flags |= MMC_BLK_CMD23;
2196        }
2197
2198        if (mmc_card_mmc(card) &&
2199            md->flags & MMC_BLK_CMD23 &&
2200            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2201             card->ext_csd.rel_sectors)) {
2202                md->flags |= MMC_BLK_REL_WR;
2203                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2204        }
2205
2206        if (mmc_card_mmc(card) &&
2207            (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2208            (md->flags & MMC_BLK_CMD23) &&
2209            card->ext_csd.packed_event_en) {
2210                if (!mmc_packed_init(&md->queue, card))
2211                        md->flags |= MMC_BLK_PACKED_CMD;
2212        }
2213
2214        return md;
2215
2216 err_putdisk:
2217        put_disk(md->disk);
2218 err_kfree:
2219        kfree(md);
2220 out:
2221        return ERR_PTR(ret);
2222}
2223
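    /* Allocate the block device for the card's main user data area. */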
2224static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2225{
2226        sector_t size;
2227
2228        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2229                /*
2230                 * The EXT_CSD sector count is in number of 512 byte
2231                 * sectors.
2232                 */
2233                size = card->ext_csd.sectors;
2234        } else {
2235                /*
2236                 * The CSD capacity field is in units of read_blkbits.
2237                 * set_capacity takes units of 512 bytes.
2238                 */
2239                size = (typeof(sector_t))card->csd.capacity
2240                        << (card->csd.read_blkbits - 9);
2241        }
2242
2243        return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2244                                        MMC_BLK_DATA_AREA_MAIN);
2245}
2246
2247static int mmc_blk_alloc_part(struct mmc_card *card,
2248                              struct mmc_blk_data *md,
2249                              unsigned int part_type,
2250                              sector_t size,
2251                              bool default_ro,
2252                              const char *subname,
2253                              int area_type)
2254{
2255        char cap_str[10];
2256        struct mmc_blk_data *part_md;
2257
2258        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2259                                    subname, area_type);
2260        if (IS_ERR(part_md))
2261                return PTR_ERR(part_md);
2262        part_md->part_type = part_type;
2263        list_add(&part_md->part, &md->part);
2264
2265        string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2266                        cap_str, sizeof(cap_str));
2267        pr_info("%s: %s %s partition %u %s\n",
2268               part_md->disk->disk_name, mmc_card_id(card),
2269               mmc_card_name(card), part_md->part_type, cap_str);
2270        return 0;
2271}
2272
2273/* MMC physical partitions consist of two boot partitions and
2274 * up to four general purpose partitions.
2275 * For each partition enabled in EXT_CSD a block device will be allocated
2276 * to provide access to the partition.
2277 */
2278
2279static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2280{
2281        int idx, ret = 0;
2282
2283        if (!mmc_card_mmc(card))
2284                return 0;
2285
2286        for (idx = 0; idx < card->nr_parts; idx++) {
2287                if (card->part[idx].size) {
2288                        ret = mmc_blk_alloc_part(card, md,
2289                                card->part[idx].part_cfg,
2290                                card->part[idx].size >> 9,
2291                                card->part[idx].force_ro,
2292                                card->part[idx].name,
2293                                card->part[idx].area_type);
2294                        if (ret)
2295                                return ret;
2296                }
2297        }
2298
2299        return ret;
2300}
2301
2302static void mmc_blk_remove_req(struct mmc_blk_data *md)
2303{
2304        struct mmc_card *card;
2305
2306        if (md) {
2307                /*
2308                 * Flush remaining requests and free queues. It is
2309                 * the freeing of the queue that stops new requests
2310                 * from being accepted.
2311                 */
2312                card = md->queue.card;
2313                mmc_cleanup_queue(&md->queue);
2314                if (md->flags & MMC_BLK_PACKED_CMD)
2315                        mmc_packed_clean(&md->queue);
2316                if (md->disk->flags & GENHD_FL_UP) {
2317                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2318                        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2319                                        card->ext_csd.boot_ro_lockable)
2320                                device_remove_file(disk_to_dev(md->disk),
2321                                        &md->power_ro_lock);
2322
2323                        del_gendisk(md->disk);
2324                }
2325                mmc_blk_put(md);
2326        }
2327}
2328
2329static void mmc_blk_remove_parts(struct mmc_card *card,
2330                                 struct mmc_blk_data *md)
2331{
2332        struct list_head *pos, *q;
2333        struct mmc_blk_data *part_md;
2334
2335        __clear_bit(md->name_idx, name_use);
2336        list_for_each_safe(pos, q, &md->part) {
2337                part_md = list_entry(pos, struct mmc_blk_data, part);
2338                list_del(pos);
2339                mmc_blk_remove_req(part_md);
2340        }
2341}
2342
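    /*
     * Register the gendisk and create the force_ro sysfs attribute,
     * plus ro_lock_until_next_power_on for lockable boot areas.
     */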
2343static int mmc_add_disk(struct mmc_blk_data *md)
2344{
2345        int ret;
2346        struct mmc_card *card = md->queue.card;
2347
2348        add_disk(md->disk);
2349        md->force_ro.show = force_ro_show;
2350        md->force_ro.store = force_ro_store;
2351        sysfs_attr_init(&md->force_ro.attr);
2352        md->force_ro.attr.name = "force_ro";
2353        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2354        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2355        if (ret)
2356                goto force_ro_fail;
2357
2358        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2359             card->ext_csd.boot_ro_lockable) {
2360                umode_t mode;
2361
2362                if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2363                        mode = S_IRUGO;
2364                else
2365                        mode = S_IRUGO | S_IWUSR;
2366
2367                md->power_ro_lock.show = power_ro_lock_show;
2368                md->power_ro_lock.store = power_ro_lock_store;
2369                sysfs_attr_init(&md->power_ro_lock.attr);
2370                md->power_ro_lock.attr.mode = mode;
2371                md->power_ro_lock.attr.name =
2372                                        "ro_lock_until_next_power_on";
2373                ret = device_create_file(disk_to_dev(md->disk),
2374                                &md->power_ro_lock);
2375                if (ret)
2376                        goto power_ro_lock_fail;
2377        }
2378        return ret;
2379
2380power_ro_lock_fail:
2381        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2382force_ro_fail:
2383        del_gendisk(md->disk);
2384
2385        return ret;
2386}
2387
2388#define CID_MANFID_SANDISK      0x2
2389#define CID_MANFID_TOSHIBA      0x11
2390#define CID_MANFID_MICRON       0x13
2391#define CID_MANFID_SAMSUNG      0x15
2392#define CID_MANFID_KINGSTON     0x70
2393
2394static const struct mmc_fixup blk_fixups[] =
2395{
2396        MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2397                  MMC_QUIRK_INAND_CMD38),
2398        MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2399                  MMC_QUIRK_INAND_CMD38),
2400        MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2401                  MMC_QUIRK_INAND_CMD38),
2402        MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2403                  MMC_QUIRK_INAND_CMD38),
2404        MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2405                  MMC_QUIRK_INAND_CMD38),
2406
2407        /*
2408         * Some MMC cards experience performance degradation with CMD23
2409         * instead of CMD12-bounded multiblock transfers. For now we'll
2410         * blacklist what's bad...
2411         * - Certain SanDisk and Toshiba cards.
2412         *
2413         * N.B. This doesn't affect SD cards.
2414         */
2415        MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2416                  MMC_QUIRK_BLK_NO_CMD23),
2417        MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2418                  MMC_QUIRK_BLK_NO_CMD23),
2419        MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2420                  MMC_QUIRK_BLK_NO_CMD23),
2421        MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2422                  MMC_QUIRK_BLK_NO_CMD23),
2423        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2424                  MMC_QUIRK_BLK_NO_CMD23),
2425
2426        /*
2427         * Some Micron MMC cards need a longer data read timeout than
2428         * indicated in the CSD.
2429         */
2430        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2431                  MMC_QUIRK_LONG_READ_TIME),
2432
2433        /*
2434         * On these Samsung MoviNAND parts, performing secure erase or
2435         * secure trim can result in unrecoverable corruption due to a
2436         * firmware bug.
2437         */
2438        MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2439                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2440        MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2441                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2442        MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2443                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2444        MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2445                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2446        MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2447                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2448        MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2449                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2450        MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2451                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2452        MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2453                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2454
2455        /*
2456         * On some Kingston eMMCs, performing trim can occasionally result in
2457         * unrecoverable data corruption due to a firmware bug.
2458         */
2459        MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2460                  MMC_QUIRK_TRIM_BROKEN),
2461        MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2462                  MMC_QUIRK_TRIM_BROKEN),
2463
2464        END_FIXUP
2465};
2466
2467static int mmc_blk_probe(struct mmc_card *card)
2468{
2469        struct mmc_blk_data *md, *part_md;
2470        char cap_str[10];
2471
2472        /*
2473         * Check that the card supports the command class(es) we need.
2474         */
2475        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2476                return -ENODEV;
2477
2478        mmc_fixup_device(card, blk_fixups);
2479
2480        md = mmc_blk_alloc(card);
2481        if (IS_ERR(md))
2482                return PTR_ERR(md);
2483
2484        string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2485                        cap_str, sizeof(cap_str));
2486        pr_info("%s: %s %s %s %s\n",
2487                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2488                cap_str, md->read_only ? "(ro)" : "");
2489
2490        if (mmc_blk_alloc_parts(card, md))
2491                goto out;
2492
2493        dev_set_drvdata(&card->dev, md);
2494
2495        if (mmc_add_disk(md))
2496                goto out;
2497
2498        list_for_each_entry(part_md, &md->part, part) {
2499                if (mmc_add_disk(part_md))
2500                        goto out;
2501        }
2502
2503        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2504        pm_runtime_use_autosuspend(&card->dev);
2505
2506        /*
2507         * Don't enable runtime PM for SD-combo cards here. Leave that
2508         * decision to be taken during the SDIO init sequence instead.
2509         */
2510        if (card->type != MMC_TYPE_SD_COMBO) {
2511                pm_runtime_set_active(&card->dev);
2512                pm_runtime_enable(&card->dev);
2513        }
2514
2515        return 0;
2516
2517 out:
2518        mmc_blk_remove_parts(card, md);
2519        mmc_blk_remove_req(md);
2520        return 0;
2521}
2522
2523static void mmc_blk_remove(struct mmc_card *card)
2524{
2525        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2526
2527        mmc_blk_remove_parts(card, md);
2528        pm_runtime_get_sync(&card->dev);
2529        mmc_claim_host(card->host);
2530        mmc_blk_part_switch(card, md);
2531        mmc_release_host(card->host);
2532        if (card->type != MMC_TYPE_SD_COMBO)
2533                pm_runtime_disable(&card->dev);
2534        pm_runtime_put_noidle(&card->dev);
2535        mmc_blk_remove_req(md);
2536        dev_set_drvdata(&card->dev, NULL);
2537}
2538
2539static int _mmc_blk_suspend(struct mmc_card *card)
2540{
2541        struct mmc_blk_data *part_md;
2542        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2543
2544        if (md) {
2545                mmc_queue_suspend(&md->queue);
2546                list_for_each_entry(part_md, &md->part, part) {
2547                        mmc_queue_suspend(&part_md->queue);
2548                }
2549        }
2550        return 0;
2551}
2552
2553static void mmc_blk_shutdown(struct mmc_card *card)
2554{
2555        _mmc_blk_suspend(card);
2556}
2557
2558#ifdef CONFIG_PM_SLEEP
2559static int mmc_blk_suspend(struct device *dev)
2560{
2561        struct mmc_card *card = mmc_dev_to_card(dev);
2562
2563        return _mmc_blk_suspend(card);
2564}
2565
2566static int mmc_blk_resume(struct device *dev)
2567{
2568        struct mmc_blk_data *part_md;
2569        struct mmc_blk_data *md = dev_get_drvdata(dev);
2570
2571        if (md) {
2572                /*
2573                 * Resume involves the card going into idle state,
2574                 * so the current partition is always the main one.
2575                 */
2576                md->part_curr = md->part_type;
2577                mmc_queue_resume(&md->queue);
2578                list_for_each_entry(part_md, &md->part, part) {
2579                        mmc_queue_resume(&part_md->queue);
2580                }
2581        }
2582        return 0;
2583}
2584#endif
2585
2586static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2587
2588static struct mmc_driver mmc_driver = {
2589        .drv            = {
2590                .name   = "mmcblk",
2591                .pm     = &mmc_blk_pm_ops,
2592        },
2593        .probe          = mmc_blk_probe,
2594        .remove         = mmc_blk_remove,
2595        .shutdown       = mmc_blk_shutdown,
2596};
2597
2598static int __init mmc_blk_init(void)
2599{
2600        int res;
2601
2602        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2603                pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2604
2605        max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2606
2607        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2608        if (res)
2609                goto out;
2610
2611        res = mmc_register_driver(&mmc_driver);
2612        if (res)
2613                goto out2;
2614
2615        return 0;
2616 out2:
2617        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2618 out:
2619        return res;
2620}
2621
2622static void __exit mmc_blk_exit(void)
2623{
2624        mmc_unregister_driver(&mmc_driver);
2625        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2626}
2627
2628module_init(mmc_blk_init);
2629module_exit(mmc_blk_exit);
2630
2631MODULE_LICENSE("GPL");
2632MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2633
2634