linux/drivers/mmc/card/block.c
   1/*
   2 * Block driver for media (i.e., flash cards)
   3 *
   4 * Copyright 2002 Hewlett-Packard Company
   5 * Copyright 2005-2008 Pierre Ossman
   6 *
   7 * Use consistent with the GNU GPL is permitted,
   8 * provided that this copyright notice is
   9 * preserved in its entirety in all copies and derived works.
  10 *
  11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
  12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
  13 * FITNESS FOR ANY PARTICULAR PURPOSE.
  14 *
  15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
  16 *
  17 * Author:  Andrew Christian
  18 *          28 May 2002
  19 */
  20#include <linux/moduleparam.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23
  24#include <linux/kernel.h>
  25#include <linux/fs.h>
  26#include <linux/slab.h>
  27#include <linux/errno.h>
  28#include <linux/hdreg.h>
  29#include <linux/kdev_t.h>
  30#include <linux/blkdev.h>
  31#include <linux/mutex.h>
  32#include <linux/scatterlist.h>
  33#include <linux/string_helpers.h>
  34#include <linux/delay.h>
  35#include <linux/capability.h>
  36#include <linux/compat.h>
  37#include <linux/pm_runtime.h>
  38#include <linux/idr.h>
  39
  40#include <linux/mmc/ioctl.h>
  41#include <linux/mmc/card.h>
  42#include <linux/mmc/host.h>
  43#include <linux/mmc/mmc.h>
  44#include <linux/mmc/sd.h>
  45
  46#include <asm/uaccess.h>
  47
  48#include "queue.h"
  49#include "block.h"
  50
  51MODULE_ALIAS("mmc:block");
  52#ifdef MODULE_PARAM_PREFIX
  53#undef MODULE_PARAM_PREFIX
  54#endif
  55#define MODULE_PARAM_PREFIX "mmcblk."
  56
  57#define INAND_CMD38_ARG_EXT_CSD  113
  58#define INAND_CMD38_ARG_ERASE    0x00
  59#define INAND_CMD38_ARG_TRIM     0x01
  60#define INAND_CMD38_ARG_SECERASE 0x80
  61#define INAND_CMD38_ARG_SECTRIM1 0x81
  62#define INAND_CMD38_ARG_SECTRIM2 0x88
  63#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
  64#define MMC_SANITIZE_REQ_TIMEOUT 240000
   65#define MMC_EXTRACT_INDEX_FROM_ARG(x) (((x) & 0x00FF0000) >> 16)
  66
  67#define mmc_req_rel_wr(req)     ((req->cmd_flags & REQ_FUA) && \
  68                                  (rq_data_dir(req) == WRITE))
  69#define PACKED_CMD_VER  0x01
  70#define PACKED_CMD_WR   0x02
  71
  72static DEFINE_MUTEX(block_mutex);
  73
  74/*
   75 * The defaults come from config options but can be overridden by module
  76 * or bootarg options.
  77 */
  78static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
  79
  80/*
  81 * We've only got one major, so number of mmcblk devices is
  82 * limited to (1 << 20) / number of minors per device.  It is also
  83 * limited by the MAX_DEVICES below.
  84 */
  85static int max_devices;
  86
  87#define MAX_DEVICES 256
  88
  89static DEFINE_IDA(mmc_blk_ida);
  90static DEFINE_SPINLOCK(mmc_blk_lock);
  91
  92/*
  93 * There is one mmc_blk_data per slot.
  94 */
  95struct mmc_blk_data {
  96        spinlock_t      lock;
  97        struct device   *parent;
  98        struct gendisk  *disk;
  99        struct mmc_queue queue;
 100        struct list_head part;
 101
 102        unsigned int    flags;
 103#define MMC_BLK_CMD23   (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 104#define MMC_BLK_REL_WR  (1 << 1)        /* MMC Reliable write support */
 105#define MMC_BLK_PACKED_CMD      (1 << 2)        /* MMC packed command support */
 106
 107        unsigned int    usage;
 108        unsigned int    read_only;
 109        unsigned int    part_type;
 110        unsigned int    reset_done;
 111#define MMC_BLK_READ            BIT(0)
 112#define MMC_BLK_WRITE           BIT(1)
 113#define MMC_BLK_DISCARD         BIT(2)
 114#define MMC_BLK_SECDISCARD      BIT(3)
 115
 116        /*
  117         * Only set in the main mmc_blk_data associated with the
  118         * mmc_card via dev_set_drvdata; tracks the currently
  119         * selected device partition.
 120         */
 121        unsigned int    part_curr;
 122        struct device_attribute force_ro;
 123        struct device_attribute power_ro_lock;
 124        int     area_type;
 125};
 126
 127static DEFINE_MUTEX(open_lock);
 128
 129enum {
 130        MMC_PACKED_NR_IDX = -1,
 131        MMC_PACKED_NR_ZERO,
 132        MMC_PACKED_NR_SINGLE,
 133};
 134
 135module_param(perdev_minors, int, 0444);
  136MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
 137
 138static inline int mmc_blk_part_switch(struct mmc_card *card,
 139                                      struct mmc_blk_data *md);
 140static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 141
 142static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
 143{
 144        struct mmc_packed *packed = mqrq->packed;
 145
 146        mqrq->cmd_type = MMC_PACKED_NONE;
 147        packed->nr_entries = MMC_PACKED_NR_ZERO;
 148        packed->idx_failure = MMC_PACKED_NR_IDX;
 149        packed->retries = 0;
 150        packed->blocks = 0;
 151}
 152
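/*
 * Take a reference on the mmc_blk_data behind @disk.  Returns NULL if the
 * device is already going away (its usage count has dropped to zero).
 */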
 153static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 154{
 155        struct mmc_blk_data *md;
 156
 157        mutex_lock(&open_lock);
 158        md = disk->private_data;
 159        if (md && md->usage == 0)
 160                md = NULL;
 161        if (md)
 162                md->usage++;
 163        mutex_unlock(&open_lock);
 164
 165        return md;
 166}
 167
 168static inline int mmc_get_devidx(struct gendisk *disk)
 169{
 170        int devidx = disk->first_minor / perdev_minors;
 171        return devidx;
 172}
 173
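/*
 * Drop a reference on @md.  When the last reference goes away, tear down the
 * request queue, release the device index and free the gendisk and md.
 */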
 174static void mmc_blk_put(struct mmc_blk_data *md)
 175{
 176        mutex_lock(&open_lock);
 177        md->usage--;
 178        if (md->usage == 0) {
 179                int devidx = mmc_get_devidx(md->disk);
 180                blk_cleanup_queue(md->queue.queue);
 181
 182                spin_lock(&mmc_blk_lock);
 183                ida_remove(&mmc_blk_ida, devidx);
 184                spin_unlock(&mmc_blk_lock);
 185
 186                put_disk(md->disk);
 187                kfree(md);
 188        }
 189        mutex_unlock(&open_lock);
 190}
 191
 192static ssize_t power_ro_lock_show(struct device *dev,
 193                struct device_attribute *attr, char *buf)
 194{
 195        int ret;
 196        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 197        struct mmc_card *card = md->queue.card;
 198        int locked = 0;
 199
 200        if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
 201                locked = 2;
 202        else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
 203                locked = 1;
 204
 205        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 206
 207        mmc_blk_put(md);
 208
 209        return ret;
 210}
 211
 212static ssize_t power_ro_lock_store(struct device *dev,
 213                struct device_attribute *attr, const char *buf, size_t count)
 214{
 215        int ret;
 216        struct mmc_blk_data *md, *part_md;
 217        struct mmc_card *card;
 218        unsigned long set;
 219
 220        if (kstrtoul(buf, 0, &set))
 221                return -EINVAL;
 222
 223        if (set != 1)
 224                return count;
 225
 226        md = mmc_blk_get(dev_to_disk(dev));
 227        card = md->queue.card;
 228
 229        mmc_get_card(card);
 230
 231        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
 232                                card->ext_csd.boot_ro_lock |
 233                                EXT_CSD_BOOT_WP_B_PWR_WP_EN,
 234                                card->ext_csd.part_time);
 235        if (ret)
 236                pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
 237        else
 238                card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
 239
 240        mmc_put_card(card);
 241
 242        if (!ret) {
 243                pr_info("%s: Locking boot partition ro until next power on\n",
 244                        md->disk->disk_name);
 245                set_disk_ro(md->disk, 1);
 246
 247                list_for_each_entry(part_md, &md->part, part)
 248                        if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
 249                                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
 250                                set_disk_ro(part_md->disk, 1);
 251                        }
 252        }
 253
 254        mmc_blk_put(md);
 255        return count;
 256}
 257
 258static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
 259                             char *buf)
 260{
 261        int ret;
 262        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 263
 264        ret = snprintf(buf, PAGE_SIZE, "%d\n",
 265                       get_disk_ro(dev_to_disk(dev)) ^
 266                       md->read_only);
 267        mmc_blk_put(md);
 268        return ret;
 269}
 270
 271static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
 272                              const char *buf, size_t count)
 273{
 274        int ret;
 275        char *end;
 276        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 277        unsigned long set = simple_strtoul(buf, &end, 0);
 278        if (end == buf) {
 279                ret = -EINVAL;
 280                goto out;
 281        }
 282
 283        set_disk_ro(dev_to_disk(dev), set || md->read_only);
 284        ret = count;
 285out:
 286        mmc_blk_put(md);
 287        return ret;
 288}
 289
 290static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 291{
 292        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
 293        int ret = -ENXIO;
 294
 295        mutex_lock(&block_mutex);
 296        if (md) {
 297                if (md->usage == 2)
 298                        check_disk_change(bdev);
 299                ret = 0;
 300
 301                if ((mode & FMODE_WRITE) && md->read_only) {
 302                        mmc_blk_put(md);
 303                        ret = -EROFS;
 304                }
 305        }
 306        mutex_unlock(&block_mutex);
 307
 308        return ret;
 309}
 310
 311static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
 312{
 313        struct mmc_blk_data *md = disk->private_data;
 314
 315        mutex_lock(&block_mutex);
 316        mmc_blk_put(md);
 317        mutex_unlock(&block_mutex);
 318}
 319
 320static int
 321mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 322{
 323        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
 324        geo->heads = 4;
 325        geo->sectors = 16;
 326        return 0;
 327}
 328
 329struct mmc_blk_ioc_data {
 330        struct mmc_ioc_cmd ic;
 331        unsigned char *buf;
 332        u64 buf_bytes;
 333};
 334
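/*
 * Copy an mmc_ioc_cmd and, if present, its data buffer from user space into
 * a freshly allocated mmc_blk_ioc_data.  Returns an ERR_PTR on failure.
 */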
 335static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
 336        struct mmc_ioc_cmd __user *user)
 337{
 338        struct mmc_blk_ioc_data *idata;
 339        int err;
 340
 341        idata = kmalloc(sizeof(*idata), GFP_KERNEL);
 342        if (!idata) {
 343                err = -ENOMEM;
 344                goto out;
 345        }
 346
 347        if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
 348                err = -EFAULT;
 349                goto idata_err;
 350        }
 351
 352        idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
 353        if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
 354                err = -EOVERFLOW;
 355                goto idata_err;
 356        }
 357
 358        if (!idata->buf_bytes) {
 359                idata->buf = NULL;
 360                return idata;
 361        }
 362
 363        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
 364        if (!idata->buf) {
 365                err = -ENOMEM;
 366                goto idata_err;
 367        }
 368
 369        if (copy_from_user(idata->buf, (void __user *)(unsigned long)
 370                                        idata->ic.data_ptr, idata->buf_bytes)) {
 371                err = -EFAULT;
 372                goto copy_err;
 373        }
 374
 375        return idata;
 376
 377copy_err:
 378        kfree(idata->buf);
 379idata_err:
 380        kfree(idata);
 381out:
 382        return ERR_PTR(err);
 383}
 384
 385static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
 386                                      struct mmc_blk_ioc_data *idata)
 387{
 388        struct mmc_ioc_cmd *ic = &idata->ic;
 389
 390        if (copy_to_user(&(ic_ptr->response), ic->response,
 391                         sizeof(ic->response)))
 392                return -EFAULT;
 393
 394        if (!idata->ic.write_flag) {
 395                if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
 396                                 idata->buf, idata->buf_bytes))
 397                        return -EFAULT;
 398        }
 399
 400        return 0;
 401}
 402
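/*
 * Poll CMD13 (SEND_STATUS) until the card reports no error bits and has left
 * the programming state, sleeping between polls.  Gives up after
 * @retries_max attempts.
 */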
 403static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
 404                                       u32 retries_max)
 405{
 406        int err;
 407        u32 retry_count = 0;
 408
 409        if (!status || !retries_max)
 410                return -EINVAL;
 411
 412        do {
 413                err = get_card_status(card, status, 5);
 414                if (err)
 415                        break;
 416
 417                if (!R1_STATUS(*status) &&
 418                                (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
 419                        break; /* RPMB programming operation complete */
 420
 421                /*
  422                 * Reschedule to give the MMC device a chance to continue
 423                 * processing the previous command without being polled too
 424                 * frequently.
 425                 */
 426                usleep_range(1000, 5000);
 427        } while (++retry_count < retries_max);
 428
 429        if (retry_count == retries_max)
 430                err = -EPERM;
 431
 432        return err;
 433}
 434
 435static int ioctl_do_sanitize(struct mmc_card *card)
 436{
 437        int err;
 438
 439        if (!mmc_can_sanitize(card)) {
  440                pr_warn("%s: %s - SANITIZE is not supported\n",
  441                        mmc_hostname(card->host), __func__);
  442                err = -EOPNOTSUPP;
  443                goto out;
 444        }
 445
 446        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
 447                mmc_hostname(card->host), __func__);
 448
 449        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 450                                        EXT_CSD_SANITIZE_START, 1,
 451                                        MMC_SANITIZE_REQ_TIMEOUT);
 452
 453        if (err)
 454                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
 455                       mmc_hostname(card->host), __func__, err);
 456
 457        pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
 458                                             __func__);
 459out:
 460        return err;
 461}
 462
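/*
 * Issue a single user-supplied command: build the mmc_command/mmc_data from
 * @idata, switch to the partition backing @md, handle the ACMD, RPMB block
 * count and sanitize special cases, then run the request and save the
 * response.  For RPMB accesses the card is polled afterwards until it leaves
 * the programming state.
 */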
 463static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 464                               struct mmc_blk_ioc_data *idata)
 465{
 466        struct mmc_command cmd = {0};
 467        struct mmc_data data = {0};
 468        struct mmc_request mrq = {NULL};
 469        struct scatterlist sg;
 470        int err;
  471        bool is_rpmb = false;
 472        u32 status = 0;
 473
 474        if (!card || !md || !idata)
 475                return -EINVAL;
 476
 477        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
 478                is_rpmb = true;
 479
 480        cmd.opcode = idata->ic.opcode;
 481        cmd.arg = idata->ic.arg;
 482        cmd.flags = idata->ic.flags;
 483
 484        if (idata->buf_bytes) {
 485                data.sg = &sg;
 486                data.sg_len = 1;
 487                data.blksz = idata->ic.blksz;
 488                data.blocks = idata->ic.blocks;
 489
 490                sg_init_one(data.sg, idata->buf, idata->buf_bytes);
 491
 492                if (idata->ic.write_flag)
 493                        data.flags = MMC_DATA_WRITE;
 494                else
 495                        data.flags = MMC_DATA_READ;
 496
 497                /* data.flags must already be set before doing this. */
 498                mmc_set_data_timeout(&data, card);
 499
 500                /* Allow overriding the timeout_ns for empirical tuning. */
 501                if (idata->ic.data_timeout_ns)
 502                        data.timeout_ns = idata->ic.data_timeout_ns;
 503
 504                if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
 505                        /*
 506                         * Pretend this is a data transfer and rely on the
 507                         * host driver to compute timeout.  When all host
 508                         * drivers support cmd.cmd_timeout for R1B, this
 509                         * can be changed to:
 510                         *
 511                         *     mrq.data = NULL;
 512                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
 513                         */
 514                        data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
 515                }
 516
 517                mrq.data = &data;
 518        }
 519
 520        mrq.cmd = &cmd;
 521
 522        err = mmc_blk_part_switch(card, md);
 523        if (err)
 524                return err;
 525
 526        if (idata->ic.is_acmd) {
 527                err = mmc_app_cmd(card->host, card);
 528                if (err)
 529                        return err;
 530        }
 531
 532        if (is_rpmb) {
 533                err = mmc_set_blockcount(card, data.blocks,
 534                        idata->ic.write_flag & (1 << 31));
 535                if (err)
 536                        return err;
 537        }
 538
 539        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
 540            (cmd.opcode == MMC_SWITCH)) {
 541                err = ioctl_do_sanitize(card);
 542
 543                if (err)
 544                        pr_err("%s: ioctl_do_sanitize() failed. err = %d",
 545                               __func__, err);
 546
 547                return err;
 548        }
 549
 550        mmc_wait_for_req(card->host, &mrq);
 551
 552        if (cmd.error) {
 553                dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
 554                                                __func__, cmd.error);
 555                return cmd.error;
 556        }
 557        if (data.error) {
 558                dev_err(mmc_dev(card->host), "%s: data error %d\n",
 559                                                __func__, data.error);
 560                return data.error;
 561        }
 562
 563        /*
 564         * According to the SD specs, some commands require a delay after
 565         * issuing the command.
 566         */
 567        if (idata->ic.postsleep_min_us)
 568                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 569
 570        memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
 571
 572        if (is_rpmb) {
 573                /*
 574                 * Ensure RPMB command has completed by polling CMD13
 575                 * "Send Status".
 576                 */
 577                err = ioctl_rpmb_card_status_poll(card, &status, 5);
 578                if (err)
 579                        dev_err(mmc_dev(card->host),
 580                                        "%s: Card Status=0x%08X, error %d\n",
 581                                        __func__, status, err);
 582        }
 583
 584        return err;
 585}
 586
 587static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 588                             struct mmc_ioc_cmd __user *ic_ptr)
 589{
 590        struct mmc_blk_ioc_data *idata;
 591        struct mmc_blk_data *md;
 592        struct mmc_card *card;
 593        int err = 0, ioc_err = 0;
 594
 595        /*
 596         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
 597         * whole block device, not on a partition.  This prevents overspray
 598         * between sibling partitions.
 599         */
 600        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 601                return -EPERM;
 602
 603        idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
 604        if (IS_ERR(idata))
 605                return PTR_ERR(idata);
 606
 607        md = mmc_blk_get(bdev->bd_disk);
 608        if (!md) {
 609                err = -EINVAL;
 610                goto cmd_err;
 611        }
 612
 613        card = md->queue.card;
 614        if (IS_ERR(card)) {
 615                err = PTR_ERR(card);
 616                goto cmd_done;
 617        }
 618
 619        mmc_get_card(card);
 620
 621        ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
 622
 623        /* Always switch back to main area after RPMB access */
 624        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
 625                mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
 626
 627        mmc_put_card(card);
 628
 629        err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
 630
 631cmd_done:
 632        mmc_blk_put(md);
 633cmd_err:
 634        kfree(idata->buf);
 635        kfree(idata);
 636        return ioc_err ? ioc_err : err;
 637}
 638
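/*
 * MMC_IOC_MULTI_CMD: copy an array of commands from user space and issue
 * them back to back while holding the card, then copy the responses (and any
 * read data) back to the caller.
 */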
 639static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 640                                   struct mmc_ioc_multi_cmd __user *user)
 641{
 642        struct mmc_blk_ioc_data **idata = NULL;
 643        struct mmc_ioc_cmd __user *cmds = user->cmds;
 644        struct mmc_card *card;
 645        struct mmc_blk_data *md;
 646        int i, err = 0, ioc_err = 0;
 647        __u64 num_of_cmds;
 648
 649        /*
 650         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
 651         * whole block device, not on a partition.  This prevents overspray
 652         * between sibling partitions.
 653         */
 654        if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
 655                return -EPERM;
 656
 657        if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
 658                           sizeof(num_of_cmds)))
 659                return -EFAULT;
 660
 661        if (num_of_cmds > MMC_IOC_MAX_CMDS)
 662                return -EINVAL;
 663
 664        idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
 665        if (!idata)
 666                return -ENOMEM;
 667
 668        for (i = 0; i < num_of_cmds; i++) {
 669                idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
 670                if (IS_ERR(idata[i])) {
 671                        err = PTR_ERR(idata[i]);
 672                        num_of_cmds = i;
 673                        goto cmd_err;
 674                }
 675        }
 676
 677        md = mmc_blk_get(bdev->bd_disk);
 678        if (!md) {
 679                err = -EINVAL;
 680                goto cmd_err;
 681        }
 682
 683        card = md->queue.card;
 684        if (IS_ERR(card)) {
 685                err = PTR_ERR(card);
 686                goto cmd_done;
 687        }
 688
 689        mmc_get_card(card);
 690
 691        for (i = 0; i < num_of_cmds && !ioc_err; i++)
 692                ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
 693
 694        /* Always switch back to main area after RPMB access */
 695        if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
 696                mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
 697
 698        mmc_put_card(card);
 699
 700        /* copy to user if data and response */
 701        for (i = 0; i < num_of_cmds && !err; i++)
 702                err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
 703
 704cmd_done:
 705        mmc_blk_put(md);
 706cmd_err:
 707        for (i = 0; i < num_of_cmds; i++) {
 708                kfree(idata[i]->buf);
 709                kfree(idata[i]);
 710        }
 711        kfree(idata);
 712        return ioc_err ? ioc_err : err;
 713}
 714
 715static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
 716        unsigned int cmd, unsigned long arg)
 717{
 718        switch (cmd) {
 719        case MMC_IOC_CMD:
 720                return mmc_blk_ioctl_cmd(bdev,
 721                                (struct mmc_ioc_cmd __user *)arg);
 722        case MMC_IOC_MULTI_CMD:
 723                return mmc_blk_ioctl_multi_cmd(bdev,
 724                                (struct mmc_ioc_multi_cmd __user *)arg);
 725        default:
 726                return -EINVAL;
 727        }
 728}
 729
 730#ifdef CONFIG_COMPAT
 731static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
 732        unsigned int cmd, unsigned long arg)
 733{
 734        return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
 735}
 736#endif
 737
 738static const struct block_device_operations mmc_bdops = {
 739        .open                   = mmc_blk_open,
 740        .release                = mmc_blk_release,
 741        .getgeo                 = mmc_blk_getgeo,
 742        .owner                  = THIS_MODULE,
 743        .ioctl                  = mmc_blk_ioctl,
 744#ifdef CONFIG_COMPAT
 745        .compat_ioctl           = mmc_blk_compat_ioctl,
 746#endif
 747};
 748
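/*
 * Select the card partition backing @md via EXT_CSD PART_CONFIG, if it is
 * not already the current one.  Re-tuning is paused while the RPMB partition
 * is selected.
 */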
 749static inline int mmc_blk_part_switch(struct mmc_card *card,
 750                                      struct mmc_blk_data *md)
 751{
 752        int ret;
 753        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 754
 755        if (main_md->part_curr == md->part_type)
 756                return 0;
 757
 758        if (mmc_card_mmc(card)) {
 759                u8 part_config = card->ext_csd.part_config;
 760
 761                if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
 762                        mmc_retune_pause(card->host);
 763
 764                part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 765                part_config |= md->part_type;
 766
 767                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 768                                 EXT_CSD_PART_CONFIG, part_config,
 769                                 card->ext_csd.part_time);
 770                if (ret) {
 771                        if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
 772                                mmc_retune_unpause(card->host);
 773                        return ret;
 774                }
 775
 776                card->ext_csd.part_config = part_config;
 777
 778                if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
 779                        mmc_retune_unpause(card->host);
 780        }
 781
 782        main_md->part_curr = md->part_type;
 783        return 0;
 784}
 785
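/*
 * Ask an SD card how many blocks were written without error (ACMD22,
 * SEND_NUM_WR_BLKS).  Returns (u32)-1 on any failure.
 */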
 786static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 787{
 788        int err;
 789        u32 result;
 790        __be32 *blocks;
 791
 792        struct mmc_request mrq = {NULL};
 793        struct mmc_command cmd = {0};
 794        struct mmc_data data = {0};
 795
 796        struct scatterlist sg;
 797
 798        cmd.opcode = MMC_APP_CMD;
 799        cmd.arg = card->rca << 16;
 800        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 801
 802        err = mmc_wait_for_cmd(card->host, &cmd, 0);
 803        if (err)
 804                return (u32)-1;
 805        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
 806                return (u32)-1;
 807
 808        memset(&cmd, 0, sizeof(struct mmc_command));
 809
 810        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
 811        cmd.arg = 0;
 812        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 813
 814        data.blksz = 4;
 815        data.blocks = 1;
 816        data.flags = MMC_DATA_READ;
 817        data.sg = &sg;
 818        data.sg_len = 1;
 819        mmc_set_data_timeout(&data, card);
 820
 821        mrq.cmd = &cmd;
 822        mrq.data = &data;
 823
 824        blocks = kmalloc(4, GFP_KERNEL);
 825        if (!blocks)
 826                return (u32)-1;
 827
 828        sg_init_one(&sg, blocks, 4);
 829
 830        mmc_wait_for_req(card->host, &mrq);
 831
 832        result = ntohl(*blocks);
 833        kfree(blocks);
 834
 835        if (cmd.error || data.error)
 836                result = (u32)-1;
 837
 838        return result;
 839}
 840
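/* Issue CMD13 (SEND_STATUS) and return the card's R1 status word in @status. */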
 841static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 842{
 843        struct mmc_command cmd = {0};
 844        int err;
 845
 846        cmd.opcode = MMC_SEND_STATUS;
 847        if (!mmc_host_is_spi(card->host))
 848                cmd.arg = card->rca << 16;
 849        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 850        err = mmc_wait_for_cmd(card->host, &cmd, retries);
 851        if (err == 0)
 852                *status = cmd.resp[0];
 853        return err;
 854}
 855
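/*
 * Poll CMD13 until the card is ready for data and has left the programming
 * state, unless the host can do hardware busy detection.  Flags R1_ERROR
 * through @gen_err and times out after @timeout_ms.
 */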
 856static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 857                bool hw_busy_detect, struct request *req, int *gen_err)
 858{
 859        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
 860        int err = 0;
 861        u32 status;
 862
 863        do {
 864                err = get_card_status(card, &status, 5);
 865                if (err) {
 866                        pr_err("%s: error %d requesting status\n",
 867                               req->rq_disk->disk_name, err);
 868                        return err;
 869                }
 870
 871                if (status & R1_ERROR) {
 872                        pr_err("%s: %s: error sending status cmd, status %#x\n",
 873                                req->rq_disk->disk_name, __func__, status);
 874                        *gen_err = 1;
 875                }
 876
  877                /* We may rely on the host hw to handle busy detection. */
 878                if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
 879                        hw_busy_detect)
 880                        break;
 881
 882                /*
 883                 * Timeout if the device never becomes ready for data and never
 884                 * leaves the program state.
 885                 */
 886                if (time_after(jiffies, timeout)) {
 887                        pr_err("%s: Card stuck in programming state! %s %s\n",
 888                                mmc_hostname(card->host),
 889                                req->rq_disk->disk_name, __func__);
 890                        return -ETIMEDOUT;
 891                }
 892
 893                /*
 894                 * Some cards mishandle the status bits,
 895                 * so make sure to check both the busy
 896                 * indication and the card state.
 897                 */
 898        } while (!(status & R1_READY_FOR_DATA) ||
 899                 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
 900
 901        return err;
 902}
 903
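/*
 * Send CMD12 (STOP_TRANSMISSION).  For writes an R1B response is used when
 * the host's max_busy_timeout allows it; afterwards wait for the card to
 * become idle again.
 */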
 904static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
 905                struct request *req, int *gen_err, u32 *stop_status)
 906{
 907        struct mmc_host *host = card->host;
 908        struct mmc_command cmd = {0};
 909        int err;
 910        bool use_r1b_resp = rq_data_dir(req) == WRITE;
 911
 912        /*
  913         * Normally we use R1B responses for WRITE, but in cases where the host
  914         * has specified a max_busy_timeout we need to validate it. If timeout_ms
  915         * exceeds it, we prevent the host from doing hw busy detection by
  916         * converting to an R1 response instead.
 917         */
 918        if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
 919                use_r1b_resp = false;
 920
 921        cmd.opcode = MMC_STOP_TRANSMISSION;
 922        if (use_r1b_resp) {
 923                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 924                cmd.busy_timeout = timeout_ms;
 925        } else {
 926                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 927        }
 928
 929        err = mmc_wait_for_cmd(host, &cmd, 5);
 930        if (err)
 931                return err;
 932
 933        *stop_status = cmd.resp[0];
 934
 935        /* No need to check card status in case of READ. */
 936        if (rq_data_dir(req) == READ)
 937                return 0;
 938
 939        if (!mmc_host_is_spi(host) &&
 940                (*stop_status & R1_ERROR)) {
 941                pr_err("%s: %s: general error sending stop command, resp %#x\n",
 942                        req->rq_disk->disk_name, __func__, *stop_status);
 943                *gen_err = 1;
 944        }
 945
 946        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
 947}
 948
 949#define ERR_NOMEDIUM    3
 950#define ERR_RETRY       2
 951#define ERR_ABORT       1
 952#define ERR_CONTINUE    0
 953
 954static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
 955        bool status_valid, u32 status)
 956{
 957        switch (error) {
 958        case -EILSEQ:
 959                /* response crc error, retry the r/w cmd */
 960                pr_err("%s: %s sending %s command, card status %#x\n",
 961                        req->rq_disk->disk_name, "response CRC error",
 962                        name, status);
 963                return ERR_RETRY;
 964
 965        case -ETIMEDOUT:
 966                pr_err("%s: %s sending %s command, card status %#x\n",
 967                        req->rq_disk->disk_name, "timed out", name, status);
 968
 969                /* If the status cmd initially failed, retry the r/w cmd */
 970                if (!status_valid) {
 971                        pr_err("%s: status not valid, retrying timeout\n",
 972                                req->rq_disk->disk_name);
 973                        return ERR_RETRY;
 974                }
 975
 976                /*
 977                 * If it was a r/w cmd crc error, or illegal command
 978                 * (eg, issued in wrong state) then retry - we should
 979                 * have corrected the state problem above.
 980                 */
 981                if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
 982                        pr_err("%s: command error, retrying timeout\n",
 983                                req->rq_disk->disk_name);
 984                        return ERR_RETRY;
 985                }
 986
 987                /* Otherwise abort the command */
 988                return ERR_ABORT;
 989
 990        default:
 991                /* We don't understand the error code the driver gave us */
 992                pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
 993                       req->rq_disk->disk_name, error, status);
 994                return ERR_ABORT;
 995        }
 996}
 997
 998/*
 999 * Initial r/w and stop cmd error recovery.
1000 * We don't know whether the card received the r/w cmd or not, so try to
1001 * restore things back to a sane state.  Essentially, we do this as follows:
1002 * - Obtain card status.  If the first attempt to obtain card status fails,
1003 *   the status word will reflect the failed status cmd, not the failed
1004 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
1005 *   longer communicate with the card.
1006 * - Check the card state.  If the card received the cmd but there was a
1007 *   transient problem with the response, it might still be in a data transfer
1008 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
1009 * - If the r/w cmd failed due to a response CRC error, it was probably
1010 *   transient, so retry the cmd.
1011 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1012 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1013 *   illegal cmd, retry.
1014 * Otherwise we don't understand what happened, so abort.
1015 */
1016static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1017        struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1018{
1019        bool prev_cmd_status_valid = true;
1020        u32 status, stop_status = 0;
1021        int err, retry;
1022
1023        if (mmc_card_removed(card))
1024                return ERR_NOMEDIUM;
1025
1026        /*
1027         * Try to get card status which indicates both the card state
1028         * and why there was no response.  If the first attempt fails,
1029         * we can't be sure the returned status is for the r/w command.
1030         */
1031        for (retry = 2; retry >= 0; retry--) {
1032                err = get_card_status(card, &status, 0);
1033                if (!err)
1034                        break;
1035
1036                /* Re-tune if needed */
1037                mmc_retune_recheck(card->host);
1038
1039                prev_cmd_status_valid = false;
1040                pr_err("%s: error %d sending status command, %sing\n",
1041                       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1042        }
1043
1044        /* We couldn't get a response from the card.  Give up. */
1045        if (err) {
1046                /* Check if the card is removed */
1047                if (mmc_detect_card_removed(card->host))
1048                        return ERR_NOMEDIUM;
1049                return ERR_ABORT;
1050        }
1051
1052        /* Flag ECC errors */
1053        if ((status & R1_CARD_ECC_FAILED) ||
1054            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1055            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1056                *ecc_err = 1;
1057
1058        /* Flag General errors */
1059        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1060                if ((status & R1_ERROR) ||
1061                        (brq->stop.resp[0] & R1_ERROR)) {
1062                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1063                               req->rq_disk->disk_name, __func__,
1064                               brq->stop.resp[0], status);
1065                        *gen_err = 1;
1066                }
1067
1068        /*
1069         * Check the current card state.  If it is in some data transfer
1070         * mode, tell it to stop (and hopefully transition back to TRAN.)
1071         */
1072        if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1073            R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1074                err = send_stop(card,
1075                        DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
1076                        req, gen_err, &stop_status);
1077                if (err) {
1078                        pr_err("%s: error %d sending stop command\n",
1079                               req->rq_disk->disk_name, err);
1080                        /*
1081                         * If the stop cmd also timed out, the card is probably
1082                         * not present, so abort. Other errors are bad news too.
1083                         */
1084                        return ERR_ABORT;
1085                }
1086
1087                if (stop_status & R1_CARD_ECC_FAILED)
1088                        *ecc_err = 1;
1089        }
1090
1091        /* Check for set block count errors */
1092        if (brq->sbc.error)
1093                return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1094                                prev_cmd_status_valid, status);
1095
1096        /* Check for r/w command errors */
1097        if (brq->cmd.error)
1098                return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1099                                prev_cmd_status_valid, status);
1100
1101        /* Data errors */
1102        if (!brq->stop.error)
1103                return ERR_CONTINUE;
1104
1105        /* Now for stop errors.  These aren't fatal to the transfer. */
1106        pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1107               req->rq_disk->disk_name, brq->stop.error,
1108               brq->cmd.resp[0], status);
1109
1110        /*
 1111         * Substitute in our own stop status as this will give the error
1112         * state which happened during the execution of the r/w command.
1113         */
1114        if (stop_status) {
1115                brq->stop.resp[0] = stop_status;
1116                brq->stop.error = 0;
1117        }
1118        return ERR_CONTINUE;
1119}
1120
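/*
 * Reset the card/host at most once per request type and re-select the
 * partition that was in use.  Returns -ENODEV if the partition cannot be
 * restored, as the whole request must then be aborted.
 */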
1121static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1122                         int type)
1123{
1124        int err;
1125
1126        if (md->reset_done & type)
1127                return -EEXIST;
1128
1129        md->reset_done |= type;
1130        err = mmc_hw_reset(host);
1131        /* Ensure we switch back to the correct partition */
1132        if (err != -EOPNOTSUPP) {
1133                struct mmc_blk_data *main_md =
1134                        dev_get_drvdata(&host->card->dev);
1135                int part_err;
1136
1137                main_md->part_curr = main_md->part_type;
1138                part_err = mmc_blk_part_switch(host->card, md);
1139                if (part_err) {
1140                        /*
1141                         * We have failed to get back into the correct
1142                         * partition, so we need to abort the whole request.
1143                         */
1144                        return -ENODEV;
1145                }
1146        }
1147        return err;
1148}
1149
1150static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1151{
1152        md->reset_done &= ~type;
1153}
1154
1155int mmc_access_rpmb(struct mmc_queue *mq)
1156{
1157        struct mmc_blk_data *md = mq->data;
1158        /*
1159         * If this is a RPMB partition access, return ture
 1160         * If this is an RPMB partition access, return true.
1161        if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1162                return true;
1163
1164        return false;
1165}
1166
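/*
 * Handle a DISCARD request by issuing the best supported erase operation
 * (discard, trim or erase), retrying once after a successful reset on -EIO.
 */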
1167static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1168{
1169        struct mmc_blk_data *md = mq->data;
1170        struct mmc_card *card = md->queue.card;
1171        unsigned int from, nr, arg;
1172        int err = 0, type = MMC_BLK_DISCARD;
1173
1174        if (!mmc_can_erase(card)) {
1175                err = -EOPNOTSUPP;
1176                goto out;
1177        }
1178
1179        from = blk_rq_pos(req);
1180        nr = blk_rq_sectors(req);
1181
1182        if (mmc_can_discard(card))
1183                arg = MMC_DISCARD_ARG;
1184        else if (mmc_can_trim(card))
1185                arg = MMC_TRIM_ARG;
1186        else
1187                arg = MMC_ERASE_ARG;
1188retry:
1189        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1190                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1191                                 INAND_CMD38_ARG_EXT_CSD,
1192                                 arg == MMC_TRIM_ARG ?
1193                                 INAND_CMD38_ARG_TRIM :
1194                                 INAND_CMD38_ARG_ERASE,
1195                                 0);
1196                if (err)
1197                        goto out;
1198        }
1199        err = mmc_erase(card, from, nr, arg);
1200out:
1201        if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1202                goto retry;
1203        if (!err)
1204                mmc_blk_reset_success(md, type);
1205        blk_end_request(req, err, blk_rq_bytes(req));
1206
1207        return err ? 0 : 1;
1208}
1209
1210static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1211                                       struct request *req)
1212{
1213        struct mmc_blk_data *md = mq->data;
1214        struct mmc_card *card = md->queue.card;
1215        unsigned int from, nr, arg;
1216        int err = 0, type = MMC_BLK_SECDISCARD;
1217
1218        if (!(mmc_can_secure_erase_trim(card))) {
1219                err = -EOPNOTSUPP;
1220                goto out;
1221        }
1222
1223        from = blk_rq_pos(req);
1224        nr = blk_rq_sectors(req);
1225
1226        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
1227                arg = MMC_SECURE_TRIM1_ARG;
1228        else
1229                arg = MMC_SECURE_ERASE_ARG;
1230
1231retry:
1232        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1233                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1234                                 INAND_CMD38_ARG_EXT_CSD,
1235                                 arg == MMC_SECURE_TRIM1_ARG ?
1236                                 INAND_CMD38_ARG_SECTRIM1 :
1237                                 INAND_CMD38_ARG_SECERASE,
1238                                 0);
1239                if (err)
1240                        goto out_retry;
1241        }
1242
1243        err = mmc_erase(card, from, nr, arg);
1244        if (err == -EIO)
1245                goto out_retry;
1246        if (err)
1247                goto out;
1248
1249        if (arg == MMC_SECURE_TRIM1_ARG) {
1250                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1251                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1252                                         INAND_CMD38_ARG_EXT_CSD,
1253                                         INAND_CMD38_ARG_SECTRIM2,
1254                                         0);
1255                        if (err)
1256                                goto out_retry;
1257                }
1258
1259                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1260                if (err == -EIO)
1261                        goto out_retry;
1262                if (err)
1263                        goto out;
1264        }
1265
1266out_retry:
1267        if (err && !mmc_blk_reset(md, card->host, type))
1268                goto retry;
1269        if (!err)
1270                mmc_blk_reset_success(md, type);
1271out:
1272        blk_end_request(req, err, blk_rq_bytes(req));
1273
1274        return err ? 0 : 1;
1275}
1276
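/* Handle a flush request by flushing the card's internal cache. */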
1277static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1278{
1279        struct mmc_blk_data *md = mq->data;
1280        struct mmc_card *card = md->queue.card;
1281        int ret = 0;
1282
1283        ret = mmc_flush_cache(card);
1284        if (ret)
1285                ret = -EIO;
1286
1287        blk_end_request_all(req, ret);
1288
1289        return ret ? 0 : 1;
1290}
1291
1292/*
1293 * Reformat current write as a reliable write, supporting
1294 * both legacy and the enhanced reliable write MMC cards.
1295 * In each transfer we'll handle only as much as a single
 1296 * reliable write can handle, thus finishing the request in
1297 * partial completions.
1298 */
1299static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1300                                    struct mmc_card *card,
1301                                    struct request *req)
1302{
1303        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1304                /* Legacy mode imposes restrictions on transfers. */
1305                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1306                        brq->data.blocks = 1;
1307
1308                if (brq->data.blocks > card->ext_csd.rel_sectors)
1309                        brq->data.blocks = card->ext_csd.rel_sectors;
1310                else if (brq->data.blocks < card->ext_csd.rel_sectors)
1311                        brq->data.blocks = 1;
1312        }
1313}
1314
1315#define CMD_ERRORS                                                      \
1316        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
1317         R1_ADDRESS_ERROR |     /* Misaligned address */                \
1318         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
1319         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
1320         R1_CC_ERROR |          /* Card controller error */             \
1321         R1_ERROR)              /* General/unknown error */
1322
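/*
 * Post-process a completed read/write request: run command error recovery if
 * needed, check the R1 status bits, wait out the programming state on writes
 * and map the outcome onto an MMC_BLK_* status code.
 */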
1323static int mmc_blk_err_check(struct mmc_card *card,
1324                             struct mmc_async_req *areq)
1325{
1326        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1327                                                    mmc_active);
1328        struct mmc_blk_request *brq = &mq_mrq->brq;
1329        struct request *req = mq_mrq->req;
1330        int need_retune = card->host->need_retune;
1331        int ecc_err = 0, gen_err = 0;
1332
1333        /*
1334         * sbc.error indicates a problem with the set block count
1335         * command.  No data will have been transferred.
1336         *
1337         * cmd.error indicates a problem with the r/w command.  No
1338         * data will have been transferred.
1339         *
1340         * stop.error indicates a problem with the stop command.  Data
1341         * may have been transferred, or may still be transferring.
1342         */
1343        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1344            brq->data.error) {
1345                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1346                case ERR_RETRY:
1347                        return MMC_BLK_RETRY;
1348                case ERR_ABORT:
1349                        return MMC_BLK_ABORT;
1350                case ERR_NOMEDIUM:
1351                        return MMC_BLK_NOMEDIUM;
1352                case ERR_CONTINUE:
1353                        break;
1354                }
1355        }
1356
1357        /*
1358         * Check for errors relating to the execution of the
1359         * initial command - such as address errors.  No data
1360         * has been transferred.
1361         */
1362        if (brq->cmd.resp[0] & CMD_ERRORS) {
1363                pr_err("%s: r/w command failed, status = %#x\n",
1364                       req->rq_disk->disk_name, brq->cmd.resp[0]);
1365                return MMC_BLK_ABORT;
1366        }
1367
1368        /*
1369         * Everything else is either success, or a data error of some
1370         * kind.  If it was a write, we may have transitioned to
 1371         * program mode, and we have to wait for that to complete.
1372         */
1373        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1374                int err;
1375
1376                /* Check stop command response */
1377                if (brq->stop.resp[0] & R1_ERROR) {
1378                        pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1379                               req->rq_disk->disk_name, __func__,
1380                               brq->stop.resp[0]);
1381                        gen_err = 1;
1382                }
1383
1384                err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
1385                                        &gen_err);
1386                if (err)
1387                        return MMC_BLK_CMD_ERR;
1388        }
1389
1390        /* if general error occurs, retry the write operation. */
1391        if (gen_err) {
1392                pr_warn("%s: retrying write for general error\n",
1393                                req->rq_disk->disk_name);
1394                return MMC_BLK_RETRY;
1395        }
1396
1397        if (brq->data.error) {
1398                if (need_retune && !brq->retune_retry_done) {
1399                        pr_debug("%s: retrying because a re-tune was needed\n",
1400                                 req->rq_disk->disk_name);
1401                        brq->retune_retry_done = 1;
1402                        return MMC_BLK_RETRY;
1403                }
1404                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1405                       req->rq_disk->disk_name, brq->data.error,
1406                       (unsigned)blk_rq_pos(req),
1407                       (unsigned)blk_rq_sectors(req),
1408                       brq->cmd.resp[0], brq->stop.resp[0]);
1409
1410                if (rq_data_dir(req) == READ) {
1411                        if (ecc_err)
1412                                return MMC_BLK_ECC_ERR;
1413                        return MMC_BLK_DATA_ERR;
1414                } else {
1415                        return MMC_BLK_CMD_ERR;
1416                }
1417        }
1418
1419        if (!brq->data.bytes_xfered)
1420                return MMC_BLK_RETRY;
1421
1422        if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1423                if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1424                        return MMC_BLK_PARTIAL;
1425                else
1426                        return MMC_BLK_SUCCESS;
1427        }
1428
1429        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1430                return MMC_BLK_PARTIAL;
1431
1432        return MMC_BLK_SUCCESS;
1433}
1434
1435static int mmc_blk_packed_err_check(struct mmc_card *card,
1436                                    struct mmc_async_req *areq)
1437{
1438        struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1439                        mmc_active);
1440        struct request *req = mq_rq->req;
1441        struct mmc_packed *packed = mq_rq->packed;
1442        int err, check, status;
1443        u8 *ext_csd;
1444
1445        packed->retries--;
1446        check = mmc_blk_err_check(card, areq);
1447        err = get_card_status(card, &status, 0);
1448        if (err) {
1449                pr_err("%s: error %d sending status command\n",
1450                       req->rq_disk->disk_name, err);
1451                return MMC_BLK_ABORT;
1452        }
1453
1454        if (status & R1_EXCEPTION_EVENT) {
1455                err = mmc_get_ext_csd(card, &ext_csd);
1456                if (err) {
1457                        pr_err("%s: error %d sending ext_csd\n",
1458                               req->rq_disk->disk_name, err);
1459                        return MMC_BLK_ABORT;
1460                }
1461
1462                if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1463                     EXT_CSD_PACKED_FAILURE) &&
1464                    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1465                     EXT_CSD_PACKED_GENERIC_ERROR)) {
1466                        if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1467                            EXT_CSD_PACKED_INDEXED_ERROR) {
1468                                packed->idx_failure =
1469                                  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1470                                check = MMC_BLK_PARTIAL;
1471                        }
1472                        pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1473                               "failure index: %d\n",
1474                               req->rq_disk->disk_name, packed->nr_entries,
1475                               packed->blocks, packed->idx_failure);
1476                }
1477                kfree(ext_csd);
1478        }
1479
1480        return check;
1481}
1482
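/*
 * Build the mmc_blk_request for a read/write request: pick single- or
 * multi-block opcodes, set up the optional STOP command, and apply
 * reliable-write and data-tag handling.
 */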
1483static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1484                               struct mmc_card *card,
1485                               int disable_multi,
1486                               struct mmc_queue *mq)
1487{
1488        u32 readcmd, writecmd;
1489        struct mmc_blk_request *brq = &mqrq->brq;
1490        struct request *req = mqrq->req;
1491        struct mmc_blk_data *md = mq->data;
1492        bool do_data_tag;
1493
1494        /*
1495         * Reliable writes are used to implement Forced Unit Access and
1496         * are supported only on MMCs.
1497         */
1498        bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1499                (rq_data_dir(req) == WRITE) &&
1500                (md->flags & MMC_BLK_REL_WR);
1501
1502        memset(brq, 0, sizeof(struct mmc_blk_request));
1503        brq->mrq.cmd = &brq->cmd;
1504        brq->mrq.data = &brq->data;
1505
1506        brq->cmd.arg = blk_rq_pos(req);
1507        if (!mmc_card_blockaddr(card))
1508                brq->cmd.arg <<= 9;
1509        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1510        brq->data.blksz = 512;
1511        brq->stop.opcode = MMC_STOP_TRANSMISSION;
1512        brq->stop.arg = 0;
1513        brq->data.blocks = blk_rq_sectors(req);
1514
1515        /*
1516         * The block layer doesn't support all sector count
 1517         * restrictions, so we need to be prepared for requests
 1518         * that are too big.
1519         */
1520        if (brq->data.blocks > card->host->max_blk_count)
1521                brq->data.blocks = card->host->max_blk_count;
1522
1523        if (brq->data.blocks > 1) {
1524                /*
1525                 * After a read error, we redo the request one sector
1526                 * at a time in order to accurately determine which
1527                 * sectors can be read successfully.
1528                 */
1529                if (disable_multi)
1530                        brq->data.blocks = 1;
1531
1532                /*
1533                 * Some controllers have HW issues while operating
1534                 * in multi-block I/O mode.
1535                 */
1536                if (card->host->ops->multi_io_quirk)
1537                        brq->data.blocks = card->host->ops->multi_io_quirk(card,
1538                                                (rq_data_dir(req) == READ) ?
1539                                                MMC_DATA_READ : MMC_DATA_WRITE,
1540                                                brq->data.blocks);
1541        }
1542
1543        if (brq->data.blocks > 1 || do_rel_wr) {
1544                /* SPI multiblock writes terminate using a special
1545                 * token, not a STOP_TRANSMISSION request.
1546                 */
1547                if (!mmc_host_is_spi(card->host) ||
1548                    rq_data_dir(req) == READ)
1549                        brq->mrq.stop = &brq->stop;
1550                readcmd = MMC_READ_MULTIPLE_BLOCK;
1551                writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1552        } else {
1553                brq->mrq.stop = NULL;
1554                readcmd = MMC_READ_SINGLE_BLOCK;
1555                writecmd = MMC_WRITE_BLOCK;
1556        }
1557        if (rq_data_dir(req) == READ) {
1558                brq->cmd.opcode = readcmd;
1559                brq->data.flags = MMC_DATA_READ;
1560                if (brq->mrq.stop)
1561                        brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
1562                                        MMC_CMD_AC;
1563        } else {
1564                brq->cmd.opcode = writecmd;
1565                brq->data.flags = MMC_DATA_WRITE;
1566                if (brq->mrq.stop)
1567                        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
1568                                        MMC_CMD_AC;
1569        }
1570
1571        if (do_rel_wr)
1572                mmc_apply_rel_rw(brq, card, req);
1573
1574        /*
1575         * The data tag is only used when writing metadata, to speed up
1576         * the write and any subsequent reads of that metadata.
1577         */
1578        do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1579                (req->cmd_flags & REQ_META) &&
1580                (rq_data_dir(req) == WRITE) &&
1581                ((brq->data.blocks * brq->data.blksz) >=
1582                 card->ext_csd.data_tag_unit_size);
1583
1584        /*
1585         * Pre-defined multi-block transfers are preferable to
1586         * open-ended ones (and necessary for reliable writes).
1587         * However, it is not sufficient to just send CMD23,
1588         * and avoid the final CMD12, as on an error condition
1589         * CMD12 (stop) needs to be sent anyway. This, coupled
1590         * with Auto-CMD23 enhancements provided by some
1591         * hosts, means that the complexity of dealing
1592         * with this is best left to the host. If CMD23 is
1593         * supported by card and host, we'll fill sbc in and let
1594         * the host deal with handling it correctly. This means
1595         * that for hosts that don't expose MMC_CAP_CMD23, no
1596         * change of behavior will be observed.
1597         *
1598         * N.B.: Some MMC cards experience performance degradation.
1599         * We'll avoid using CMD23-bounded multiblock writes for
1600         * these, while retaining features like reliable writes.
1601         */
1602        if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1603            (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1604             do_data_tag)) {
1605                brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1606                brq->sbc.arg = brq->data.blocks |
1607                        (do_rel_wr ? (1 << 31) : 0) |
1608                        (do_data_tag ? (1 << 29) : 0);
1609                brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1610                brq->mrq.sbc = &brq->sbc;
1611        }
1612
1613        mmc_set_data_timeout(&brq->data, card);
1614
1615        brq->data.sg = mqrq->sg;
1616        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1617
1618        /*
1619         * Adjust the sg list so it is the same size as the
1620         * request.
1621         */
1622        if (brq->data.blocks != blk_rq_sectors(req)) {
1623                int i, data_size = brq->data.blocks << 9;
1624                struct scatterlist *sg;
1625
1626                for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1627                        data_size -= sg->length;
1628                        if (data_size <= 0) {
1629                                sg->length += data_size;
1630                                i++;
1631                                break;
1632                        }
1633                }
1634                brq->data.sg_len = i;
1635        }
1636
1637        mqrq->mmc_active.mrq = &brq->mrq;
1638        mqrq->mmc_active.err_check = mmc_blk_err_check;
1639
1640        mmc_queue_bounce_pre(mqrq);
1641}
1642
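    /*
     * Work out how many scatter/gather segments are needed to hold the
     * packed command header.  The header occupies one "sector" (4KB on
     * large-sector cards, 512 bytes otherwise) and may have to be split
     * if the queue's maximum segment size is smaller than that.
     */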
1643static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1644                                          struct mmc_card *card)
1645{
1646        unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1647        unsigned int max_seg_sz = queue_max_segment_size(q);
1648        unsigned int len, nr_segs = 0;
1649
1650        do {
1651                len = min(hdr_sz, max_seg_sz);
1652                hdr_sz -= len;
1653                nr_segs++;
1654        } while (hdr_sz);
1655
1656        return nr_segs;
1657}
1658
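    /*
     * Try to build a packed command starting from @req.  Only writes
     * are packed, and only when both the host and the card support
     * packed writes.  Subsequent requests are pulled off the queue as
     * long as they have the same direction, are not special (discard,
     * secure erase, flush) requests and fit within the host's block
     * count and segment limits.  Returns the number of packed entries,
     * or zero if @req has to be issued on its own.
     */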
1659static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1660{
1661        struct request_queue *q = mq->queue;
1662        struct mmc_card *card = mq->card;
1663        struct request *cur = req, *next = NULL;
1664        struct mmc_blk_data *md = mq->data;
1665        struct mmc_queue_req *mqrq = mq->mqrq_cur;
1666        bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1667        unsigned int req_sectors = 0, phys_segments = 0;
1668        unsigned int max_blk_count, max_phys_segs;
1669        bool put_back = true;
1670        u8 max_packed_rw = 0;
1671        u8 reqs = 0;
1672
1673        /*
1674         * If no packed structure was allocated, set MMC_PACKED_NONE
1675         * and return zero so that no further packed handling is
1676         * attempted for this request.  Also clear the
1677         * MMC_BLK_PACKED_CMD flag so that packed cleanup is not
1678         * attempted when the block device is removed.
1679         */
1680        if (!mqrq->packed) {
1681                md->flags &= (~MMC_BLK_PACKED_CMD);
1682                goto no_packed;
1683        }
1684
1685        if (!(md->flags & MMC_BLK_PACKED_CMD))
1686                goto no_packed;
1687
1688        if ((rq_data_dir(cur) == WRITE) &&
1689            mmc_host_packed_wr(card->host))
1690                max_packed_rw = card->ext_csd.max_packed_writes;
1691
1692        if (max_packed_rw == 0)
1693                goto no_packed;
1694
1695        if (mmc_req_rel_wr(cur) &&
1696            (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1697                goto no_packed;
1698
1699        if (mmc_large_sector(card) &&
1700            !IS_ALIGNED(blk_rq_sectors(cur), 8))
1701                goto no_packed;
1702
1703        mmc_blk_clear_packed(mqrq);
1704
1705        max_blk_count = min(card->host->max_blk_count,
1706                            card->host->max_req_size >> 9);
1707        if (unlikely(max_blk_count > 0xffff))
1708                max_blk_count = 0xffff;
1709
1710        max_phys_segs = queue_max_segments(q);
1711        req_sectors += blk_rq_sectors(cur);
1712        phys_segments += cur->nr_phys_segments;
1713
1714        if (rq_data_dir(cur) == WRITE) {
1715                req_sectors += mmc_large_sector(card) ? 8 : 1;
1716                phys_segments += mmc_calc_packed_hdr_segs(q, card);
1717        }
1718
1719        do {
1720                if (reqs >= max_packed_rw - 1) {
1721                        put_back = false;
1722                        break;
1723                }
1724
1725                spin_lock_irq(q->queue_lock);
1726                next = blk_fetch_request(q);
1727                spin_unlock_irq(q->queue_lock);
1728                if (!next) {
1729                        put_back = false;
1730                        break;
1731                }
1732
1733                if (mmc_large_sector(card) &&
1734                    !IS_ALIGNED(blk_rq_sectors(next), 8))
1735                        break;
1736
1737                if (req_op(next) == REQ_OP_DISCARD ||
1738                    req_op(next) == REQ_OP_SECURE_ERASE ||
1739                    req_op(next) == REQ_OP_FLUSH)
1740                        break;
1741
1742                if (rq_data_dir(cur) != rq_data_dir(next))
1743                        break;
1744
1745                if (mmc_req_rel_wr(next) &&
1746                    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1747                        break;
1748
1749                req_sectors += blk_rq_sectors(next);
1750                if (req_sectors > max_blk_count)
1751                        break;
1752
1753                phys_segments += next->nr_phys_segments;
1754                if (phys_segments > max_phys_segs)
1755                        break;
1756
1757                list_add_tail(&next->queuelist, &mqrq->packed->list);
1758                cur = next;
1759                reqs++;
1760        } while (1);
1761
1762        if (put_back) {
1763                spin_lock_irq(q->queue_lock);
1764                blk_requeue_request(q, next);
1765                spin_unlock_irq(q->queue_lock);
1766        }
1767
1768        if (reqs > 0) {
1769                list_add(&req->queuelist, &mqrq->packed->list);
1770                mqrq->packed->nr_entries = ++reqs;
1771                mqrq->packed->retries = reqs;
1772                return reqs;
1773        }
1774
1775no_packed:
1776        mqrq->cmd_type = MMC_PACKED_NONE;
1777        return 0;
1778}
1779
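    /*
     * Prepare a packed write: build the packed command header (version,
     * number of entries, and the CMD23/CMD25 arguments for each entry)
     * and set up a single CMD23 + WRITE_MULTIPLE_BLOCK transfer that
     * covers the header plus the data of every packed request.
     */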
1780static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1781                                        struct mmc_card *card,
1782                                        struct mmc_queue *mq)
1783{
1784        struct mmc_blk_request *brq = &mqrq->brq;
1785        struct request *req = mqrq->req;
1786        struct request *prq;
1787        struct mmc_blk_data *md = mq->data;
1788        struct mmc_packed *packed = mqrq->packed;
1789        bool do_rel_wr, do_data_tag;
1790        __le32 *packed_cmd_hdr;
1791        u8 hdr_blocks;
1792        u8 i = 1;
1793
1794        mqrq->cmd_type = MMC_PACKED_WRITE;
1795        packed->blocks = 0;
1796        packed->idx_failure = MMC_PACKED_NR_IDX;
1797
1798        packed_cmd_hdr = packed->cmd_hdr;
1799        memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1800        packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1801                (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
1802        hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1803
1804        /*
1805         * Argument for each entry of packed group
1806         */
1807        list_for_each_entry(prq, &packed->list, queuelist) {
1808                do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1809                do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1810                        (prq->cmd_flags & REQ_META) &&
1811                        (rq_data_dir(prq) == WRITE) &&
1812                        blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
1813                /* Argument of CMD23 */
1814                packed_cmd_hdr[(i * 2)] = cpu_to_le32(
1815                        (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1816                        (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1817                        blk_rq_sectors(prq));
1818                /* Argument of CMD18 or CMD25 */
1819                packed_cmd_hdr[(i * 2) + 1] = cpu_to_le32(
1820                        mmc_card_blockaddr(card) ?
1821                        blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
1822                packed->blocks += blk_rq_sectors(prq);
1823                i++;
1824        }
1825
1826        memset(brq, 0, sizeof(struct mmc_blk_request));
1827        brq->mrq.cmd = &brq->cmd;
1828        brq->mrq.data = &brq->data;
1829        brq->mrq.sbc = &brq->sbc;
1830        brq->mrq.stop = &brq->stop;
1831
1832        brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1833        brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
1834        brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1835
1836        brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1837        brq->cmd.arg = blk_rq_pos(req);
1838        if (!mmc_card_blockaddr(card))
1839                brq->cmd.arg <<= 9;
1840        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1841
1842        brq->data.blksz = 512;
1843        brq->data.blocks = packed->blocks + hdr_blocks;
1844        brq->data.flags = MMC_DATA_WRITE;
1845
1846        brq->stop.opcode = MMC_STOP_TRANSMISSION;
1847        brq->stop.arg = 0;
1848        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1849
1850        mmc_set_data_timeout(&brq->data, card);
1851
1852        brq->data.sg = mqrq->sg;
1853        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1854
1855        mqrq->mmc_active.mrq = &brq->mrq;
1856        mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1857
1858        mmc_queue_bounce_pre(mqrq);
1859}
1860
1861static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1862                           struct mmc_blk_request *brq, struct request *req,
1863                           int ret)
1864{
1865        struct mmc_queue_req *mq_rq;
1866        mq_rq = container_of(brq, struct mmc_queue_req, brq);
1867
1868        /*
1869         * If this is an SD card and we're writing, we can first
1870         * mark the known good sectors as ok.
1871         *
1872         * If the card is not SD, we can still acknowledge the sectors
1873         * written as reported by the controller (which might be less
1874         * than the real number of written sectors, but never more).
1875         */
1876        if (mmc_card_sd(card)) {
1877                u32 blocks;
1878
1879                blocks = mmc_sd_num_wr_blocks(card);
1880                if (blocks != (u32)-1) {
1881                        ret = blk_end_request(req, 0, blocks << 9);
1882                }
1883        } else {
1884                if (!mmc_packed_cmd(mq_rq->cmd_type))
1885                        ret = blk_end_request(req, 0, brq->data.bytes_xfered);
1886        }
1887        return ret;
1888}
1889
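    /*
     * Complete the entries of a packed request that were transferred
     * successfully.  If the card reported an indexed failure, stop at
     * the failing entry, make it the current request and return
     * non-zero so that it and the following entries can be retried.
     */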
1890static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
1891{
1892        struct request *prq;
1893        struct mmc_packed *packed = mq_rq->packed;
1894        int idx = packed->idx_failure, i = 0;
1895        int ret = 0;
1896
1897        while (!list_empty(&packed->list)) {
1898                prq = list_entry_rq(packed->list.next);
1899                if (idx == i) {
1900                        /* retry from error index */
1901                        packed->nr_entries -= idx;
1902                        mq_rq->req = prq;
1903                        ret = 1;
1904
1905                        if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
1906                                list_del_init(&prq->queuelist);
1907                                mmc_blk_clear_packed(mq_rq);
1908                        }
1909                        return ret;
1910                }
1911                list_del_init(&prq->queuelist);
1912                blk_end_request(prq, 0, blk_rq_bytes(prq));
1913                i++;
1914        }
1915
1916        mmc_blk_clear_packed(mq_rq);
1917        return ret;
1918}
1919
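    /*
     * Fail every entry of a packed request with -EIO and release the
     * packed resources.
     */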
1920static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
1921{
1922        struct request *prq;
1923        struct mmc_packed *packed = mq_rq->packed;
1924
1925        while (!list_empty(&packed->list)) {
1926                prq = list_entry_rq(packed->list.next);
1927                list_del_init(&prq->queuelist);
1928                blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1929        }
1930
1931        mmc_blk_clear_packed(mq_rq);
1932}
1933
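    /*
     * Put the packed entries back onto the request queue so they can
     * be reissued, except for the original request at the head of the
     * list, which the caller re-prepares and restarts itself.
     */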
1934static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
1935                                      struct mmc_queue_req *mq_rq)
1936{
1937        struct request *prq;
1938        struct request_queue *q = mq->queue;
1939        struct mmc_packed *packed = mq_rq->packed;
1940
1941        while (!list_empty(&packed->list)) {
1942                prq = list_entry_rq(packed->list.prev);
1943                if (prq->queuelist.prev != &packed->list) {
1944                        list_del_init(&prq->queuelist);
1945                        spin_lock_irq(q->queue_lock);
1946                        blk_requeue_request(mq->queue, prq);
1947                        spin_unlock_irq(q->queue_lock);
1948                } else {
1949                        list_del_init(&prq->queuelist);
1950                }
1951        }
1952
1953        mmc_blk_clear_packed(mq_rq);
1954}
1955
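    /*
     * Issue a read/write request.  @rqc is the new request to start,
     * or NULL to only finish a previously started asynchronous
     * request.  Requests are started asynchronously via
     * mmc_start_req(); the result of the request that just completed
     * is then examined and, on error, it is retried, retried one
     * sector at a time, or aborted as appropriate.
     */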
1956static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1957{
1958        struct mmc_blk_data *md = mq->data;
1959        struct mmc_card *card = md->queue.card;
1960        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1961        int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
1962        enum mmc_blk_status status;
1963        struct mmc_queue_req *mq_rq;
1964        struct request *req = rqc;
1965        struct mmc_async_req *areq;
1966        const u8 packed_nr = 2;
1967        u8 reqs = 0;
1968
1969        if (!rqc && !mq->mqrq_prev->req)
1970                return 0;
1971
1972        if (rqc)
1973                reqs = mmc_blk_prep_packed_list(mq, rqc);
1974
1975        do {
1976                if (rqc) {
1977                        /*
1978                         * When the 4KB native sector size is enabled, reads and
1979                         * writes must be multiples of 8 blocks.
1980                         */
1981                        if (mmc_large_sector(card) &&
1982                                !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
1983                                pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1984                                        req->rq_disk->disk_name);
1985                                mq_rq = mq->mqrq_cur;
1986                                goto cmd_abort;
1987                        }
1988
1989                        if (reqs >= packed_nr)
1990                                mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1991                                                            card, mq);
1992                        else
1993                                mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1994                        areq = &mq->mqrq_cur->mmc_active;
1995                } else
1996                        areq = NULL;
1997                areq = mmc_start_req(card->host, areq, (int *) &status);
1998                if (!areq) {
1999                        if (status == MMC_BLK_NEW_REQUEST)
2000                                mq->flags |= MMC_QUEUE_NEW_REQUEST;
2001                        return 0;
2002                }
2003
2004                mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2005                brq = &mq_rq->brq;
2006                req = mq_rq->req;
2007                type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2008                mmc_queue_bounce_post(mq_rq);
2009
2010                switch (status) {
2011                case MMC_BLK_SUCCESS:
2012                case MMC_BLK_PARTIAL:
2013                        /*
2014                         * A block was successfully transferred.
2015                         */
2016                        mmc_blk_reset_success(md, type);
2017
2018                        if (mmc_packed_cmd(mq_rq->cmd_type)) {
2019                                ret = mmc_blk_end_packed_req(mq_rq);
2020                                break;
2021                        } else {
2022                                ret = blk_end_request(req, 0,
2023                                                brq->data.bytes_xfered);
2024                        }
2025
2026                        /*
2027                         * If the blk_end_request function returns non-zero even
2028                         * though all data has been transferred and no errors
2029                         * were returned by the host controller, it's a bug.
2030                         */
2031                        if (status == MMC_BLK_SUCCESS && ret) {
2032                                pr_err("%s BUG rq_tot %d d_xfer %d\n",
2033                                       __func__, blk_rq_bytes(req),
2034                                       brq->data.bytes_xfered);
2035                                rqc = NULL;
2036                                goto cmd_abort;
2037                        }
2038                        break;
2039                case MMC_BLK_CMD_ERR:
2040                        ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2041                        if (mmc_blk_reset(md, card->host, type))
2042                                goto cmd_abort;
2043                        if (!ret)
2044                                goto start_new_req;
2045                        break;
2046                case MMC_BLK_RETRY:
2047                        retune_retry_done = brq->retune_retry_done;
2048                        if (retry++ < 5)
2049                                break;
2050                        /* Fall through */
2051                case MMC_BLK_ABORT:
2052                        if (!mmc_blk_reset(md, card->host, type))
2053                                break;
2054                        goto cmd_abort;
2055                case MMC_BLK_DATA_ERR: {
2056                        int err;
2057
2058                        err = mmc_blk_reset(md, card->host, type);
2059                        if (!err)
2060                                break;
2061                        if (err == -ENODEV ||
2062                                mmc_packed_cmd(mq_rq->cmd_type))
2063                                goto cmd_abort;
2064                        /* Fall through */
2065                }
2066                case MMC_BLK_ECC_ERR:
2067                        if (brq->data.blocks > 1) {
2068                                /* Redo read one sector at a time */
2069                                pr_warn("%s: retrying using single block read\n",
2070                                        req->rq_disk->disk_name);
2071                                disable_multi = 1;
2072                                break;
2073                        }
2074                        /*
2075                         * After an error, we redo I/O one sector at a
2076                         * time, so we only reach here after trying to
2077                         * read a single sector.
2078                         */
2079                        ret = blk_end_request(req, -EIO,
2080                                                brq->data.blksz);
2081                        if (!ret)
2082                                goto start_new_req;
2083                        break;
2084                case MMC_BLK_NOMEDIUM:
2085                        goto cmd_abort;
2086                default:
2087                        pr_err("%s: Unhandled return value (%d)",
2088                                        req->rq_disk->disk_name, status);
2089                        goto cmd_abort;
2090                }
2091
2092                if (ret) {
2093                        if (mmc_packed_cmd(mq_rq->cmd_type)) {
2094                                if (!mq_rq->packed->retries)
2095                                        goto cmd_abort;
2096                                mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2097                                mmc_start_req(card->host,
2098                                              &mq_rq->mmc_active, NULL);
2099                        } else {
2100
2101                                /*
2102                                 * In case of an incomplete request,
2103                                 * prepare it again and resend.
2104                                 */
2105                                mmc_blk_rw_rq_prep(mq_rq, card,
2106                                                disable_multi, mq);
2107                                mmc_start_req(card->host,
2108                                                &mq_rq->mmc_active, NULL);
2109                        }
2110                        mq_rq->brq.retune_retry_done = retune_retry_done;
2111                }
2112        } while (ret);
2113
2114        return 1;
2115
2116 cmd_abort:
2117        if (mmc_packed_cmd(mq_rq->cmd_type)) {
2118                mmc_blk_abort_packed_req(mq_rq);
2119        } else {
2120                if (mmc_card_removed(card))
2121                        req->cmd_flags |= REQ_QUIET;
2122                while (ret)
2123                        ret = blk_end_request(req, -EIO,
2124                                        blk_rq_cur_bytes(req));
2125        }
2126
2127 start_new_req:
2128        if (rqc) {
2129                if (mmc_card_removed(card)) {
2130                        rqc->cmd_flags |= REQ_QUIET;
2131                        blk_end_request_all(rqc, -EIO);
2132                } else {
2133                        /*
2134                         * If the current request is packed, it needs to be put back.
2135                         */
2136                        if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2137                                mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2138
2139                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2140                        mmc_start_req(card->host,
2141                                      &mq->mqrq_cur->mmc_active, NULL);
2142                }
2143        }
2144
2145        return 0;
2146}
2147
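    /*
     * Entry point from the queue thread.  The host is claimed for the
     * first request of a burst, the correct partition is selected and
     * the request is dispatched to the discard, secure erase, flush or
     * read/write handler.  The host is released again once there are
     * no more requests or after a special request has finished.
     */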
2148int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2149{
2150        int ret;
2151        struct mmc_blk_data *md = mq->data;
2152        struct mmc_card *card = md->queue.card;
2153        struct mmc_host *host = card->host;
2154        unsigned long flags;
2155        bool req_is_special = mmc_req_is_special(req);
2156
2157        if (req && !mq->mqrq_prev->req)
2158                /* claim host only for the first request */
2159                mmc_get_card(card);
2160
2161        ret = mmc_blk_part_switch(card, md);
2162        if (ret) {
2163                if (req) {
2164                        blk_end_request_all(req, -EIO);
2165                }
2166                ret = 0;
2167                goto out;
2168        }
2169
2170        mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2171        if (req && req_op(req) == REQ_OP_DISCARD) {
2172                /* complete ongoing async transfer before issuing discard */
2173                if (card->host->areq)
2174                        mmc_blk_issue_rw_rq(mq, NULL);
2175                ret = mmc_blk_issue_discard_rq(mq, req);
2176        } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
2177                /* complete ongoing async transfer before issuing secure erase */
2178                if (card->host->areq)
2179                        mmc_blk_issue_rw_rq(mq, NULL);
2180                ret = mmc_blk_issue_secdiscard_rq(mq, req);
2181        } else if (req && req_op(req) == REQ_OP_FLUSH) {
2182                /* complete ongoing async transfer before issuing flush */
2183                if (card->host->areq)
2184                        mmc_blk_issue_rw_rq(mq, NULL);
2185                ret = mmc_blk_issue_flush(mq, req);
2186        } else {
2187                if (!req && host->areq) {
2188                        spin_lock_irqsave(&host->context_info.lock, flags);
2189                        host->context_info.is_waiting_last_req = true;
2190                        spin_unlock_irqrestore(&host->context_info.lock, flags);
2191                }
2192                ret = mmc_blk_issue_rw_rq(mq, req);
2193        }
2194
2195out:
2196        if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
2197                /*
2198                 * Release the host when there are no more requests
2199                 * and after a special request (discard, flush) is done.
2200                 * In the case of a special request there is no reentry
2201                 * into 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2202                 */
2203                mmc_put_card(card);
2204        return ret;
2205}
2206
2207static inline int mmc_blk_readonly(struct mmc_card *card)
2208{
2209        return mmc_card_readonly(card) ||
2210               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2211}
2212
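    /*
     * Allocate the per-device data for one block device: pick a device
     * index from the IDA, allocate the gendisk and request queue, set
     * the read-only state from the card, and enable CMD23, reliable
     * write and packed command support where the card and host allow
     * it.
     */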
2213static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2214                                              struct device *parent,
2215                                              sector_t size,
2216                                              bool default_ro,
2217                                              const char *subname,
2218                                              int area_type)
2219{
2220        struct mmc_blk_data *md;
2221        int devidx, ret;
2222
2223again:
2224        if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
2225                return ERR_PTR(-ENOMEM);
2226
2227        spin_lock(&mmc_blk_lock);
2228        ret = ida_get_new(&mmc_blk_ida, &devidx);
2229        spin_unlock(&mmc_blk_lock);
2230
2231        if (ret == -EAGAIN)
2232                goto again;
2233        else if (ret)
2234                return ERR_PTR(ret);
2235
2236        if (devidx >= max_devices) {
2237                ret = -ENOSPC;
2238                goto out;
2239        }
2240
2241        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2242        if (!md) {
2243                ret = -ENOMEM;
2244                goto out;
2245        }
2246
2247        md->area_type = area_type;
2248
2249        /*
2250         * Set the read-only status based on the supported commands
2251         * and the write protect switch.
2252         */
2253        md->read_only = mmc_blk_readonly(card);
2254
2255        md->disk = alloc_disk(perdev_minors);
2256        if (md->disk == NULL) {
2257                ret = -ENOMEM;
2258                goto err_kfree;
2259        }
2260
2261        spin_lock_init(&md->lock);
2262        INIT_LIST_HEAD(&md->part);
2263        md->usage = 1;
2264
2265        ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2266        if (ret)
2267                goto err_putdisk;
2268
2269        md->queue.data = md;
2270
2271        md->disk->major = MMC_BLOCK_MAJOR;
2272        md->disk->first_minor = devidx * perdev_minors;
2273        md->disk->fops = &mmc_bdops;
2274        md->disk->private_data = md;
2275        md->disk->queue = md->queue.queue;
2276        md->parent = parent;
2277        set_disk_ro(md->disk, md->read_only || default_ro);
2278        md->disk->flags = GENHD_FL_EXT_DEVT;
2279        if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2280                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2281
2282        /*
2283         * As discussed on lkml, GENHD_FL_REMOVABLE should:
2284         *
2285         * - be set for removable media with permanent block devices
2286         * - be unset for removable block devices with permanent media
2287         *
2288         * Since MMC block devices clearly fall under the second
2289         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2290         * should use the block device creation/destruction hotplug
2291         * messages to tell when the card is present.
2292         */
2293
2294        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2295                 "mmcblk%u%s", card->host->index, subname ? subname : "");
2296
2297        if (mmc_card_mmc(card))
2298                blk_queue_logical_block_size(md->queue.queue,
2299                                             card->ext_csd.data_sector_size);
2300        else
2301                blk_queue_logical_block_size(md->queue.queue, 512);
2302
2303        set_capacity(md->disk, size);
2304
2305        if (mmc_host_cmd23(card->host)) {
2306                if ((mmc_card_mmc(card) &&
2307                     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2308                    (mmc_card_sd(card) &&
2309                     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2310                        md->flags |= MMC_BLK_CMD23;
2311        }
2312
2313        if (mmc_card_mmc(card) &&
2314            md->flags & MMC_BLK_CMD23 &&
2315            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2316             card->ext_csd.rel_sectors)) {
2317                md->flags |= MMC_BLK_REL_WR;
2318                blk_queue_write_cache(md->queue.queue, true, true);
2319        }
2320
2321        if (mmc_card_mmc(card) &&
2322            (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2323            (md->flags & MMC_BLK_CMD23) &&
2324            card->ext_csd.packed_event_en) {
2325                if (!mmc_packed_init(&md->queue, card))
2326                        md->flags |= MMC_BLK_PACKED_CMD;
2327        }
2328
2329        return md;
2330
2331 err_putdisk:
2332        put_disk(md->disk);
2333 err_kfree:
2334        kfree(md);
2335 out:
2336        spin_lock(&mmc_blk_lock);
2337        ida_remove(&mmc_blk_ida, devidx);
2338        spin_unlock(&mmc_blk_lock);
2339        return ERR_PTR(ret);
2340}
2341
2342static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2343{
2344        sector_t size;
2345
2346        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2347                /*
2348         * The EXT_CSD sector count is in number of 512 byte
2349                 * sectors.
2350                 */
2351                size = card->ext_csd.sectors;
2352        } else {
2353                /*
2354                 * The CSD capacity field is in units of read_blkbits.
2355                 * set_capacity takes units of 512 bytes.
2356                 */
2357                size = (typeof(sector_t))card->csd.capacity
2358                        << (card->csd.read_blkbits - 9);
2359        }
2360
2361        return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2362                                        MMC_BLK_DATA_AREA_MAIN);
2363}
2364
2365static int mmc_blk_alloc_part(struct mmc_card *card,
2366                              struct mmc_blk_data *md,
2367                              unsigned int part_type,
2368                              sector_t size,
2369                              bool default_ro,
2370                              const char *subname,
2371                              int area_type)
2372{
2373        char cap_str[10];
2374        struct mmc_blk_data *part_md;
2375
2376        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2377                                    subname, area_type);
2378        if (IS_ERR(part_md))
2379                return PTR_ERR(part_md);
2380        part_md->part_type = part_type;
2381        list_add(&part_md->part, &md->part);
2382
2383        string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
2384                        cap_str, sizeof(cap_str));
2385        pr_info("%s: %s %s partition %u %s\n",
2386               part_md->disk->disk_name, mmc_card_id(card),
2387               mmc_card_name(card), part_md->part_type, cap_str);
2388        return 0;
2389}
2390
2391/* MMC physical partitions consist of two boot partitions and
2392 * up to four general purpose partitions.
2393 * For each partition enabled in EXT_CSD a block device will be allocated
2394 * to provide access to the partition.
2395 */
2396
2397static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2398{
2399        int idx, ret = 0;
2400
2401        if (!mmc_card_mmc(card))
2402                return 0;
2403
2404        for (idx = 0; idx < card->nr_parts; idx++) {
2405                if (card->part[idx].size) {
2406                        ret = mmc_blk_alloc_part(card, md,
2407                                card->part[idx].part_cfg,
2408                                card->part[idx].size >> 9,
2409                                card->part[idx].force_ro,
2410                                card->part[idx].name,
2411                                card->part[idx].area_type);
2412                        if (ret)
2413                                return ret;
2414                }
2415        }
2416
2417        return ret;
2418}
2419
2420static void mmc_blk_remove_req(struct mmc_blk_data *md)
2421{
2422        struct mmc_card *card;
2423
2424        if (md) {
2425                /*
2426                 * Flush remaining requests and free queues. It
2427                 * is freeing the queue that stops new requests
2428                 * from being accepted.
2429                 */
2430                card = md->queue.card;
2431                mmc_cleanup_queue(&md->queue);
2432                if (md->flags & MMC_BLK_PACKED_CMD)
2433                        mmc_packed_clean(&md->queue);
2434                if (md->disk->flags & GENHD_FL_UP) {
2435                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2436                        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2437                                        card->ext_csd.boot_ro_lockable)
2438                                device_remove_file(disk_to_dev(md->disk),
2439                                        &md->power_ro_lock);
2440
2441                        del_gendisk(md->disk);
2442                }
2443                mmc_blk_put(md);
2444        }
2445}
2446
2447static void mmc_blk_remove_parts(struct mmc_card *card,
2448                                 struct mmc_blk_data *md)
2449{
2450        struct list_head *pos, *q;
2451        struct mmc_blk_data *part_md;
2452
2453        list_for_each_safe(pos, q, &md->part) {
2454                part_md = list_entry(pos, struct mmc_blk_data, part);
2455                list_del(pos);
2456                mmc_blk_remove_req(part_md);
2457        }
2458}
2459
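    /*
     * Register the gendisk and create the "force_ro" sysfs attribute,
     * plus "ro_lock_until_next_power_on" for boot areas that support
     * power-on write protection.
     */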
2460static int mmc_add_disk(struct mmc_blk_data *md)
2461{
2462        int ret;
2463        struct mmc_card *card = md->queue.card;
2464
2465        device_add_disk(md->parent, md->disk);
2466        md->force_ro.show = force_ro_show;
2467        md->force_ro.store = force_ro_store;
2468        sysfs_attr_init(&md->force_ro.attr);
2469        md->force_ro.attr.name = "force_ro";
2470        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2471        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2472        if (ret)
2473                goto force_ro_fail;
2474
2475        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2476             card->ext_csd.boot_ro_lockable) {
2477                umode_t mode;
2478
2479                if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2480                        mode = S_IRUGO;
2481                else
2482                        mode = S_IRUGO | S_IWUSR;
2483
2484                md->power_ro_lock.show = power_ro_lock_show;
2485                md->power_ro_lock.store = power_ro_lock_store;
2486                sysfs_attr_init(&md->power_ro_lock.attr);
2487                md->power_ro_lock.attr.mode = mode;
2488                md->power_ro_lock.attr.name =
2489                                        "ro_lock_until_next_power_on";
2490                ret = device_create_file(disk_to_dev(md->disk),
2491                                &md->power_ro_lock);
2492                if (ret)
2493                        goto power_ro_lock_fail;
2494        }
2495        return ret;
2496
2497power_ro_lock_fail:
2498        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2499force_ro_fail:
2500        del_gendisk(md->disk);
2501
2502        return ret;
2503}
2504
2505static const struct mmc_fixup blk_fixups[] =
2506{
2507        MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2508                  MMC_QUIRK_INAND_CMD38),
2509        MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2510                  MMC_QUIRK_INAND_CMD38),
2511        MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2512                  MMC_QUIRK_INAND_CMD38),
2513        MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2514                  MMC_QUIRK_INAND_CMD38),
2515        MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2516                  MMC_QUIRK_INAND_CMD38),
2517
2518        /*
2519         * Some MMC cards experience performance degradation with CMD23
2520         * instead of CMD12-bounded multiblock transfers. For now we'll
2521         * blacklist what's bad...
2522         * - Certain Toshiba cards.
2523         *
2524         * N.B. This doesn't affect SD cards.
2525         */
2526        MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2527                  MMC_QUIRK_BLK_NO_CMD23),
2528        MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
2529                  MMC_QUIRK_BLK_NO_CMD23),
2530        MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2531                  MMC_QUIRK_BLK_NO_CMD23),
2532        MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2533                  MMC_QUIRK_BLK_NO_CMD23),
2534        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2535                  MMC_QUIRK_BLK_NO_CMD23),
2536
2537        /*
2538         * Some MMC cards need longer data read timeout than indicated in CSD.
2539         */
2540        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
2541                  MMC_QUIRK_LONG_READ_TIME),
2542        MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
2543                  MMC_QUIRK_LONG_READ_TIME),
2544
2545        /*
2546         * On these Samsung MoviNAND parts, performing secure erase or
2547         * secure trim can result in unrecoverable corruption due to a
2548         * firmware bug.
2549         */
2550        MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2551                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2552        MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2553                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2554        MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2555                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2556        MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2557                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2558        MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2559                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2560        MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2561                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2562        MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2563                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2564        MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2565                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2566
2567        /*
2568         * On some Kingston eMMCs, performing trim can occasionally
2569         * result in unrecoverable data corruption due to a firmware bug.
2570         */
2571        MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2572                  MMC_QUIRK_TRIM_BROKEN),
2573        MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
2574                  MMC_QUIRK_TRIM_BROKEN),
2575
2576        END_FIXUP
2577};
2578
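    /*
     * Probe a newly inserted card: check that it supports block reads,
     * apply the quirk table, allocate the main block device and any
     * hardware partitions, register the disks and enable runtime PM.
     */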
2579static int mmc_blk_probe(struct mmc_card *card)
2580{
2581        struct mmc_blk_data *md, *part_md;
2582        char cap_str[10];
2583
2584        /*
2585         * Check that the card supports the command class(es) we need.
2586         */
2587        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2588                return -ENODEV;
2589
2590        mmc_fixup_device(card, blk_fixups);
2591
2592        md = mmc_blk_alloc(card);
2593        if (IS_ERR(md))
2594                return PTR_ERR(md);
2595
2596        string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
2597                        cap_str, sizeof(cap_str));
2598        pr_info("%s: %s %s %s %s\n",
2599                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2600                cap_str, md->read_only ? "(ro)" : "");
2601
2602        if (mmc_blk_alloc_parts(card, md))
2603                goto out;
2604
2605        dev_set_drvdata(&card->dev, md);
2606
2607        if (mmc_add_disk(md))
2608                goto out;
2609
2610        list_for_each_entry(part_md, &md->part, part) {
2611                if (mmc_add_disk(part_md))
2612                        goto out;
2613        }
2614
2615        pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2616        pm_runtime_use_autosuspend(&card->dev);
2617
2618        /*
2619         * Don't enable runtime PM for SD-combo cards here. Leave that
2620         * decision to be taken during the SDIO init sequence instead.
2621         */
2622        if (card->type != MMC_TYPE_SD_COMBO) {
2623                pm_runtime_set_active(&card->dev);
2624                pm_runtime_enable(&card->dev);
2625        }
2626
2627        return 0;
2628
2629 out:
2630        mmc_blk_remove_parts(card, md);
2631        mmc_blk_remove_req(md);
2632        return 0;
2633}
2634
2635static void mmc_blk_remove(struct mmc_card *card)
2636{
2637        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2638
2639        mmc_blk_remove_parts(card, md);
2640        pm_runtime_get_sync(&card->dev);
2641        mmc_claim_host(card->host);
2642        mmc_blk_part_switch(card, md);
2643        mmc_release_host(card->host);
2644        if (card->type != MMC_TYPE_SD_COMBO)
2645                pm_runtime_disable(&card->dev);
2646        pm_runtime_put_noidle(&card->dev);
2647        mmc_blk_remove_req(md);
2648        dev_set_drvdata(&card->dev, NULL);
2649}
2650
2651static int _mmc_blk_suspend(struct mmc_card *card)
2652{
2653        struct mmc_blk_data *part_md;
2654        struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2655
2656        if (md) {
2657                mmc_queue_suspend(&md->queue);
2658                list_for_each_entry(part_md, &md->part, part) {
2659                        mmc_queue_suspend(&part_md->queue);
2660                }
2661        }
2662        return 0;
2663}
2664
2665static void mmc_blk_shutdown(struct mmc_card *card)
2666{
2667        _mmc_blk_suspend(card);
2668}
2669
2670#ifdef CONFIG_PM_SLEEP
2671static int mmc_blk_suspend(struct device *dev)
2672{
2673        struct mmc_card *card = mmc_dev_to_card(dev);
2674
2675        return _mmc_blk_suspend(card);
2676}
2677
2678static int mmc_blk_resume(struct device *dev)
2679{
2680        struct mmc_blk_data *part_md;
2681        struct mmc_blk_data *md = dev_get_drvdata(dev);
2682
2683        if (md) {
2684                /*
2685                 * Resume involves the card going into idle state,
2686                 * so current partition is always the main one.
2687                 */
2688                 * so the current partition is always the main one.
2689                mmc_queue_resume(&md->queue);
2690                list_for_each_entry(part_md, &md->part, part) {
2691                        mmc_queue_resume(&part_md->queue);
2692                }
2693        }
2694        return 0;
2695}
2696#endif
2697
2698static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2699
2700static struct mmc_driver mmc_driver = {
2701        .drv            = {
2702                .name   = "mmcblk",
2703                .pm     = &mmc_blk_pm_ops,
2704        },
2705        .probe          = mmc_blk_probe,
2706        .remove         = mmc_blk_remove,
2707        .shutdown       = mmc_blk_shutdown,
2708};
2709
2710static int __init mmc_blk_init(void)
2711{
2712        int res;
2713
2714        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2715                pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2716
2717        max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2718
2719        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2720        if (res)
2721                goto out;
2722
2723        res = mmc_register_driver(&mmc_driver);
2724        if (res)
2725                goto out2;
2726
2727        return 0;
2728 out2:
2729        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2730 out:
2731        return res;
2732}
2733
2734static void __exit mmc_blk_exit(void)
2735{
2736        mmc_unregister_driver(&mmc_driver);
2737        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
2738}
2739
2740module_init(mmc_blk_init);
2741module_exit(mmc_blk_exit);
2742
2743MODULE_LICENSE("GPL");
2744MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2745
2746