linux/drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
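/*
 * Usage sketch (the value 16 is illustrative, not from the original
 * source): given the MODULE_PARAM_PREFIX above, this can be overridden
 * at boot with "mmcblk.perdev_minors=16" on the kernel command line, or
 * with "perdev_minors=16" when loading the module via modprobe.
 */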

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;
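/*
 * For example (illustrative): with CONFIG_MMC_BLOCK_MINORS = 8,
 * max_devices = 256 / 8 = 32 cards can be handled at once.
 */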

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;           /* protects request completion */
        struct gendisk  *disk;
        struct mmc_queue queue;

        unsigned int    usage;          /* reference count, under open_lock */
        unsigned int    read_only;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devmaj = MAJOR(disk_devt(md->disk));
                int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;

                if (!devmaj)
                        devidx = md->disk->first_minor / perdev_minors;

                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                /*
                 * usage == 2 means this is the first open: one
                 * reference from mmc_blk_alloc() plus ours.
                 */
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* Fake CHS geometry: 4 heads, 16 sectors per track. */
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
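/*
 * Worked example (illustrative numbers): a card with 4194304 512-byte
 * sectors (2 GiB) reports 4194304 / (4 * 16) = 65536 cylinders.
 */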

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
};

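/*
 * One bundle per block-layer request: the data-transfer command, an
 * optional STOP_TRANSMISSION, and the data descriptor, all referenced
 * from the enclosing mmc_request.
 */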
struct mmc_blk_request {
        struct mmc_request      mrq;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
};

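/*
 * Query an SD card for the number of successfully written blocks
 * (ACMD22, SD_APP_SEND_NUM_WR_BLKS): send APP_CMD to switch the card
 * into application-command mode, then read back a single big-endian
 * 32-bit count.  Returns (u32)-1 on any failure.
 */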
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        unsigned int timeout_us;

        struct scatterlist sg;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        memset(&data, 0, sizeof(struct mmc_data));

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }
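        /*
         * Worked example (illustrative numbers): timeout_ns = 10000000
         * contributes 10000 us; timeout_clks = 1000 on a 25 MHz bus
         * contributes 1000 * 1000 / (25000000 / 1000) = 40 us.  The
         * sum, 10040 us, is below the 100 ms clamp and is kept as-is.
         */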

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        mmc_claim_host(card->host);

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        mmc_release_host(card->host);

        return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        mmc_claim_host(card->host);

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG)
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        mmc_release_host(card->host);

        return err ? 0 : 1;
}

static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        mmc_claim_host(card->host);

        do {
                struct mmc_command cmd;
                u32 readcmd, writecmd, status = 0;

                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;      /* byte-addressed card: sector -> byte offset */
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;

                if (brq.data.blocks > 1) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request.
                         */
                        if (!mmc_host_is_spi(card->host)
                                        || rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }
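                /*
                 * Worked example (illustrative numbers): if the request
                 * covers 4 sectors but blocks was clamped to 2,
                 * data_size starts at 1024.  Walking sg entries of 512
                 * bytes each: after the first, data_size = 512 (> 0,
                 * keep going); after the second, data_size = 0, so that
                 * entry keeps its full length and sg_len becomes 2.
                 */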

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);

                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }

                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                (R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* 7 = programming state */

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }

                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        mmc_release_host(card->host);

        return 1;

 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still accept the written
         * sectors as reported by the controller (which might be less
         * than the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        mmc_release_host(card->host);

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        return mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        return mmc_blk_issue_discard_rq(mq, req);
        } else {
                return mmc_blk_issue_rw_rq(mq, req);
        }
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = &card->dev;

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                "mmcblk%d", devidx);

        blk_queue_logical_block_size(md->queue.queue, 512);

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                set_capacity(md->disk, card->ext_csd.sectors);
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                set_capacity(md->disk,
                        card->csd.capacity << (card->csd.read_blkbits - 9));
        }
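        /*
         * Worked example (illustrative numbers): with read_blkbits = 10
         * the CSD capacity counts 1024-byte blocks, so shifting left by
         * 10 - 9 = 1 doubles the count to express it in the 512-byte
         * sectors that set_capacity() expects.
         */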
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                        md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}

static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");

        mmc_set_drvdata(card, md);
        add_disk(md->disk);
        return 0;

 out:
        mmc_cleanup_queue(&md->queue);
        mmc_blk_put(md);

        return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                /* Stop new requests from getting into the queue */
                del_gendisk(md->disk);

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);

                mmc_blk_put(md);
        }
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);
                mmc_queue_resume(&md->queue);
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif

static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        /* Guard the division below against bogus module/boot parameters. */
        if (perdev_minors < 1 || perdev_minors > 256) {
                pr_err("mmcblk: invalid perdev_minors %d\n", perdev_minors);
                return -EINVAL;
        }

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");