linux/drivers/mtd/ubi/block.c
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */
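
/*
 * Worked example of the mapping above (illustrative numbers only): with a
 * usable LEB size of 126976 bytes, a read at 512-byte sector 300 addresses
 * byte 300 * 512 = 153600, which falls in LEB 153600 / 126976 = 1 at offset
 * 153600 % 126976 = 26624. This is exactly the do_div() split performed in
 * ubiblock_read() below.
 *
 * Runtime creation/removal can be driven from userspace roughly like this
 * (sketch only; see include/uapi/mtd/ubi-user.h for the exact request
 * structure passed to UBI_IOCVOLCRBLK):
 *
 *   struct ubi_blkcreate_req req = { };
 *   int fd = open("/dev/ubi0_0", O_RDONLY);
 *   ioctl(fd, UBI_IOCVOLCRBLK, &req);   creates /dev/ubiblock0_0
 *   ioctl(fd, UBI_IOCVOLRMBLK);         removes it again
 */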

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
        int ubi_num;
        int vol_id;
        char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
        struct work_struct work;
        struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
        struct ubi_volume_desc *desc;
        int ubi_num;
        int vol_id;
        int refcnt;
        int leb_size;

        struct gendisk *gd;
        struct request_queue *rq;

        struct workqueue_struct *wq;

        struct mutex dev_mutex;
        struct list_head list;
        struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

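/*
 * Parse one 'block=' module parameter. Each invocation fills the next slot
 * in @ubiblock_param; the devices themselves are created later, from
 * ubiblock_create_from_param(), once UBI is up.
 */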
static int __init ubiblock_set_param(const char *val,
                                     const struct kernel_param *kp)
{
        int i, ret;
        size_t len;
        struct ubiblock_param *param;
        char buf[UBIBLOCK_PARAM_LEN];
        char *pbuf = &buf[0];
        char *tokens[UBIBLOCK_PARAM_COUNT];

        if (!val)
                return -EINVAL;

        len = strnlen(val, UBIBLOCK_PARAM_LEN);
        if (len == 0) {
                pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
                return 0;
        }

        if (len == UBIBLOCK_PARAM_LEN) {
                pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
                       val, UBIBLOCK_PARAM_LEN);
                return -EINVAL;
        }

        strcpy(buf, val);

        /* Get rid of the final newline */
        if (buf[len - 1] == '\n')
                buf[len - 1] = '\0';

        for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
                tokens[i] = strsep(&pbuf, ",");

        /*
         * Don't overflow the fixed-size @ubiblock_param array when more than
         * UBIBLOCK_MAX_DEVICES 'block=' parameters are given.
         */
        if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
                pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
                       UBIBLOCK_MAX_DEVICES);
                return -EINVAL;
        }

        param = &ubiblock_param[ubiblock_devs];
        if (tokens[1]) {
                /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
                ret = kstrtoint(tokens[0], 10, &param->ubi_num);
                if (ret < 0)
                        return -EINVAL;

                /* Second param can be a number or a name */
                ret = kstrtoint(tokens[1], 10, &param->vol_id);
                if (ret < 0) {
                        param->vol_id = -1;
                        strcpy(param->name, tokens[1]);
                }

        } else {
                /* One parameter: must be device path */
                strcpy(param->name, tokens[0]);
                param->ubi_num = -1;
                param->vol_id = -1;
        }

        ubiblock_devs++;

        return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
        .set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
                        "Multiple \"block\" parameters may be specified.\n"
                        "UBI volumes may be specified by their number, name, or path to the device node.\n"
                        "Examples:\n"
                        "Using the UBI volume path:\n"
                        "ubi.block=/dev/ubi0_0\n"
                        "Using the UBI device, and the volume name:\n"
                        "ubi.block=0,rootfs\n"
                        "Using both UBI device number and UBI volume number:\n"
                        "ubi.block=0,0\n");

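/*
 * Look up a ubiblock device by UBI device and volume numbers. The caller
 * must hold devices_mutex, hence the _nolock suffix.
 */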
static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
        struct ubiblock *dev;

        list_for_each_entry(dev, &ubiblock_devices, list)
                if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
                        return dev;
        return NULL;
}

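/*
 * Service one read request. The scatter-gather list in @pdu has already
 * been mapped by ubiblock_do_work(); since UBI can only read within a
 * single LEB per call, requests crossing a LEB boundary are split into
 * several ubi_read_sg() calls. Returns zero or a negative UBI error code.
 */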
static int ubiblock_read(struct ubiblock_pdu *pdu)
{
        int ret, leb, offset, bytes_left, to_read;
        u64 pos;
        struct request *req = blk_mq_rq_from_pdu(pdu);
        struct ubiblock *dev = req->q->queuedata;

        to_read = blk_rq_bytes(req);
        pos = blk_rq_pos(req) << 9;

        /* Get LEB:offset address to read from */
        offset = do_div(pos, dev->leb_size);
        leb = pos;
        bytes_left = to_read;

        while (bytes_left) {
                /*
                 * We can only read one LEB at a time. Therefore if the read
                 * length is larger than one LEB size, we split the operation.
                 */
                if (offset + to_read > dev->leb_size)
                        to_read = dev->leb_size - offset;

                ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
                if (ret < 0)
                        return ret;

                bytes_left -= to_read;
                to_read = bytes_left;
                leb += 1;
                offset = 0;
        }
        return 0;
}

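/*
 * The underlying UBI volume is opened on first open and shared by all
 * subsequent openers via the reference count; it stays open until the
 * last ubiblock_release().
 */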
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
        struct ubiblock *dev = bdev->bd_disk->private_data;
        int ret;

        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                /*
                 * The volume is already open, just increase the reference
                 * counter.
                 */
                goto out_done;
        }

        /*
         * We want users to be aware they should only mount us as read-only.
         * It's just a paranoid check, as write requests will get rejected
         * in any case.
         */
        if (mode & FMODE_WRITE) {
                ret = -EPERM;
                goto out_unlock;
        }

        dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
        if (IS_ERR(dev->desc)) {
                dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
                        dev->ubi_num, dev->vol_id);
                ret = PTR_ERR(dev->desc);
                dev->desc = NULL;
                goto out_unlock;
        }

out_done:
        dev->refcnt++;
        mutex_unlock(&dev->dev_mutex);
        return 0;

out_unlock:
        mutex_unlock(&dev->dev_mutex);
        return ret;
}

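/* Drop one reference; the UBI volume is closed when the last opener leaves */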
static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
        struct ubiblock *dev = gd->private_data;

        mutex_lock(&dev->dev_mutex);
        dev->refcnt--;
        if (dev->refcnt == 0) {
                ubi_close_volume(dev->desc);
                dev->desc = NULL;
        }
        mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* Some tools might require this information */
        geo->heads = 1;
        geo->cylinders = 1;
        geo->sectors = get_capacity(bdev->bd_disk);
        geo->start = 0;
        return 0;
}

static const struct block_device_operations ubiblock_ops = {
        .owner = THIS_MODULE,
        .open = ubiblock_open,
        .release = ubiblock_release,
        .getgeo = ubiblock_getgeo,
};

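/*
 * Workqueue handler: runs once per queued request, in process context. It
 * maps the request's bios into the per-request scatter-gather list, performs
 * the actual UBI read and completes the request with the result.
 */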
static void ubiblock_do_work(struct work_struct *work)
{
        int ret;
        struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
        struct request *req = blk_mq_rq_from_pdu(pdu);

        blk_mq_start_request(req);

        /*
         * It is safe to ignore the return value of blk_rq_map_sg() because
         * the number of sg entries is limited to UBI_MAX_SG_COUNT
         * and ubi_read_sg() will check that limit.
         */
        blk_rq_map_sg(req->q, req, pdu->usgl.sg);

        ret = ubiblock_read(pdu);
        rq_flush_dcache_pages(req);

        blk_mq_end_request(req, ret);
}

static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                             const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct ubiblock *dev = hctx->queue->queuedata;
        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

        if (req->cmd_type != REQ_TYPE_FS)
                return BLK_MQ_RQ_QUEUE_ERROR;

        if (rq_data_dir(req) != READ)
                return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */

        ubi_sgl_init(&pdu->usgl);
        queue_work(dev->wq, &pdu->work);

        return BLK_MQ_RQ_QUEUE_OK;
}

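/*
 * Called once per request when the tag set is allocated: pre-initialize the
 * scatter-gather table and the work item embedded in the per-request PDU,
 * so the fast path in ubiblock_queue_rq() only has to reset the sg list
 * state via ubi_sgl_init().
 */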
static int ubiblock_init_request(void *data, struct request *req,
                                 unsigned int hctx_idx,
                                 unsigned int request_idx,
                                 unsigned int numa_node)
{
        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

        sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
        INIT_WORK(&pdu->work, ubiblock_do_work);

        return 0;
}

static struct blk_mq_ops ubiblock_mq_ops = {
        .queue_rq       = ubiblock_queue_rq,
        .init_request   = ubiblock_init_request,
        .map_queue      = blk_mq_map_queue,
};

static DEFINE_IDR(ubiblock_minor_idr);

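/*
 * Create a read-only block device for the volume described by @vi. The disk
 * capacity is the volume's used_bytes expressed in 512-byte sectors;
 * sector_t may be narrower than 64 bits on 32-bit systems, hence the
 * overflow check below.
 */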
int ubiblock_create(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        struct gendisk *gd;
        u64 disk_capacity = vi->used_bytes >> 9;
        int ret;

        if ((sector_t)disk_capacity != disk_capacity)
                return -EFBIG;
        /* Check that the volume isn't already handled */
        mutex_lock(&devices_mutex);
        if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
                mutex_unlock(&devices_mutex);
                return -EEXIST;
        }
        mutex_unlock(&devices_mutex);

        dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        mutex_init(&dev->dev_mutex);

        dev->ubi_num = vi->ubi_num;
        dev->vol_id = vi->vol_id;
        dev->leb_size = vi->usable_leb_size;

        /* Initialize the gendisk of this ubiblock device */
        gd = alloc_disk(1);
        if (!gd) {
                pr_err("UBI: block: alloc_disk failed");
                ret = -ENODEV;
                goto out_free_dev;
        }

        gd->fops = &ubiblock_ops;
        gd->major = ubiblock_major;
        gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
        if (gd->first_minor < 0) {
                dev_err(disk_to_dev(gd),
                        "block: dynamic minor allocation failed");
                ret = -ENODEV;
                goto out_put_disk;
        }
        gd->private_data = dev;
        sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
        set_capacity(gd, disk_capacity);
        dev->gd = gd;

        dev->tag_set.ops = &ubiblock_mq_ops;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.numa_node = NUMA_NO_NODE;
        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
        dev->tag_set.driver_data = dev;
        dev->tag_set.nr_hw_queues = 1;

        ret = blk_mq_alloc_tag_set(&dev->tag_set);
        if (ret) {
                dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
                goto out_remove_minor;
        }

        dev->rq = blk_mq_init_queue(&dev->tag_set);
        if (IS_ERR(dev->rq)) {
                dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
                ret = PTR_ERR(dev->rq);
                goto out_free_tags;
        }
        blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

        dev->rq->queuedata = dev;
        dev->gd->queue = dev->rq;

        /*
         * Create one workqueue per volume (per registered block device).
         * Remember, workqueues are cheap, they're not threads.
         */
        dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq) {
                ret = -ENOMEM;
                goto out_free_queue;
        }

        mutex_lock(&devices_mutex);
        list_add_tail(&dev->list, &ubiblock_devices);
        mutex_unlock(&devices_mutex);

        /* Must be the last step: anyone can call file ops from now on */
        add_disk(dev->gd);
        dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
                 dev->ubi_num, dev->vol_id, vi->name);
        return 0;

out_free_queue:
        blk_cleanup_queue(dev->rq);
out_free_tags:
        blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
        idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
        put_disk(dev->gd);
out_free_dev:
        kfree(dev);

        return ret;
}

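/*
 * Undo ubiblock_create() in reverse order: stop new I/O, flush what is
 * pending, then release the queue, tag set, minor number and gendisk. The
 * caller must have removed @dev from the device list already.
 */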
static void ubiblock_cleanup(struct ubiblock *dev)
{
        /* Stop new requests from arriving */
        del_gendisk(dev->gd);
        /* Flush pending work */
        destroy_workqueue(dev->wq);
        /* Finally destroy the blk queue */
        blk_cleanup_queue(dev->rq);
        blk_mq_free_tag_set(&dev->tag_set);
        dev_info(disk_to_dev(dev->gd), "released");
        idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
        put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;

        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                mutex_unlock(&devices_mutex);
                return -ENODEV;
        }

        /* Found a device, let's lock it so we can check if it's busy */
        mutex_lock(&dev->dev_mutex);
        if (dev->refcnt > 0) {
                mutex_unlock(&dev->dev_mutex);
                mutex_unlock(&devices_mutex);
                return -EBUSY;
        }

        /* Remove from device list */
        list_del(&dev->list);
        mutex_unlock(&devices_mutex);

        ubiblock_cleanup(dev);
        mutex_unlock(&dev->dev_mutex);
        kfree(dev);
        return 0;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
        struct ubiblock *dev;
        u64 disk_capacity = vi->used_bytes >> 9;

        /*
         * Need to lock the device list until we stop using the device,
         * otherwise the device struct might get released in
         * 'ubiblock_remove()'.
         */
        mutex_lock(&devices_mutex);
        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
        if (!dev) {
                mutex_unlock(&devices_mutex);
                return -ENODEV;
        }
        if ((sector_t)disk_capacity != disk_capacity) {
                mutex_unlock(&devices_mutex);
                dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
                         vi->size);
                return -EFBIG;
        }

        mutex_lock(&dev->dev_mutex);

        if (get_capacity(dev->gd) != disk_capacity) {
                set_capacity(dev->gd, disk_capacity);
                dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
                         vi->used_bytes);
        }
        mutex_unlock(&dev->dev_mutex);
        mutex_unlock(&devices_mutex);
        return 0;
}

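/*
 * React to UBI volume events. Removal and resize are propagated to the
 * matching ubiblock device (if one exists); content updates of static
 * volumes may change used_bytes and are therefore treated as a resize.
 * Newly added volumes are deliberately ignored: block devices are only
 * created on explicit request.
 */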
static int ubiblock_notify(struct notifier_block *nb,
                         unsigned long notification_type, void *ns_ptr)
{
        struct ubi_notification *nt = ns_ptr;

        switch (notification_type) {
        case UBI_VOLUME_ADDED:
                /*
                 * We want to enforce explicit block device creation for
                 * volumes, so when a volume is added we do nothing.
                 */
                break;
        case UBI_VOLUME_REMOVED:
                ubiblock_remove(&nt->vi);
                break;
        case UBI_VOLUME_RESIZED:
                ubiblock_resize(&nt->vi);
                break;
        case UBI_VOLUME_UPDATED:
                /*
                 * If the volume is static, a content update might mean the
                 * size (i.e. used_bytes) was also changed.
                 */
                if (nt->vi.vol_type == UBI_STATIC_VOLUME)
                        ubiblock_resize(&nt->vi);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
        .notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
        if (ubi_num == -1)
                /* No ubi num, name must be a vol device path */
                return ubi_open_volume_path(name, UBI_READONLY);
        else if (vol_id == -1)
                /* No vol_id, must be vol_name */
                return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
        else
                return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
        int i, ret = 0;
        struct ubiblock_param *p;
        struct ubi_volume_desc *desc;
        struct ubi_volume_info vi;

        /*
         * If there is an error creating one of the ubiblocks, continue on to
         * create the following ubiblocks. This helps in a circumstance where
         * the kernel command-line specifies multiple block devices and some
         * may be broken, but we still want the working ones to come up.
         */
        for (i = 0; i < ubiblock_devs; i++) {
                p = &ubiblock_param[i];

                desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
                if (IS_ERR(desc)) {
                        pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld",
                               p->ubi_num, p->vol_id, PTR_ERR(desc));
                        continue;
                }

                ubi_get_volume_info(desc, &vi);
                ubi_close_volume(desc);

                ret = ubiblock_create(&vi);
                if (ret) {
                        pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d",
                               vi.name, p->ubi_num, p->vol_id, ret);
                        continue;
                }
        }
}

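/*
 * Remove every remaining ubiblock device. Used on module exit and on init
 * failure; by then no device should still be open, hence the WARN_ON if a
 * volume descriptor is left behind.
 */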
static void ubiblock_remove_all(void)
{
        struct ubiblock *next;
        struct ubiblock *dev;

        list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
                /* The module is being forcefully removed */
                WARN_ON(dev->desc);
                /* Remove from device list */
                list_del(&dev->list);
                ubiblock_cleanup(dev);
                kfree(dev);
        }
}

int __init ubiblock_init(void)
{
        int ret;

        ubiblock_major = register_blkdev(0, "ubiblock");
        if (ubiblock_major < 0)
                return ubiblock_major;

        /*
         * Attach block devices from 'block=' module param.
         * Even if one block device in the param list fails to come up,
         * still allow the module to load and leave any others up.
         */
        ubiblock_create_from_param();

        /*
         * Block devices are only created upon user requests, so we ignore
         * existing volumes.
         */
        ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
        if (ret)
                goto err_unreg;
        return 0;

err_unreg:
        unregister_blkdev(ubiblock_major, "ubiblock");
        ubiblock_remove_all();
        return ret;
}

void __exit ubiblock_exit(void)
{
        ubi_unregister_volume_notifier(&ubiblock_notifier);
        ubiblock_remove_all();
        unregister_blkdev(ubiblock_major, "ubiblock");
}