/* linux/drivers/mtd/ubi/block.c */
   1/*
   2 * Copyright (c) 2014 Ezequiel Garcia
   3 * Copyright (c) 2011 Free Electrons
   4 *
   5 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
   6 *   Copyright (c) International Business Machines Corp., 2006
   7 *   Copyright (c) Nokia Corporation, 2007
   8 *   Authors: Artem Bityutskiy, Frank Haverkamp
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation, version 2.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  17 * the GNU General Public License for more details.
  18 */
  19
  20/*
  21 * Read-only block devices on top of UBI volumes
  22 *
  23 * A simple implementation to allow a block device to be layered on top of a
  24 * UBI volume. The implementation is provided by creating a static 1-to-1
  25 * mapping between the block device and the UBI volume.
  26 *
  27 * The addressed byte is obtained from the addressed block sector, which is
  28 * mapped linearly into the corresponding LEB:
  29 *
  30 *   LEB number = addressed byte / LEB size
  31 *
  32 * This feature is compiled in the UBI core, and adds a 'block' parameter
  33 * to allow early creation of block devices on top of UBI volumes. Runtime
  34 * block creation/removal for UBI volumes is provided through two UBI ioctls:
  35 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
  36 */
  37
  38#include <linux/module.h>
  39#include <linux/init.h>
  40#include <linux/err.h>
  41#include <linux/kernel.h>
  42#include <linux/list.h>
  43#include <linux/mutex.h>
  44#include <linux/slab.h>
  45#include <linux/mtd/ubi.h>
  46#include <linux/workqueue.h>
  47#include <linux/blkdev.h>
  48#include <linux/blk-mq.h>
  49#include <linux/hdreg.h>
  50#include <linux/scatterlist.h>
  51#include <linux/idr.h>
  52#include <asm/div64.h>
  53
  54#include "ubi-media.h"
  55#include "ubi.h"
  56
  57/* Maximum number of supported devices */
  58#define UBIBLOCK_MAX_DEVICES 32
  59
  60/* Maximum length of the 'block=' parameter */
  61#define UBIBLOCK_PARAM_LEN 63
  62
  63/* Maximum number of comma-separated items in the 'block=' parameter */
  64#define UBIBLOCK_PARAM_COUNT 2
  65
/* One parsed 'block=' parameter entry */
struct ubiblock_param {
	int ubi_num;	/* UBI device number, or -1 if selected by path */
	int vol_id;	/* volume id, or -1 if selected by name/path */
	char name[UBIBLOCK_PARAM_LEN+1];	/* volume name or device-node path */
};
  71
/* Per-request private data, allocated by blk-mq via tag_set.cmd_size */
struct ubiblock_pdu {
	struct work_struct work;	/* deferred read executed on dev->wq */
	struct ubi_sgl usgl;		/* scatterlist handed to ubi_read_sg() */
};
  76
  77/* Numbers of elements set in the @ubiblock_param array */
  78static int ubiblock_devs __initdata;
  79
  80/* MTD devices specification parameters */
  81static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
  82
/* One block device instance layered on top of a single UBI volume */
struct ubiblock {
	struct ubi_volume_desc *desc;	/* open volume handle; NULL while refcnt == 0 */
	int ubi_num;			/* UBI device number of the backing volume */
	int vol_id;			/* volume id of the backing volume */
	int refcnt;			/* open count, protected by dev_mutex */
	int leb_size;			/* usable LEB size; read addresses map linearly to LEBs */

	struct gendisk *gd;
	struct request_queue *rq;

	/* Per-volume workqueue on which read requests are processed */
	struct workqueue_struct *wq;

	struct mutex dev_mutex;		/* protects refcnt/desc and capacity updates */
	struct list_head list;		/* link in ubiblock_devices, under devices_mutex */
	struct blk_mq_tag_set tag_set;
};
  99
 100/* Linked list of all ubiblock instances */
 101static LIST_HEAD(ubiblock_devices);
 102static DEFINE_IDR(ubiblock_minor_idr);
 103/* Protects ubiblock_devices and ubiblock_minor_idr */
 104static DEFINE_MUTEX(devices_mutex);
 105static int ubiblock_major;
 106
 107static int __init ubiblock_set_param(const char *val,
 108                                     const struct kernel_param *kp)
 109{
 110        int i, ret;
 111        size_t len;
 112        struct ubiblock_param *param;
 113        char buf[UBIBLOCK_PARAM_LEN];
 114        char *pbuf = &buf[0];
 115        char *tokens[UBIBLOCK_PARAM_COUNT];
 116
 117        if (!val)
 118                return -EINVAL;
 119
 120        len = strnlen(val, UBIBLOCK_PARAM_LEN);
 121        if (len == 0) {
 122                pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
 123                return 0;
 124        }
 125
 126        if (len == UBIBLOCK_PARAM_LEN) {
 127                pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
 128                       val, UBIBLOCK_PARAM_LEN);
 129                return -EINVAL;
 130        }
 131
 132        strcpy(buf, val);
 133
 134        /* Get rid of the final newline */
 135        if (buf[len - 1] == '\n')
 136                buf[len - 1] = '\0';
 137
 138        for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
 139                tokens[i] = strsep(&pbuf, ",");
 140
 141        param = &ubiblock_param[ubiblock_devs];
 142        if (tokens[1]) {
 143                /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
 144                ret = kstrtoint(tokens[0], 10, &param->ubi_num);
 145                if (ret < 0)
 146                        return -EINVAL;
 147
 148                /* Second param can be a number or a name */
 149                ret = kstrtoint(tokens[1], 10, &param->vol_id);
 150                if (ret < 0) {
 151                        param->vol_id = -1;
 152                        strcpy(param->name, tokens[1]);
 153                }
 154
 155        } else {
 156                /* One parameter: must be device path */
 157                strcpy(param->name, tokens[0]);
 158                param->ubi_num = -1;
 159                param->vol_id = -1;
 160        }
 161
 162        ubiblock_devs++;
 163
 164        return 0;
 165}
 166
/* Custom parameter handler: each 'block=' occurrence appends one entry */
static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
 181
 182static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
 183{
 184        struct ubiblock *dev;
 185
 186        list_for_each_entry(dev, &ubiblock_devices, list)
 187                if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
 188                        return dev;
 189        return NULL;
 190}
 191
 192static int ubiblock_read(struct ubiblock_pdu *pdu)
 193{
 194        int ret, leb, offset, bytes_left, to_read;
 195        u64 pos;
 196        struct request *req = blk_mq_rq_from_pdu(pdu);
 197        struct ubiblock *dev = req->q->queuedata;
 198
 199        to_read = blk_rq_bytes(req);
 200        pos = blk_rq_pos(req) << 9;
 201
 202        /* Get LEB:offset address to read from */
 203        offset = do_div(pos, dev->leb_size);
 204        leb = pos;
 205        bytes_left = to_read;
 206
 207        while (bytes_left) {
 208                /*
 209                 * We can only read one LEB at a time. Therefore if the read
 210                 * length is larger than one LEB size, we split the operation.
 211                 */
 212                if (offset + to_read > dev->leb_size)
 213                        to_read = dev->leb_size - offset;
 214
 215                ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
 216                if (ret < 0)
 217                        return ret;
 218
 219                bytes_left -= to_read;
 220                to_read = bytes_left;
 221                leb += 1;
 222                offset = 0;
 223        }
 224        return 0;
 225}
 226
 227static int ubiblock_open(struct block_device *bdev, fmode_t mode)
 228{
 229        struct ubiblock *dev = bdev->bd_disk->private_data;
 230        int ret;
 231
 232        mutex_lock(&dev->dev_mutex);
 233        if (dev->refcnt > 0) {
 234                /*
 235                 * The volume is already open, just increase the reference
 236                 * counter.
 237                 */
 238                goto out_done;
 239        }
 240
 241        /*
 242         * We want users to be aware they should only mount us as read-only.
 243         * It's just a paranoid check, as write requests will get rejected
 244         * in any case.
 245         */
 246        if (mode & FMODE_WRITE) {
 247                ret = -EROFS;
 248                goto out_unlock;
 249        }
 250
 251        dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
 252        if (IS_ERR(dev->desc)) {
 253                dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
 254                        dev->ubi_num, dev->vol_id);
 255                ret = PTR_ERR(dev->desc);
 256                dev->desc = NULL;
 257                goto out_unlock;
 258        }
 259
 260out_done:
 261        dev->refcnt++;
 262        mutex_unlock(&dev->dev_mutex);
 263        return 0;
 264
 265out_unlock:
 266        mutex_unlock(&dev->dev_mutex);
 267        return ret;
 268}
 269
 270static void ubiblock_release(struct gendisk *gd, fmode_t mode)
 271{
 272        struct ubiblock *dev = gd->private_data;
 273
 274        mutex_lock(&dev->dev_mutex);
 275        dev->refcnt--;
 276        if (dev->refcnt == 0) {
 277                ubi_close_volume(dev->desc);
 278                dev->desc = NULL;
 279        }
 280        mutex_unlock(&dev->dev_mutex);
 281}
 282
 283static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 284{
 285        /* Some tools might require this information */
 286        geo->heads = 1;
 287        geo->cylinders = 1;
 288        geo->sectors = get_capacity(bdev->bd_disk);
 289        geo->start = 0;
 290        return 0;
 291}
 292
/* Block device operations: read-only, no ioctl beyond geometry */
static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo = ubiblock_getgeo,
};
 299
/*
 * Workqueue handler: map the request into the pdu's scatterlist, perform
 * the UBI read, and complete the request with the translated status.
 */
static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	/* Flush the data cache over the request's pages before completion */
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}
 320
 321static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 322                             const struct blk_mq_queue_data *bd)
 323{
 324        struct request *req = bd->rq;
 325        struct ubiblock *dev = hctx->queue->queuedata;
 326        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 327
 328        switch (req_op(req)) {
 329        case REQ_OP_READ:
 330                ubi_sgl_init(&pdu->usgl);
 331                queue_work(dev->wq, &pdu->work);
 332                return BLK_STS_OK;
 333        default:
 334                return BLK_STS_IOERR;
 335        }
 336
 337}
 338
 339static int ubiblock_init_request(struct blk_mq_tag_set *set,
 340                struct request *req, unsigned int hctx_idx,
 341                unsigned int numa_node)
 342{
 343        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 344
 345        sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
 346        INIT_WORK(&pdu->work, ubiblock_do_work);
 347
 348        return 0;
 349}
 350
/* blk-mq operations for ubiblock request queues */
static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request   = ubiblock_init_request,
};
 355
/*
 * Create a read-only block device on top of the UBI volume described by
 * @vi. Allocates the gendisk, minor number, blk-mq tag set, request
 * queue and per-volume workqueue, then registers the disk.
 *
 * Returns 0 on success, -EEXIST if the volume already has a block
 * device, -EFBIG if the capacity does not fit in sector_t, or another
 * negative error. On failure all partially acquired resources are
 * released via the goto ladder below (in reverse acquisition order).
 */
int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	/* Capacity in 512-byte sectors */
	u64 disk_capacity = vi->used_bytes >> 9;
	int ret;

	/* Refuse capacities that overflow sector_t (32-bit configs) */
	if ((sector_t)disk_capacity != disk_capacity)
		return -EFBIG;
	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	/* Dynamic minor allocation; also used to find the dev from its minor */
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_put_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	/* Per-request pdu carries the work item and scatterlist */
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
		goto out_remove_minor;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}
 465
/*
 * Tear down everything ubiblock_create() built, in reverse order.
 * Does not free @dev itself; both callers hold devices_mutex.
 */
static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests to arrive */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released");
	/* Return the dynamically allocated minor */
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
}
 479
/*
 * Remove the block device layered on the volume described by @vi.
 *
 * Returns 0 on success, -ENODEV if no such block device exists, or
 * -EBUSY if the device is still open. Takes devices_mutex to pin the
 * device, then dev_mutex to check the open count.
 */
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	/* Safe now: the device is off the list and fully torn down */
	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}
 514
 515static int ubiblock_resize(struct ubi_volume_info *vi)
 516{
 517        struct ubiblock *dev;
 518        u64 disk_capacity = vi->used_bytes >> 9;
 519
 520        /*
 521         * Need to lock the device list until we stop using the device,
 522         * otherwise the device struct might get released in
 523         * 'ubiblock_remove()'.
 524         */
 525        mutex_lock(&devices_mutex);
 526        dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
 527        if (!dev) {
 528                mutex_unlock(&devices_mutex);
 529                return -ENODEV;
 530        }
 531        if ((sector_t)disk_capacity != disk_capacity) {
 532                mutex_unlock(&devices_mutex);
 533                dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
 534                         vi->size);
 535                return -EFBIG;
 536        }
 537
 538        mutex_lock(&dev->dev_mutex);
 539
 540        if (get_capacity(dev->gd) != disk_capacity) {
 541                set_capacity(dev->gd, disk_capacity);
 542                dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
 543                         vi->used_bytes);
 544        }
 545        mutex_unlock(&dev->dev_mutex);
 546        mutex_unlock(&devices_mutex);
 547        return 0;
 548}
 549
 550static int ubiblock_notify(struct notifier_block *nb,
 551                         unsigned long notification_type, void *ns_ptr)
 552{
 553        struct ubi_notification *nt = ns_ptr;
 554
 555        switch (notification_type) {
 556        case UBI_VOLUME_ADDED:
 557                /*
 558                 * We want to enforce explicit block device creation for
 559                 * volumes, so when a volume is added we do nothing.
 560                 */
 561                break;
 562        case UBI_VOLUME_REMOVED:
 563                ubiblock_remove(&nt->vi);
 564                break;
 565        case UBI_VOLUME_RESIZED:
 566                ubiblock_resize(&nt->vi);
 567                break;
 568        case UBI_VOLUME_UPDATED:
 569                /*
 570                 * If the volume is static, a content update might mean the
 571                 * size (i.e. used_bytes) was also changed.
 572                 */
 573                if (nt->vi.vol_type == UBI_STATIC_VOLUME)
 574                        ubiblock_resize(&nt->vi);
 575                break;
 576        default:
 577                break;
 578        }
 579        return NOTIFY_OK;
 580}
 581
/* Registered with the UBI core in ubiblock_init() */
static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};
 585
 586static struct ubi_volume_desc * __init
 587open_volume_desc(const char *name, int ubi_num, int vol_id)
 588{
 589        if (ubi_num == -1)
 590                /* No ubi num, name must be a vol device path */
 591                return ubi_open_volume_path(name, UBI_READONLY);
 592        else if (vol_id == -1)
 593                /* No vol_id, must be vol_name */
 594                return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
 595        else
 596                return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
 597}
 598
 599static void __init ubiblock_create_from_param(void)
 600{
 601        int i, ret = 0;
 602        struct ubiblock_param *p;
 603        struct ubi_volume_desc *desc;
 604        struct ubi_volume_info vi;
 605
 606        /*
 607         * If there is an error creating one of the ubiblocks, continue on to
 608         * create the following ubiblocks. This helps in a circumstance where
 609         * the kernel command-line specifies multiple block devices and some
 610         * may be broken, but we still want the working ones to come up.
 611         */
 612        for (i = 0; i < ubiblock_devs; i++) {
 613                p = &ubiblock_param[i];
 614
 615                desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
 616                if (IS_ERR(desc)) {
 617                        pr_err(
 618                               "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
 619                               p->ubi_num, p->vol_id, PTR_ERR(desc));
 620                        continue;
 621                }
 622
 623                ubi_get_volume_info(desc, &vi);
 624                ubi_close_volume(desc);
 625
 626                ret = ubiblock_create(&vi);
 627                if (ret) {
 628                        pr_err(
 629                               "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
 630                               vi.name, p->ubi_num, p->vol_id, ret);
 631                        continue;
 632                }
 633        }
 634}
 635
 636static void ubiblock_remove_all(void)
 637{
 638        struct ubiblock *next;
 639        struct ubiblock *dev;
 640
 641        mutex_lock(&devices_mutex);
 642        list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
 643                /* The module is being forcefully removed */
 644                WARN_ON(dev->desc);
 645                /* Remove from device list */
 646                list_del(&dev->list);
 647                ubiblock_cleanup(dev);
 648                kfree(dev);
 649        }
 650        mutex_unlock(&devices_mutex);
 651}
 652
/*
 * Module init: register the block major, create any devices requested
 * via 'block=' parameters, and subscribe to UBI volume notifications.
 *
 * Returns 0 on success or a negative error; on failure everything set
 * up here is torn down again.
 */
int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}
 682
/*
 * Module exit: stop receiving notifications first so no new devices
 * appear, then destroy all instances and release the block major.
 */
void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}
 689