linux/drivers/dax/super.c
   1/*
   2 * Copyright(c) 2017 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of version 2 of the GNU General Public License as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 */
  13#include <linux/pagemap.h>
  14#include <linux/module.h>
  15#include <linux/mount.h>
  16#include <linux/magic.h>
  17#include <linux/genhd.h>
  18#include <linux/pfn_t.h>
  19#include <linux/cdev.h>
  20#include <linux/hash.h>
  21#include <linux/slab.h>
  22#include <linux/uio.h>
  23#include <linux/dax.h>
  24#include <linux/fs.h>
  25#include "dax-private.h"
  26
  27static dev_t dax_devt;
  28DEFINE_STATIC_SRCU(dax_srcu);
  29static struct vfsmount *dax_mnt;
  30static DEFINE_IDA(dax_minor_ida);
  31static struct kmem_cache *dax_cache __read_mostly;
  32static struct super_block *dax_superblock __read_mostly;
  33
  34#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
  35static struct hlist_head dax_host_list[DAX_HASH_SIZE];
  36static DEFINE_SPINLOCK(dax_host_lock);
  37
  38int dax_read_lock(void)
  39{
  40        return srcu_read_lock(&dax_srcu);
  41}
  42EXPORT_SYMBOL_GPL(dax_read_lock);
  43
  44void dax_read_unlock(int id)
  45{
  46        srcu_read_unlock(&dax_srcu, id);
  47}
  48EXPORT_SYMBOL_GPL(dax_read_unlock);
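
dax_read_lock() and dax_read_unlock() bracket every use of a dax_device so that kill_dax(), below, can wait out in-flight callers with synchronize_srcu(). A minimal sketch of the expected calling pattern, assuming the caller already holds a dax_device reference (the helper name and the single-page request are illustrative, not part of this file):

#include <linux/dax.h>
#include <linux/pfn_t.h>

/* Illustrative: resolve one page of a dax_device while holding the SRCU
 * read lock so the device cannot be torn down underneath us. */
static long example_peek_page(struct dax_device *dax_dev, pgoff_t pgoff,
		void **kaddr, pfn_t *pfn)
{
	int id;
	long rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, 1, kaddr, pfn);
	dax_read_unlock(id);

	return rc;	/* negative errno, or the number of mappable pages */
}
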
  49
  50#ifdef CONFIG_BLOCK
  51#include <linux/blkdev.h>
  52
  53int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
  54                pgoff_t *pgoff)
  55{
  56        phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
  57
  58        if (pgoff)
  59                *pgoff = PHYS_PFN(phys_off);
  60        if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
  61                return -EINVAL;
  62        return 0;
  63}
  64EXPORT_SYMBOL(bdev_dax_pgoff);
  65
  66#if IS_ENABLED(CONFIG_FS_DAX)
  67struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
  68{
  69        if (!blk_queue_dax(bdev->bd_queue))
  70                return NULL;
  71        return fs_dax_get_by_host(bdev->bd_disk->disk_name);
  72}
  73EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
  74#endif
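
For filesystem-dax the usual pattern pairs fs_dax_get_by_bdev() at mount time with bdev_dax_pgoff() per I/O: the former resolves the block device's disk name to a referenced dax_device, the latter folds the partition start into a whole-device page offset and rejects anything that is not page aligned. A rough sketch, with illustrative helper names:

#include <linux/blkdev.h>
#include <linux/dax.h>

/* Illustrative mount-time lookup; the caller owns the reference and must
 * eventually drop it with put_dax()/fs_put_dax(). Returns NULL when the
 * request queue is not DAX capable. */
static struct dax_device *example_attach_dax(struct block_device *bdev)
{
	return fs_dax_get_by_bdev(bdev);
}

/* Illustrative per-I/O translation: @sector is partition relative (512-byte
 * units); fails with -EINVAL if the partition start is not page aligned. */
static int example_sector_to_pgoff(struct block_device *bdev,
		sector_t sector, pgoff_t *pgoff)
{
	return bdev_dax_pgoff(bdev, sector, PAGE_SIZE, pgoff);
}
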
  75
  76/**
  77 * __bdev_dax_supported() - Check if the device supports dax for filesystem
  78 * @bdev: block device to check
  79 * @blocksize: The block size of the device
  80 *
  81 * This is a library function for filesystems to check if the block device
  82 * can be mounted with dax option.
  83 *
  84 * Return: true if supported, false if unsupported
  85 */
  86bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
  87{
  88        struct dax_device *dax_dev;
  89        bool dax_enabled = false;
  90        pgoff_t pgoff, pgoff_end;
  91        struct request_queue *q;
  92        char buf[BDEVNAME_SIZE];
  93        void *kaddr, *end_kaddr;
  94        pfn_t pfn, end_pfn;
  95        sector_t last_page;
  96        long len, len2;
  97        int err, id;
  98
  99        if (blocksize != PAGE_SIZE) {
 100                pr_debug("%s: error: unsupported blocksize for dax\n",
 101                                bdevname(bdev, buf));
 102                return false;
 103        }
 104
 105        q = bdev_get_queue(bdev);
 106        if (!q || !blk_queue_dax(q)) {
 107                pr_debug("%s: error: request queue doesn't support dax\n",
 108                                bdevname(bdev, buf));
 109                return false;
 110        }
 111
 112        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
 113        if (err) {
 114                pr_debug("%s: error: unaligned partition for dax\n",
 115                                bdevname(bdev, buf));
 116                return false;
 117        }
 118
 119        last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
 120        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
 121        if (err) {
 122                pr_debug("%s: error: unaligned partition for dax\n",
 123                                bdevname(bdev, buf));
 124                return false;
 125        }
 126
 127        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 128        if (!dax_dev) {
 129                pr_debug("%s: error: device does not support dax\n",
 130                                bdevname(bdev, buf));
 131                return false;
 132        }
 133
 134        id = dax_read_lock();
 135        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 136        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 137        dax_read_unlock(id);
 138
 139        put_dax(dax_dev);
 140
 141        if (len < 1 || len2 < 1) {
 142                pr_debug("%s: error: dax access failed (%ld)\n",
 143                                bdevname(bdev, buf), len < 1 ? len : len2);
 144                return false;
 145        }
 146
 147        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
 148                /*
 149                 * An arch that has enabled the pmem api should also
 150                 * have its drivers support pfn_t_devmap()
 151                 *
 152                 * This is a developer warning and should not trigger in
 153                 * production. dax_flush() will crash since it depends
 154                 * on being able to do (page_address(pfn_to_page())).
 155                 */
 156                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
 157                dax_enabled = true;
 158        } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
 159                struct dev_pagemap *pgmap, *end_pgmap;
 160
 161                pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
 162                end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
 163                if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
 164                                && pfn_t_to_page(pfn)->pgmap == pgmap
 165                                && pfn_t_to_page(end_pfn)->pgmap == pgmap
 166                                && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
 167                                && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
 168                        dax_enabled = true;
 169                put_dev_pagemap(pgmap);
 170                put_dev_pagemap(end_pgmap);
 171
 172        }
 173
 174        if (!dax_enabled) {
 175                pr_debug("%s: error: dax support not enabled\n",
 176                                bdevname(bdev, buf));
 177                return false;
 178        }
 179        return true;
 180}
 181EXPORT_SYMBOL_GPL(__bdev_dax_supported);
 182#endif
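
__bdev_dax_supported() is the check a filesystem runs when the user asks for the dax mount option (block-based filesystems typically reach it through a bdev_dax_supported() wrapper in dax.h): it requires a PAGE_SIZE block size, a DAX-capable request queue, page-aligned partition bounds, and dev_pagemap-backed pfns at both ends of the device. A hedged sketch of that mount-time check; the helper name and message text are illustrative:

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/fs.h>

/* Illustrative mount-option validation, modelled on the filesystems that
 * call this library function. */
static int example_check_dax_mount(struct super_block *sb, int blocksize)
{
	if (!__bdev_dax_supported(sb->s_bdev, blocksize)) {
		pr_warn("DAX unsupported by block device, not enabling dax\n");
		return -EINVAL;
	}
	return 0;
}
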
 183
 184enum dax_device_flags {
 185        /* !alive + rcu grace period == no new operations / mappings */
 186        DAXDEV_ALIVE,
 187        /* gate whether dax_flush() calls the low level flush routine */
 188        DAXDEV_WRITE_CACHE,
 189};
 190
 191/**
 192 * struct dax_device - anchor object for dax services
 193 * @inode: core vfs
 194 * @cdev: optional character interface for "device dax"
 195 * @host: optional name for lookups where the device path is not available
 196 * @private: dax driver private data
 197 * @flags: state and boolean properties
 198 */
 199struct dax_device {
 200        struct hlist_node list;
 201        struct inode inode;
 202        struct cdev cdev;
 203        const char *host;
 204        void *private;
 205        unsigned long flags;
 206        const struct dax_operations *ops;
 207};
 208
 209static ssize_t write_cache_show(struct device *dev,
 210                struct device_attribute *attr, char *buf)
 211{
 212        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
 213        ssize_t rc;
 214
 215        WARN_ON_ONCE(!dax_dev);
 216        if (!dax_dev)
 217                return -ENXIO;
 218
 219        rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
 220        put_dax(dax_dev);
 221        return rc;
 222}
 223
 224static ssize_t write_cache_store(struct device *dev,
 225                struct device_attribute *attr, const char *buf, size_t len)
 226{
 227        bool write_cache;
 228        int rc = strtobool(buf, &write_cache);
 229        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
 230
 231        WARN_ON_ONCE(!dax_dev);
 232        if (!dax_dev)
 233                return -ENXIO;
 234
 235        if (rc)
 236                len = rc;
 237        else
 238                dax_write_cache(dax_dev, write_cache);
 239
 240        put_dax(dax_dev);
 241        return len;
 242}
 243static DEVICE_ATTR_RW(write_cache);
 244
 245static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
 246{
 247        struct device *dev = container_of(kobj, typeof(*dev), kobj);
 248        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
 249
 250        WARN_ON_ONCE(!dax_dev);
 251        if (!dax_dev)
 252                return 0;
 253
 254#ifndef CONFIG_ARCH_HAS_PMEM_API
 255        if (a == &dev_attr_write_cache.attr)
 256                return 0;
 257#endif
 258        return a->mode;
 259}
 260
 261static struct attribute *dax_attributes[] = {
 262        &dev_attr_write_cache.attr,
 263        NULL,
 264};
 265
 266struct attribute_group dax_attribute_group = {
 267        .name = "dax",
 268        .attrs = dax_attributes,
 269        .is_visible = dax_visible,
 270};
 271EXPORT_SYMBOL_GPL(dax_attribute_group);
 272
 273/**
 274 * dax_direct_access() - translate a device pgoff to an absolute pfn
 275 * @dax_dev: a dax_device instance representing the logical memory range
 276 * @pgoff: offset in pages from the start of the device to translate
 277 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 278 * @kaddr: output parameter that returns a virtual address mapping of pfn
 279 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 280 *
 281 * Return: negative errno if an error occurs, otherwise the number of
 282 * pages accessible at the device relative @pgoff.
 283 */
 284long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 285                void **kaddr, pfn_t *pfn)
 286{
 287        long avail;
 288
 289        if (!dax_dev)
 290                return -EOPNOTSUPP;
 291
 292        if (!dax_alive(dax_dev))
 293                return -ENXIO;
 294
 295        if (nr_pages < 0)
 296                return nr_pages;
 297
 298        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
 299                        kaddr, pfn);
 300        if (!avail)
 301                return -ERANGE;
 302        return min(avail, nr_pages);
 303}
 304EXPORT_SYMBOL_GPL(dax_direct_access);
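
Because the clamp to min(avail, nr_pages) allows ->direct_access() to map fewer pages than were asked for, a caller that wants to touch a large range has to loop and advance by whatever each call returns. A sketch of that pattern (the caller is assumed to hold dax_read_lock(); names are illustrative):

#include <linux/dax.h>
#include <linux/pfn_t.h>

/* Illustrative: visit @nr_pages of a dax_device one contiguous,
 * directly-mapped chunk at a time. */
static long example_for_each_chunk(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void (*fn)(void *kaddr, long pages))
{
	while (nr_pages > 0) {
		void *kaddr;
		pfn_t pfn;
		long avail;

		avail = dax_direct_access(dax_dev, pgoff, nr_pages,
				&kaddr, &pfn);
		if (avail < 0)
			return avail;	/* -ERANGE, -ENXIO, ... */

		fn(kaddr, avail);	/* avail is never more than nr_pages */
		pgoff += avail;
		nr_pages -= avail;
	}
	return 0;
}
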
 305
 306size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 307                size_t bytes, struct iov_iter *i)
 308{
 309        if (!dax_alive(dax_dev))
 310                return 0;
 311
 312        return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 313}
 314EXPORT_SYMBOL_GPL(dax_copy_from_iter);
 315
 316size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 317                size_t bytes, struct iov_iter *i)
 318{
 319        if (!dax_alive(dax_dev))
 320                return 0;
 321
 322        return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
 323}
 324EXPORT_SYMBOL_GPL(dax_copy_to_iter);
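
dax_copy_from_iter() and dax_copy_to_iter() only gate on dax_alive() and then dispatch to the driver's dax_operations, which is what lets stacking drivers (device-mapper, for instance) forward an iov_iter to the dax_device of an underlying device after remapping the page offset. A rough sketch of that pass-through shape, with the remapping elided and names illustrative:

#include <linux/dax.h>
#include <linux/uio.h>

/* Illustrative stacking pattern: adjust pgoff for the lower device and
 * forward the iterator; a return of 0 means the lower device was killed. */
static size_t example_stacked_copy_from_iter(struct dax_device *lower,
		pgoff_t lower_pgoff, void *addr, size_t bytes,
		struct iov_iter *i)
{
	return dax_copy_from_iter(lower, lower_pgoff, addr, bytes, i);
}
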
 325
 326#ifdef CONFIG_ARCH_HAS_PMEM_API
 327void arch_wb_cache_pmem(void *addr, size_t size);
 328void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
 329{
 330        if (unlikely(!dax_write_cache_enabled(dax_dev)))
 331                return;
 332
 333        arch_wb_cache_pmem(addr, size);
 334}
 335#else
 336void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
 337{
 338}
 339#endif
 340EXPORT_SYMBOL_GPL(dax_flush);
 341
 342void dax_write_cache(struct dax_device *dax_dev, bool wc)
 343{
 344        if (wc)
 345                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
 346        else
 347                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
 348}
 349EXPORT_SYMBOL_GPL(dax_write_cache);
 350
 351bool dax_write_cache_enabled(struct dax_device *dax_dev)
 352{
 353        return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
 354}
 355EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
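
dax_flush() only reaches arch_wb_cache_pmem() while the DAXDEV_WRITE_CACHE bit is set, so a provider whose platform already guarantees durability (ADR-style flush-on-power-fail, for example) can clear the bit and turn every flush into a nop. A sketch of how a driver and a write path might use the pair; the helper names are illustrative:

#include <linux/dax.h>
#include <linux/string.h>

/* Illustrative: a pmem-style driver decides at setup time whether CPU
 * cache writeback is needed for persistence. */
static void example_configure_cache(struct dax_device *dax_dev,
		bool needs_cpu_flush)
{
	dax_write_cache(dax_dev, needs_cpu_flush);
}

/* Illustrative write path: copy into the direct mapping, then flush.
 * dax_flush() quietly does nothing when the write-cache bit is clear. */
static void example_write(struct dax_device *dax_dev, void *kaddr,
		const void *src, size_t len)
{
	memcpy(kaddr, src, len);
	dax_flush(dax_dev, kaddr, len);
}
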
 356
 357bool dax_alive(struct dax_device *dax_dev)
 358{
 359        lockdep_assert_held(&dax_srcu);
 360        return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
 361}
 362EXPORT_SYMBOL_GPL(dax_alive);
 363
 364static int dax_host_hash(const char *host)
 365{
 366        return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
 367}
 368
 369/*
 370 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 371 * that any fault handlers or operations that might have seen
 372 * dax_alive(), have completed.  Any operations that start after
 373 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 374 */
 375void kill_dax(struct dax_device *dax_dev)
 376{
 377        if (!dax_dev)
 378                return;
 379
 380        clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
 381
 382        synchronize_srcu(&dax_srcu);
 383
 384        spin_lock(&dax_host_lock);
 385        hlist_del_init(&dax_dev->list);
 386        spin_unlock(&dax_host_lock);
 387}
 388EXPORT_SYMBOL_GPL(kill_dax);
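
The comment above kill_dax() is the heart of the lifetime model: clear DAXDEV_ALIVE, then synchronize_srcu(), so nothing can still be inside a dax_operations call when the driver tears down the state behind ->private; the inode itself survives until the last put_dax(). A sketch of the teardown order a provider is expected to follow (names illustrative):

#include <linux/dax.h>
#include <linux/slab.h>

/* Illustrative driver remove path: fence off users of the dax_operations
 * first, then drop the reference handed out by alloc_dax(). */
static void example_driver_remove(struct dax_device *dax_dev,
		void *driver_private)
{
	kill_dax(dax_dev);	/* waits out in-flight dax operations */
	put_dax(dax_dev);	/* inode freed later, after an RCU grace period */
	kfree(driver_private);	/* safe: nobody can reach ->private anymore */
}
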
 389
 390void run_dax(struct dax_device *dax_dev)
 391{
 392        set_bit(DAXDEV_ALIVE, &dax_dev->flags);
 393}
 394EXPORT_SYMBOL_GPL(run_dax);
 395
 396static struct inode *dax_alloc_inode(struct super_block *sb)
 397{
 398        struct dax_device *dax_dev;
 399        struct inode *inode;
 400
 401        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
 402        if (!dax_dev)
 403                return NULL;
 404
 405        inode = &dax_dev->inode;
 406        inode->i_rdev = 0;
 407        return inode;
 408}
 409
 410static struct dax_device *to_dax_dev(struct inode *inode)
 411{
 412        return container_of(inode, struct dax_device, inode);
 413}
 414
 415static void dax_i_callback(struct rcu_head *head)
 416{
 417        struct inode *inode = container_of(head, struct inode, i_rcu);
 418        struct dax_device *dax_dev = to_dax_dev(inode);
 419
 420        kfree(dax_dev->host);
 421        dax_dev->host = NULL;
 422        if (inode->i_rdev)
 423                ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
 424        kmem_cache_free(dax_cache, dax_dev);
 425}
 426
 427static void dax_destroy_inode(struct inode *inode)
 428{
 429        struct dax_device *dax_dev = to_dax_dev(inode);
 430
 431        WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
 432                        "kill_dax() must be called before final iput()\n");
 433        call_rcu(&inode->i_rcu, dax_i_callback);
 434}
 435
 436static const struct super_operations dax_sops = {
 437        .statfs = simple_statfs,
 438        .alloc_inode = dax_alloc_inode,
 439        .destroy_inode = dax_destroy_inode,
 440        .drop_inode = generic_delete_inode,
 441};
 442
 443static struct dentry *dax_mount(struct file_system_type *fs_type,
 444                int flags, const char *dev_name, void *data)
 445{
 446        return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
 447}
 448
 449static struct file_system_type dax_fs_type = {
 450        .name = "dax",
 451        .mount = dax_mount,
 452        .kill_sb = kill_anon_super,
 453};
 454
 455static int dax_test(struct inode *inode, void *data)
 456{
 457        dev_t devt = *(dev_t *) data;
 458
 459        return inode->i_rdev == devt;
 460}
 461
 462static int dax_set(struct inode *inode, void *data)
 463{
 464        dev_t devt = *(dev_t *) data;
 465
 466        inode->i_rdev = devt;
 467        return 0;
 468}
 469
 470static struct dax_device *dax_dev_get(dev_t devt)
 471{
 472        struct dax_device *dax_dev;
 473        struct inode *inode;
 474
 475        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
 476                        dax_test, dax_set, &devt);
 477
 478        if (!inode)
 479                return NULL;
 480
 481        dax_dev = to_dax_dev(inode);
 482        if (inode->i_state & I_NEW) {
 483                set_bit(DAXDEV_ALIVE, &dax_dev->flags);
 484                inode->i_cdev = &dax_dev->cdev;
 485                inode->i_mode = S_IFCHR;
 486                inode->i_flags = S_DAX;
 487                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 488                unlock_new_inode(inode);
 489        }
 490
 491        return dax_dev;
 492}
 493
 494static void dax_add_host(struct dax_device *dax_dev, const char *host)
 495{
 496        int hash;
 497
 498        /*
 499         * Unconditionally init dax_dev since it's coming from a
 500         * non-zeroed slab cache
 501         */
 502        INIT_HLIST_NODE(&dax_dev->list);
 503        dax_dev->host = host;
 504        if (!host)
 505                return;
 506
 507        hash = dax_host_hash(host);
 508        spin_lock(&dax_host_lock);
 509        hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
 510        spin_unlock(&dax_host_lock);
 511}
 512
 513struct dax_device *alloc_dax(void *private, const char *__host,
 514                const struct dax_operations *ops)
 515{
 516        struct dax_device *dax_dev;
 517        const char *host;
 518        dev_t devt;
 519        int minor;
 520
 521        host = kstrdup(__host, GFP_KERNEL);
 522        if (__host && !host)
 523                return NULL;
 524
 525        minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
 526        if (minor < 0)
 527                goto err_minor;
 528
 529        devt = MKDEV(MAJOR(dax_devt), minor);
 530        dax_dev = dax_dev_get(devt);
 531        if (!dax_dev)
 532                goto err_dev;
 533
 534        dax_add_host(dax_dev, host);
 535        dax_dev->ops = ops;
 536        dax_dev->private = private;
 537        return dax_dev;
 538
 539 err_dev:
 540        ida_simple_remove(&dax_minor_ida, minor);
 541 err_minor:
 542        kfree(host);
 543        return NULL;
 544}
 545EXPORT_SYMBOL_GPL(alloc_dax);
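
alloc_dax() is how a provider (pmem, device-mapper, dcssblk) publishes itself: a private cookie for dax_get_private(), an optional host name that dax_get_by_host() can later resolve, and a dax_operations table backing dax_direct_access() and the copy routines above. A hedged skeleton of such a provider, matching the three-argument alloc_dax() in this file; the operation bodies and all names are placeholders rather than code from any real driver:

#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>

static long example_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	/* A real driver translates pgoff into its own mapping here. */
	return -ENXIO;
}

static size_t example_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t example_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

static const struct dax_operations example_dax_ops = {
	.direct_access = example_direct_access,
	.copy_from_iter = example_copy_from_iter,
	.copy_to_iter = example_copy_to_iter,
};

/* Illustrative registration: @private is the driver's own state and
 * @disk_name is what filesystems will pass to dax_get_by_host(). */
static struct dax_device *example_register(void *private, const char *disk_name)
{
	return alloc_dax(private, disk_name, &example_dax_ops);
}
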
 546
 547void put_dax(struct dax_device *dax_dev)
 548{
 549        if (!dax_dev)
 550                return;
 551        iput(&dax_dev->inode);
 552}
 553EXPORT_SYMBOL_GPL(put_dax);
 554
 555/**
 556 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 557 * @host: alternate name for the device registered by a dax driver
 558 */
 559struct dax_device *dax_get_by_host(const char *host)
 560{
 561        struct dax_device *dax_dev, *found = NULL;
 562        int hash, id;
 563
 564        if (!host)
 565                return NULL;
 566
 567        hash = dax_host_hash(host);
 568
 569        id = dax_read_lock();
 570        spin_lock(&dax_host_lock);
 571        hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
 572                if (!dax_alive(dax_dev)
 573                                || strcmp(host, dax_dev->host) != 0)
 574                        continue;
 575
 576                if (igrab(&dax_dev->inode))
 577                        found = dax_dev;
 578                break;
 579        }
 580        spin_unlock(&dax_host_lock);
 581        dax_read_unlock(id);
 582
 583        return found;
 584}
 585EXPORT_SYMBOL_GPL(dax_get_by_host);
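
dax_get_by_host() walks the small host hash under dax_host_lock, skips devices that are no longer alive, and pins the match with igrab(), so the caller owns a reference until put_dax(). A short usage sketch (the host-string handling is illustrative):

#include <linux/dax.h>

/* Illustrative: check whether a registered host name maps to a live
 * dax_device, dropping the reference the lookup took. */
static bool example_host_is_dax(const char *host)
{
	struct dax_device *dax_dev = dax_get_by_host(host);

	if (!dax_dev)
		return false;
	put_dax(dax_dev);
	return true;
}
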
 586
 587/**
 588 * inode_dax: convert a public inode into its dax_dev
 589 * @inode: An inode with i_cdev pointing to a dax_dev
 590 *
 591 * Note this is not equivalent to to_dax_dev() which is for private
 592 * internal use where we know the inode filesystem type == dax_fs_type.
 593 */
 594struct dax_device *inode_dax(struct inode *inode)
 595{
 596        struct cdev *cdev = inode->i_cdev;
 597
 598        return container_of(cdev, struct dax_device, cdev);
 599}
 600EXPORT_SYMBOL_GPL(inode_dax);
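
inode_dax() goes the other way around: given the inode of the character node a process opened, it recovers the containing dax_device from the embedded cdev. A sketch of how a device-dax style open() might use it; the function name and the private_data convention are illustrative, and dax_get_private() is the accessor defined later in this file:

#include <linux/dax.h>
#include <linux/fs.h>

/* Illustrative chardev open: recover the dax_device from the opened inode
 * and stash the driver state for later mmap/fault handling. */
static int example_dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	void *driver_state = dax_get_private(dax_dev);

	if (!driver_state)
		return -ENXIO;	/* device has already been killed */

	filp->private_data = driver_state;
	return 0;
}
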
 601
 602struct inode *dax_inode(struct dax_device *dax_dev)
 603{
 604        return &dax_dev->inode;
 605}
 606EXPORT_SYMBOL_GPL(dax_inode);
 607
 608void *dax_get_private(struct dax_device *dax_dev)
 609{
 610        if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
 611                return NULL;
 612        return dax_dev->private;
 613}
 614EXPORT_SYMBOL_GPL(dax_get_private);
 615
 616static void init_once(void *_dax_dev)
 617{
 618        struct dax_device *dax_dev = _dax_dev;
 619        struct inode *inode = &dax_dev->inode;
 620
 621        memset(dax_dev, 0, sizeof(*dax_dev));
 622        inode_init_once(inode);
 623}
 624
 625static int dax_fs_init(void)
 626{
 627        int rc;
 628
 629        dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
 630                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
 631                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
 632                        init_once);
 633        if (!dax_cache)
 634                return -ENOMEM;
 635
 636        rc = register_filesystem(&dax_fs_type);
 637        if (rc)
 638                goto err_register_fs;
 639
 640        dax_mnt = kern_mount(&dax_fs_type);
 641        if (IS_ERR(dax_mnt)) {
 642                rc = PTR_ERR(dax_mnt);
 643                goto err_mount;
 644        }
 645        dax_superblock = dax_mnt->mnt_sb;
 646
 647        return 0;
 648
 649 err_mount:
 650        unregister_filesystem(&dax_fs_type);
 651 err_register_fs:
 652        kmem_cache_destroy(dax_cache);
 653
 654        return rc;
 655}
 656
 657static void dax_fs_exit(void)
 658{
 659        kern_unmount(dax_mnt);
 660        unregister_filesystem(&dax_fs_type);
 661        kmem_cache_destroy(dax_cache);
 662}
 663
 664static int __init dax_core_init(void)
 665{
 666        int rc;
 667
 668        rc = dax_fs_init();
 669        if (rc)
 670                return rc;
 671
 672        rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
 673        if (rc)
 674                goto err_chrdev;
 675
 676        rc = dax_bus_init();
 677        if (rc)
 678                goto err_bus;
 679        return 0;
 680
 681err_bus:
 682        unregister_chrdev_region(dax_devt, MINORMASK+1);
 683err_chrdev:
 684        dax_fs_exit();
  685        return rc;
 686}
 687
 688static void __exit dax_core_exit(void)
 689{
 690        unregister_chrdev_region(dax_devt, MINORMASK+1);
 691        ida_destroy(&dax_minor_ida);
 692        dax_fs_exit();
 693}
 694
 695MODULE_AUTHOR("Intel Corporation");
 696MODULE_LICENSE("GPL v2");
 697subsys_initcall(dax_core_init);
 698module_exit(dax_core_exit);
 699