linux/drivers/dax/super.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
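
/*
 * Illustrative usage (a sketch, not a caller in this file): dax operations
 * are bracketed by the SRCU read lock so that kill_dax() can wait for
 * in-flight callers to drain before the device goes away:
 *
 *	int id = dax_read_lock();
 *	long nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *
 *	... use kaddr/pfn only while the lock is held ...
 *	dax_read_unlock(id);
 *
 * The id returned by dax_read_lock() must be passed back to the matching
 * dax_read_unlock().
 */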

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
	phys_addr_t phys_off = (start_sect + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
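
/*
 * Worked example (illustrative numbers, assuming a 4K PAGE_SIZE): for a
 * partition starting at sector 2048 and a relative sector of 8, the byte
 * offset is (2048 + 8) * 512 = 1052672 and *pgoff becomes
 * 1052672 >> PAGE_SHIFT = 257.  Any byte offset or size that is not a
 * multiple of PAGE_SIZE is rejected with -EINVAL, which is how
 * __generic_fsdax_supported() below refuses misaligned partitions.
 */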

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	char buf[BDEVNAME_SIZE];
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	if (!dax_dev) {
		pr_debug("%s: error: dax unsupported by block device\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);

	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);

/**
 * __bdev_dax_supported() - Check if the device supports dax for a filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with the dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	bool ret;
	int id;

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);
	dax_read_unlock(id);

	put_dax(dax_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
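
/*
 * Illustrative mount-time check (a sketch; the filesystem hook is not part
 * of this file): filesystems typically validate the "dax" mount option via
 * the bdev_dax_supported() wrapper declared in <linux/dax.h>, roughly:
 *
 *	if (wants_dax && !bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *		return -EINVAL;
 *
 * where wants_dax is a hypothetical flag parsed from the mount options.
 */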
#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @list: node in the dax_host_list hash used for dax_get_by_host() lookups
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: driver-provided operations backing this dax_device
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
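
/*
 * Illustrative use of dax_attribute_group (a sketch; the driver side is
 * not part of this file): a block driver that registers a dax_device can
 * expose the dax/write_cache sysfs file by listing this group among its
 * disk's attribute groups, e.g.:
 *
 *	static const struct attribute_group *example_disk_groups[] = {
 *		&dax_attribute_group,
 *		NULL,
 *	};
 *
 * example_disk_groups is a hypothetical name; dax_visible() above then
 * hides write_cache when CONFIG_ARCH_HAS_PMEM_API is not set.
 */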

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
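
/*
 * Illustrative caller (a sketch mirroring __generic_fsdax_supported()
 * above): translate one page of device offset into a mapping and pfn
 * while holding the SRCU read lock:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	dax_read_unlock(id);
 *	if (nr < 1)
 *		return -EIO;
 *
 * A short positive return means fewer pages than requested are
 * contiguously accessible at @pgoff.
 */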

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	if (!dax_dev)
		return false;

	if (!dax_alive(dax_dev))
		return false;

	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
EXPORT_SYMBOL_GPL(dax_supported);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
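
/*
 * Illustrative teardown order (a sketch; the driver side is not part of
 * this file): a provider unbinding its device marks the dax_device dead
 * before dropping its reference so no new operations can start:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 *
 * kill_dax() clears DAXDEV_ALIVE and waits out SRCU readers; put_dax()
 * then drops the inode reference taken at alloc_dax() time.
 */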

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name		= "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);

	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
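
/*
 * Illustrative registration (a sketch with hypothetical names; real
 * providers such as drivers/nvdimm/pmem.c follow this shape): the driver
 * supplies a struct dax_operations and keeps the returned dax_device:
 *
 *	static const struct dax_operations example_dax_ops = {
 *		.direct_access   = example_direct_access,
 *		.dax_supported   = generic_fsdax_supported,
 *		.copy_from_iter  = example_copy_from_iter,
 *		.copy_to_iter    = example_copy_to_iter,
 *		.zero_page_range = example_zero_page_range,
 *	};
 *
 *	dax_dev = alloc_dax(drv_data, disk->disk_name, &example_dax_ops,
 *			DAXDEV_F_SYNC);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *
 * Passing DAXDEV_F_SYNC advertises synchronous-fault support; drivers
 * whose media requires explicit flushing pass 0 instead.
 */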

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);