linux/drivers/dax/pmem.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/percpu-refcount.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "device-dax.h"

struct dax_pmem {
        struct device *dev;
        struct percpu_ref ref;
        struct dev_pagemap pgmap;
        struct completion cmp;
};

static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
{
        return container_of(ref, struct dax_pmem, ref);
}

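/*
 * percpu_ref release callback: runs once the last reference to
 * dax_pmem->ref has been dropped and signals dax_pmem_percpu_exit()
 * via the completion.
 */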
static void dax_pmem_percpu_release(struct percpu_ref *ref)
{
        struct dax_pmem *dax_pmem = to_dax_pmem(ref);

        dev_dbg(dax_pmem->dev, "trace\n");
        complete(&dax_pmem->cmp);
}

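/*
 * devm action: wait for the reference count to drain (signalled by
 * dax_pmem_percpu_release()) and then tear down the percpu_ref.
 */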
static void dax_pmem_percpu_exit(void *data)
{
        struct percpu_ref *ref = data;
        struct dax_pmem *dax_pmem = to_dax_pmem(ref);

        dev_dbg(dax_pmem->dev, "trace\n");
        wait_for_completion(&dax_pmem->cmp);
        percpu_ref_exit(ref);
}

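/*
 * devm action: start killing the reference at device teardown so that
 * it can drain and dax_pmem_percpu_exit() can observe the completion.
 */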
static void dax_pmem_percpu_kill(void *data)
{
        struct percpu_ref *ref = data;
        struct dax_pmem *dax_pmem = to_dax_pmem(ref);

        dev_dbg(dax_pmem->dev, "trace\n");
        percpu_ref_kill(ref);
}

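/*
 * Probe a pmem namespace claimed for device-dax operation: parse the
 * pfn info block, reserve the namespace range, map its struct pages
 * via devm_memremap_pages(), and register a dev_dax instance over the
 * resulting dax_region.
 */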
static int dax_pmem_probe(struct device *dev)
{
        void *addr;
        struct resource res;
        int rc, id, region_id;
        struct nd_pfn_sb *pfn_sb;
        struct dev_dax *dev_dax;
        struct dax_pmem *dax_pmem;
        struct nd_namespace_io *nsio;
        struct dax_region *dax_region;
        struct nd_namespace_common *ndns;
        struct nd_dax *nd_dax = to_nd_dax(dev);
        struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);
        nsio = to_nd_namespace_io(&ndns->dev);

        dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
        if (!dax_pmem)
                return -ENOMEM;

        /* parse the 'pfn' info block via ->rw_bytes */
        rc = devm_nsio_enable(dev, nsio);
        if (rc)
                return rc;
        rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
        if (rc)
                return rc;
        devm_nsio_disable(dev, nsio);

        pfn_sb = nd_pfn->pfn_sb;

        if (!devm_request_mem_region(dev, nsio->res.start,
                                resource_size(&nsio->res),
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
                return -EBUSY;
        }

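        /*
         * Set up the percpu_ref / completion pair that coordinates
         * teardown: dax_pmem_percpu_kill() initiates shutdown of the
         * reference and dax_pmem_percpu_exit() waits for it to drain
         * before exiting it.
         */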
        dax_pmem->dev = dev;
        init_completion(&dax_pmem->cmp);
        rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
                        GFP_KERNEL);
        if (rc)
                return rc;

        rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
                                                        &dax_pmem->ref);
        if (rc)
                return rc;

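        /*
         * Associate the reference with the pgmap and map the pmem
         * range so struct pages back the device-dax mappings.
         */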
        dax_pmem->pgmap.ref = &dax_pmem->ref;
        addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
                                                        &dax_pmem->ref);
        if (rc)
                return rc;

        /* adjust the dax_region resource to the start of data */
        memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
        res.start += le64_to_cpu(pfn_sb->dataoff);

        rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
        if (rc != 2)
                return -EINVAL;

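        /*
         * The region alignment comes from the pfn info block; the ids
         * come from the parent namespace device name.
         */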
        dax_region = alloc_dax_region(dev, region_id, &res,
                        le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
        if (!dax_region)
                return -ENOMEM;

        /* TODO: support for subdividing a dax region... */
        dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);

        /* child dev_dax instances now own the lifetime of the dax_region */
        dax_region_put(dax_region);

        return PTR_ERR_OR_ZERO(dev_dax);
}

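/*
 * Register with the libnvdimm bus so this driver binds to namespaces
 * configured for device-dax operation (ND_DRIVER_DAX_PMEM).
 */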
static struct nd_device_driver dax_pmem_driver = {
        .probe = dax_pmem_probe,
        .drv = {
                .name = "dax_pmem",
        },
        .type = ND_DRIVER_DAX_PMEM,
};

module_nd_driver(dax_pmem_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);