/* linux/drivers/nvdimm/claim.c */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

/* caller must hold the claimed namespace's device lock */
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
        struct nd_namespace_common *ndns = *_ndns;

        dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
                        || ndns->claim != dev,
                        "%s: invalid claim\n", __func__);
        ndns->claim = NULL;
        *_ndns = NULL;
        put_device(&ndns->dev);
}

void nd_detach_ndns(struct device *dev,
                struct nd_namespace_common **_ndns)
{
        struct nd_namespace_common *ndns = *_ndns;

        if (!ndns)
                return;
        get_device(&ndns->dev);
        device_lock(&ndns->dev);
        __nd_detach_ndns(dev, _ndns);
        device_unlock(&ndns->dev);
        put_device(&ndns->dev);
}

/* caller must hold the device lock of the namespace being attached */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns)
{
        if (attach->claim)
                return false;
        dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
                        || *_ndns,
                        "%s: invalid claim\n", __func__);
        attach->claim = dev;
        *_ndns = attach;
        get_device(&attach->dev);
        return true;
}

bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns)
{
        bool claimed;

        device_lock(&attach->dev);
        claimed = __nd_attach_ndns(dev, attach, _ndns);
        device_unlock(&attach->dev);
        return claimed;
}
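
/*
 * Illustrative sketch, not part of the original file: how a claiming
 * personality (btt, pfn, or dax) might take and later drop a claim on a
 * namespace with the helpers above.  "example_claim" and
 * "example_release" are hypothetical names; "ndns_slot" stands in for
 * the pointer the claiming device keeps to its namespace.
 */
static int example_claim(struct device *claim_dev,
                struct nd_namespace_common *ndns,
                struct nd_namespace_common **ndns_slot)
{
        /* takes a reference on ndns and records claim_dev in ndns->claim */
        if (!nd_attach_ndns(claim_dev, ndns, ndns_slot))
                return -EBUSY;
        return 0;
}

static void example_release(struct device *claim_dev,
                struct nd_namespace_common **ndns_slot)
{
        /* clears ndns->claim and drops the reference taken at attach time */
        nd_detach_ndns(claim_dev, ndns_slot);
}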

static int namespace_match(struct device *dev, void *data)
{
        char *name = data;

        return strcmp(name, dev_name(dev)) == 0;
}

static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct device *seed = NULL;

        if (is_nd_btt(dev))
                seed = nd_region->btt_seed;
        else if (is_nd_pfn(dev))
                seed = nd_region->pfn_seed;
        else if (is_nd_dax(dev))
                seed = nd_region->dax_seed;

        if (seed == dev || ndns || dev->driver)
                return false;
        return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
        /*
         * pfn device attributes are re-used by dax device instances, so we
         * need to be careful to do the correct device-to-nd_pfn conversion:
         * a dax device embeds its nd_pfn rather than being one directly.
         */
        if (is_nd_pfn(dev))
                return to_nd_pfn(dev);

        if (is_nd_dax(dev)) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                return &nd_dax->nd_pfn;
        }

        WARN_ON(1);
        return NULL;
}
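
/*
 * Illustrative sketch, not part of the original file: a sysfs attribute
 * shared between pfn and dax devices can route through to_nd_pfn_safe()
 * so it works for either device type.  "example_uuid_show" is a
 * hypothetical attribute; the in-tree pfn/dax attributes are defined
 * elsewhere in the driver.
 */
static ssize_t example_uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}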

static void nd_detach_and_reset(struct device *dev,
                struct nd_namespace_common **_ndns)
{
        /* detach the namespace and destroy / reset the device */
        nd_detach_ndns(dev, _ndns);
        if (is_idle(dev, *_ndns)) {
                nd_device_unregister(dev, ND_ASYNC);
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_btt->lbasize = 0;
                kfree(nd_btt->uuid);
                nd_btt->uuid = NULL;
        } else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

                kfree(nd_pfn->uuid);
                nd_pfn->uuid = NULL;
                nd_pfn->mode = PFN_MODE_NONE;
        }
}

/*
 * Set or clear the namespace claimed by a btt, pfn, or dax device: an
 * empty string detaches and resets, a "namespaceX.Y" name attaches the
 * matching sibling namespace.
 */
ssize_t nd_namespace_store(struct device *dev,
                struct nd_namespace_common **_ndns, const char *buf,
                size_t len)
{
        struct nd_namespace_common *ndns;
        struct device *found;
        char *name;

        if (dev->driver) {
                dev_dbg(dev, "%s: -EBUSY\n", __func__);
                return -EBUSY;
        }

        name = kstrndup(buf, len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        strim(name);

        if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
                /* pass */;
        else {
                len = -EINVAL;
                goto out;
        }

        ndns = *_ndns;
        if (strcmp(name, "") == 0) {
                nd_detach_and_reset(dev, _ndns);
                goto out;
        } else if (ndns) {
                dev_dbg(dev, "namespace already set to: %s\n",
                                dev_name(&ndns->dev));
                len = -EBUSY;
                goto out;
        }

        found = device_find_child(dev->parent, name, namespace_match);
        if (!found) {
                dev_dbg(dev, "'%s' not found under %s\n", name,
                                dev_name(dev->parent));
                len = -ENODEV;
                goto out;
        }

        ndns = to_ndns(found);
        if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
                dev_dbg(dev, "%s too small to host\n", name);
                len = -ENXIO;
                goto out_attach;
        }

        WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
        if (!nd_attach_ndns(dev, ndns, _ndns)) {
                dev_dbg(dev, "%s already claimed\n",
                                dev_name(&ndns->dev));
                len = -EBUSY;
        }

 out_attach:
        put_device(&ndns->dev); /* from device_find_child */
 out:
        kfree(name);
        return len;
}
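
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * is the 'namespace' sysfs store handler of a claiming device.  The
 * WARN_ON_ONCE(!is_nvdimm_bus_locked()) above implies the caller holds
 * the nvdimm bus lock, and the dev->driver check implies the device
 * lock is held as well.  This sketch is modeled on the BTT case and
 * assumes struct nd_btt keeps its claimed namespace in an "ndns"
 * member, as the in-tree btt code does.
 */
static ssize_t example_namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_btt *nd_btt = to_nd_btt(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        device_lock(dev);
        rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
        device_unlock(dev);
        nvdimm_bus_unlock(dev);

        return rc;
}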

/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
        u64 sum;
        __le64 sum_save;

        BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
        BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
        BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

        sum_save = nd_gen_sb->checksum;
        nd_gen_sb->checksum = 0;
        sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
        nd_gen_sb->checksum = sum_save;
        return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
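
/*
 * Illustrative sketch, not part of the original file: a consumer that
 * has read a 4K info block (e.g. a struct btt_sb or struct nd_pfn_sb)
 * back from media might validate it like this, assuming the writer
 * stored cpu_to_le64() of the same fletcher64 sum in the final field.
 * "example_sb_valid" is a hypothetical helper.
 */
static bool example_sb_valid(struct nd_gen_sb *nd_gen_sb)
{
        /* nd_sb_checksum() zeroes and restores the checksum field itself */
        return nd_sb_checksum(nd_gen_sb) == le64_to_cpu(nd_gen_sb->checksum);
}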

static int nsio_rw_bytes(struct nd_namespace_common *ndns,
                resource_size_t offset, void *buf, size_t size, int rw)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

        if (unlikely(offset + size > nsio->size)) {
                dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
                return -EFAULT;
        }

        if (rw == READ) {
                unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

                if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
                        return -EIO;
                return memcpy_from_pmem(buf, nsio->addr + offset, size);
        } else {
                memcpy_to_pmem(nsio->addr + offset, buf, size);
                nvdimm_flush(to_nd_region(ndns->dev.parent));
        }

        return 0;
}
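
/*
 * Illustrative sketch, not part of the original file: once
 * devm_nsio_enable() below has wired up ndns->rw_bytes, a claiming
 * driver can reach namespace media through that method.
 * "example_read_sb" is a hypothetical helper that reads a 4K info
 * block at a caller-supplied offset into a caller-supplied buffer.
 */
static int example_read_sb(struct nd_namespace_common *ndns,
                resource_size_t offset, void *sb_buf)
{
        /* returns -EIO if the range intersects a known bad block */
        return ndns->rw_bytes(ndns, offset, sb_buf, SZ_4K, READ);
}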

int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
{
        struct resource *res = &nsio->res;
        struct nd_namespace_common *ndns = &nsio->common;

        nsio->size = resource_size(res);
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        ndns->rw_bytes = nsio_rw_bytes;
        if (devm_init_badblocks(dev, &nsio->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
                        &nsio->res);

        nsio->addr = devm_memremap(dev, res->start, resource_size(res),
                        ARCH_MEMREMAP_PMEM);

        return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);

void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
        struct resource *res = &nsio->res;

        devm_memunmap(dev, nsio->addr);
        devm_exit_badblocks(dev, &nsio->bb);
        devm_release_mem_region(dev, res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);
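
/*
 * Illustrative sketch, not part of the original file: the two devm
 * helpers above are meant to be paired by a claiming driver's probe and
 * teardown paths.  Assumes ndns is an io/pmem namespace backed by a
 * struct nd_namespace_io; "example_enable_io" / "example_disable_io"
 * are hypothetical names.
 */
static int example_enable_io(struct device *dev,
                struct nd_namespace_common *ndns)
{
        /* maps the namespace and hooks up ndns->rw_bytes */
        return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev));
}

static void example_disable_io(struct device *dev,
                struct nd_namespace_common *ndns)
{
        /* explicit teardown for paths that cannot wait for devm cleanup */
        devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}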