linux/drivers/nvdimm/dimm_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

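/*
 * Example (illustrative sketch, not part of the driver): a caller can use
 * the errno to distinguish a hard failure on an aliased dimm (-ENXIO) from
 * a dimm that simply does not implement config-data commands (-ENOTTY).
 * example_probe() below is a hypothetical wrapper.
 */
#if 0
static int example_probe(struct device *dev)
{
        int rc = nvdimm_check_config_data(dev);

        if (rc == -ENOTTY)
                return 0;       /* no label support: proceed without labels */
        if (rc)
                return rc;      /* aliased dimm without config data: fail */
        return 0;               /* config-data commands are available */
}
#endif
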
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

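/*
 * Example (illustrative sketch): once nvdimm_init_nsarea() succeeds the
 * geometry is cached in ndd->nsarea, using the layout of struct
 * nd_cmd_get_config_size from include/uapi/linux/ndctl.h. The helper
 * below is hypothetical.
 */
#if 0
static void example_dump_nsarea(struct nvdimm_drvdata *ndd)
{
        pr_info("status: %#x config_size: %u max_xfer: %u\n",
                        ndd->nsarea.status, ndd->nsarea.config_size,
                        ndd->nsarea.max_xfer);
}
#endif
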
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (max_xfer: %u, config_size: %u)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                /* don't leave a half-initialized ->data for the next call */
                kvfree(ndd->data);
                ndd->data = NULL;
                return -ENOMEM;
        }

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}

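/*
 * Worked example (assumed numbers): with nsarea.config_size == 131072,
 * nsarea.max_xfer == 8192, and 4K pages, max_cmd_size is
 * min(PAGE_SIZE, 8192) == 4096, so the loop above issues 32
 * ND_CMD_GET_CONFIG_DATA calls at in_offset 0, 4096, 8192, ..., 126976,
 * copying each out_buf chunk into ndd->data until offset == config_size.
 */
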
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4 bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

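/*
 * Example (illustrative): per-call buffer layout assumed by the loop above.
 * cmd_size = sizeof(*cmd) + in_length + sizeof(u32), and the firmware
 * reports per-call status in the trailing u32:
 *
 *   +-----------+-----------+--------------------+------------+
 *   | in_offset | in_length | in_buf[in_length]  | u32 status |
 *   +-----------+-----------+--------------------+------------+
 *                                                 ^
 *                                                 status = cmd + cmd_size - 4
 */
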
void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
        /* pmem mapping properties are private to libnvdimm */
        return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%u\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

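/*
 * Worked example (illustrative): the "nfree - 1 > nfree" test above is an
 * unsigned-underflow guard. One label slot is reserved, so the count is
 * decremented before reporting; if nd_label_nfree() ever returned 0,
 * "nfree - 1" would wrap to U32_MAX, the comparison would be true, and the
 * sysfs read reports 0 instead of a huge bogus count.
 */
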
static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

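/*
 * Example (illustrative sketch): how a hypothetical bus provider might
 * register a dimm that supports the label config-data commands but has no
 * write-pending-queue (WPQ) flush hints. The example_* names are not part
 * of the libnvdimm API.
 */
#if 0
static struct nvdimm *example_register_dimm(struct nvdimm_bus *bus,
                void *provider_data)
{
        unsigned long cmd_mask = 1UL << ND_CMD_GET_CONFIG_SIZE
                | 1UL << ND_CMD_GET_CONFIG_DATA
                | 1UL << ND_CMD_SET_CONFIG_DATA;

        return nvdimm_create(bus, provider_data, NULL /* groups */,
                        0 /* flags */, cmd_mask, 0 /* num_flush */,
                        NULL /* flush_wpq */);
}
#endif
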
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

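/*
 * Worked example (assumed dpa layout): the mapping spans [0x0, 0xffff] and
 * holds pmem allocations [0x0, 0x3fff] and [0x4000, 0x7fff]. blk_start
 * begins at 0x0, the first scan bumps it to 0x4000, the retry bumps it to
 * 0x8000, and a final scan finds no pmem resource at or above 0x8000, so
 * 0x8000 bytes of aliased capacity are subtracted from info->available.
 */
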
/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: region to scan; mapping[0] holds the dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}

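/*
 * Continuing the example above: for a 64K mapping whose first 32K is
 * consumed by aliased PMEM, info.available starts at 0x10000, drops to
 * 0x8000 after the alias_dpa_busy() walk, and each busy "blk-*" resource
 * in the mapping is then subtracted from that remainder.
 */
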
/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

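/*
 * Worked example (assumed dpa layout): mapping [0x0, 0xffff] with incoming
 * *overlap == 0, a "pmem-*" allocation at [0x0, 0x3fff], and a "blk-*"
 * allocation at [0xc000, 0xffff]. blk_start begins past the mapping at
 * 0x10000, the blk resource pulls it down to 0xc000, and the pmem resource
 * adds 0x4000 of busy space, so the routine returns 0xc000 - 0x4000 ==
 * 0x8000 and sets *overlap to 0x4000 for the next mapping in the set.
 */
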
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

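/*
 * Example (illustrative sketch): a hypothetical caller reserving 16M of
 * dpa for a pmem label set; label_id.id carries a "pmem-<uuid>" name as
 * generated by nd_label_gen_id(). The surrounding ndd, uuid, and
 * nd_mapping variables are assumed context, not shown here.
 */
#if 0
        struct nd_label_id label_id;
        struct resource *res;

        nd_label_gen_id(&label_id, uuid, 0);
        res = nvdimm_allocate_dpa(ndd, &label_id, nd_mapping->start, SZ_16M);
        if (!res)
                return -EBUSY;
#endif
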
/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

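/*
 * Worked example: with two dpa resources named for the same "pmem-<uuid>"
 * label_id, one of 8M and one of 4M, nvdimm_allocated_dpa() returns 12M;
 * resources carrying any other label_id name are ignored.
 */
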
static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;
        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}