linux/drivers/nvdimm/dimm_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Check whether this dimm supports the get_config_data command, i.e.
 * whether its label area is readable.  For an aliased dimm a missing
 * label area is a hard error (-ENXIO); otherwise the command is simply
 * not implemented (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

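/*
 * Common entry check for the config-data helpers below: make sure the
 * dimm is present and answers get_config_data before issuing commands.
 */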
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

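/**
 * nvdimm_init_config_data - read the dimm's label configuration area
 * @ndd: dimm driver-data, with ->nsarea established by nvdimm_init_nsarea()
 *
 * Allocates ndd->data and fills it by reading the entire config area in
 * chunks of at most min(PAGE_SIZE, nsarea.max_xfer) bytes.
 */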
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
        kfree(cmd);

        return rc;
}

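/**
 * nvdimm_set_config_data - write back a range of the label config area
 * @ndd: dimm driver-data with a previously read config area
 * @offset: byte offset into the config area at which to start writing
 * @buf: data to write
 * @len: number of bytes to write, chunked by nsarea.max_xfer
 */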
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4 bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

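/*
 * Per-dimm state bits: NDD_ALIASING is only ever set, while NDD_LOCKED
 * can be set and cleared as the dimm's lock state is re-evaluated.
 */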
void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
        /* pmem mapping properties are private to libnvdimm */
        return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

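/*
 * Callers must hold the nvdimm bus lock to keep drvdata stable, see the
 * WARN_ON_ONCE() below.
 */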
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

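/**
 * nvdimm_drvdata_release - free dimm driver-data once the last ref drops
 * @kref: embedded refcount, see get_ndd() / put_ndd()
 *
 * Releases all remaining dpa reservations before freeing the cached
 * label data and dropping the device reference.
 */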
void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "trace\n");
        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
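        /* nfree - 1 below relies on unsigned wrap to catch nfree == 0 */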
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

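/**
 * nvdimm_create - register a new dimm device on the given bus
 * @nvdimm_bus: parent bus for the new "nmem%d" device
 * @provider_data: opaque pointer later returned by nvdimm_provider_data()
 * @groups: sysfs attribute groups supplied by the bus provider
 * @flags: NDD_* state bits, e.g. NDD_ALIASING
 * @cmd_mask: ND_CMD_* commands this dimm implements
 * @num_flush: number of flush hint resources
 * @flush_wpq: resources describing the dimm's flush hint addresses
 *
 * Illustrative call (a sketch only; real providers like acpi/nfit
 * derive these values from firmware tables, and dimm_attr_groups here
 * is a hypothetical provider-defined attribute_group list):
 *
 *	nvdimm = nvdimm_create(nvdimm_bus, provider_data, dimm_attr_groups,
 *			flags, cmd_mask, 0, NULL);
 */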
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

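/*
 * device_for_each_child() callback: for each PMEM region that shares a
 * dimm with @data's mapping, push the candidate BLK start past every
 * aliased PMEM allocation, then trim the free-space hint or the
 * available count accordingly.
 */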
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

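/* release a dpa reservation made by nvdimm_allocate_dpa() */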
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

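/**
 * nvdimm_allocate_dpa - reserve [start, start + n) in the dimm's dpa space
 * @ndd: dimm driver-data hosting the dpa resource tree
 * @label_id: name for the reservation, of the form {pmem|blk}-<uuid>
 * @start: first dpa of the allocation
 * @n: number of bytes to reserve
 *
 * Returns the new resource on success, NULL if the range is busy or the
 * name allocation fails.  Must be called under the nvdimm bus lock.
 */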
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

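/**
 * nvdimm_bus_check_dimm_count - verify all expected dimms registered
 * @nvdimm_bus: bus whose child dimm devices to count
 * @dimm_count: number of dimms the bus provider expected to register
 *
 * Returns 0 when the count matches, -ENXIO otherwise.
 */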
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}