linux/drivers/nvdimm/namespace_devs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nspm->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nspm->id);
        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nsblk->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nsblk->id);
        kfree(nsblk->alt_name);
        kfree(nsblk->uuid);
        kfree(nsblk->res);
        kfree(nsblk);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_blk(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
        u8 *uuid1 = data, *uuid2 = NULL;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid2 = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid2 = nsblk->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                uuid2 = nd_btt->uuid;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                uuid2 = nd_pfn->uuid;
        }

        if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
                return -EBUSY;

        return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_region(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}
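
/*
 * Illustrative sketch (not from this file): a caller creating or
 * relabeling a namespace would perform the uniqueness check under the
 * bus lock, matching the WARN_ON_ONCE() above:
 *
 *        nvdimm_bus_lock(dev);
 *        if (!nd_is_uuid_unique(dev, uuid))
 *                rc = -EINVAL;
 *        nvdimm_bus_unlock(dev);
 */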

bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_namespace_io *nsio;

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

        if (ndns->force_raw)
                return false;

        nsio = to_nd_namespace_io(dev);
        if (region_intersects(nsio->res.start, resource_size(&nsio->res),
                                IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED)
                return false;

        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
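
/*
 * Illustrative sketch (hypothetical call site, field names assumed):
 * pmem_should_map_pages() is the policy gate between a raw memremap()
 * style mapping and a struct-page-backed devm_memremap_pages() mapping:
 *
 *        if (pmem_should_map_pages(dev))
 *                addr = devm_memremap_pages(dev, &pmem->pgmap);
 *        else
 *                addr = devm_memremap(dev, pmem->phys_addr,
 *                                pmem->size, ARCH_MEMREMAP_PMEM);
 */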

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (nspm->lbasize == 0 || nspm->lbasize == 512)
                        /* default */;
                else if (nspm->lbasize == 4096)
                        return 4096;
                else
                        dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
                                        nspm->lbasize);
        }

        /*
         * There is no namespace label (is_namespace_io()), or the label
         * indicates the default sector size.
         */
        return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
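
/*
 * Illustrative sketch: a block device consumer can feed the result,
 * which is always 512 or 4096, straight into its queue limits
 * (hypothetical call site):
 *
 *        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 */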

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim && is_nd_btt(ndns->claim))
                suffix = "s";

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                int nsidx = 0;

                if (is_namespace_pmem(&ndns->dev)) {
                        struct nd_namespace_pmem *nspm;

                        nspm = to_nd_namespace_pmem(&ndns->dev);
                        nsidx = nspm->id;
                }

                if (nsidx)
                        sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
                                        suffix ? suffix : "");
                else
                        sprintf(name, "pmem%d%s", nd_region->id,
                                        suffix ? suffix : "");
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
                                suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
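
/*
 * Callers are expected to pass a buffer of at least DISK_NAME_LEN
 * bytes, e.g. a gendisk's ->disk_name, since the name is built with
 * sprintf() (illustrative sketch):
 *
 *        nvdimm_namespace_disk_name(ndns, disk->disk_name);
 */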

const u8 *nd_dev_to_uuid(struct device *dev)
{
        static const u8 null_uuid[16];

        if (!dev)
                return null_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kstrndup(buf, len, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

out:
        kfree(input);
        return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        resource_size_t size = 0;
        struct resource *res;

        if (!nsblk->uuid)
                return 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        size += resource_size(res);
        return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;
        int count, i;

        if (!nsblk->uuid || !nsblk->lbasize || !ndd)
                return false;

        count = 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, label_id.id) != 0)
                        continue;
                /*
                 * Resources with unacknowledged adjustments indicate a
                 * failure to update labels
                 */
                if (res->flags & DPA_RESOURCE_ADJUSTED)
                        return false;
                count++;
        }

        /* These values match after a successful label update */
        if (count != nsblk->num_resources)
                return false;

        for (i = 0; i < nsblk->num_resources; i++) {
                struct resource *found = NULL;

                for_each_dpa_resource(ndd, res)
                        if (res == nsblk->res[i]) {
                                found = res;
                                break;
                        }
                /* stale resource */
                if (!found)
                        return false;
        }

        return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        resource_size_t size;

        nvdimm_bus_lock(&nsblk->common.dev);
        size = __nd_namespace_blk_validate(nsblk);
        nvdimm_bus_unlock(&nsblk->common.dev);

        return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
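
/*
 * Illustrative sketch (hypothetical probe-path check): a blk namespace
 * whose labels are stale or inconsistent validates to 0 and should be
 * rejected:
 *
 *        if (!nd_namespace_blk_validate(nsblk))
 *                return -ENXIO;
 */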

static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
                resource_size_t size = nd_namespace_blk_size(nsblk);

                if (size == 0 && nsblk->uuid)
                        /* delete allocation */;
                else if (!nsblk->uuid || !nsblk->lbasize)
                        return 0;

                return nd_blk_namespace_label_update(nd_region, nsblk, size);
        } else
                return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;
                resource_size_t new_start;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                /*
                 * Keep BLK allocations relegated to high DPA as much as
                 * possible
                 */
                if (is_blk)
                        new_start = res->start + n;
                else
                        new_start = res->start;

                rc = adjust_resource(res, new_start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end, try to
 * adjust_resource() the allocation down by @n; if @n is larger than the
 * allocation, delete it and retry with the 'new' last allocation in the
 * label set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}
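
/*
 * Worked example (assumed numbers): shrinking a 2-way interleaved pmem
 * namespace from 2G to 1G removes 1G of spa capacity, so __size_store()
 * below hands scan_free() n = 512M for each of the two dimms in the
 * interleave set.
 */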

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;

        /* allocate blk from highest dpa first */
        if (is_blk)
                first_dpa = nd_mapping->start + nd_mapping->size - n;
        else
                first_dpa = nd_mapping->start;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).  If
 * reserving PMEM, any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, struct resource *prev,
                struct resource *next, struct resource *exist,
                resource_size_t n, struct resource *valid)
{
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        unsigned long align;

        align = nd_region->align / nd_region->ndr_mappings;
        valid->start = ALIGN(valid->start, align);
        valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

        if (valid->start >= valid->end)
                goto invalid;

        if (is_reserve)
                return;

        if (!is_pmem) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_bus *nvdimm_bus;
                struct blk_alloc_info info = {
                        .nd_mapping = nd_mapping,
                        .available = nd_mapping->size,
                        .res = valid,
                };

                WARN_ON(!is_nd_blk(&nd_region->dev));
                nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
                device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
                return;
        }

        /* allocation needs to be contiguous, so this is all or nothing */
        if (resource_size(valid) < n)
                goto invalid;

        /* we've got all the space we need and no existing allocation */
        if (!exist)
                return;

        /* allocation needs to be contiguous with the existing namespace */
        if (valid->start == exist->end + 1
                        || valid->end == exist->start - 1)
                return;

 invalid:
        /* truncate @valid size to 0 */
        valid->end = valid->start - 1;
}
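
/*
 * Worked example (assumed numbers): with nd_region->align = 16M over a
 * 2-way interleave, each dimm is carved in 16M / 2 = 8M units.  A free
 * dpa span of [0x500000, 0x16fffff] rounds inward to [0x800000,
 * 0xffffff], exactly one usable 8M unit; the unaligned remainder is
 * never handed out.
 */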

enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *exist = NULL, valid;
        const resource_size_t to_allocate = n;
        int first;

        for_each_dpa_resource(ndd, res)
                if (strcmp(label_id->id, res->name) == 0)
                        exist = res;

        valid.start = nd_mapping->start;
        valid.end = mapping_end;
        valid.name = "free space";
 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                struct resource *next = res->sibling, *new_res = NULL;
                resource_size_t allocate, available = 0;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        valid.start = nd_mapping->start;
                        valid.end = res->start - 1;
                        space_valid(nd_region, ndd, label_id, NULL, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        valid.start = res->start + resource_size(res);
                        valid.end = min(mapping_end, next->start - 1);
                        space_valid(nd_region, ndd, label_id, res, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_MID;
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        valid.start = res->start + resource_size(res);
                        valid.end = mapping_end;
                        space_valid(nd_region, ndd, label_id, res, next, exist,
                                        to_allocate, &valid);
                        available = resource_size(&valid);
                        if (available)
                                loc = ALLOC_AFTER;
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        /* BLK allocate bottom up */
                        if (!is_pmem)
                                valid.start += available - allocate;

                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        valid.start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        /*
         * If we allocated nothing in the BLK case it may be because we are in
         * an initial "pmem-reserve pass".  Only do an initial BLK allocation
         * when none of the DPA space is reserved.
         */
        if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
}
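
/*
 * Illustrative walk (assumed layout): with a single existing extent in
 * the middle of the mapping,
 *
 *        | free | pmem-<uuid> | free |
 *
 * a growing pmem allocation is only offered the windows that
 * space_valid() leaves contiguous with the existing extent (grow up
 * into the space before it, or grow down into the space after it),
 * while a blk allocation is placed top-down into the highest
 * available dpa.
 */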

static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}

int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
        int rc;

        rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
                        __reserve_free_pmem);
        if (rc)
                release_free_pmem(nvdimm_bus, nd_mapping);
        return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc, j;

                /*
                 * In the BLK case try once with all unallocated PMEM
                 * reserved, and once without
                 */
                for (j = is_pmem; j < 2; j++) {
                        bool blk_only = j == 0;

                        if (blk_only) {
                                rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
                                if (rc)
                                        return rc;
                        }
                        rem = scan_allocate(nd_region, nd_mapping,
                                        label_id, rem);
                        if (blk_only)
                                release_free_pmem(nvdimm_bus, nd_mapping);

                        /* try again and allow encroachments into PMEM */
                        if (rem == 0)
                                break;
                }

                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}
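
/*
 * Note on the two-pass loop above: for a blk label the first pass
 * (blk_only == true) runs scan_allocate() with all unallocated pmem
 * capacity parked under "pmem-reserve" labels, confining blk to space
 * that cannot alias pmem; only if a remainder is left does the second
 * pass retry with the reservation released, allowing encroachment.
 */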

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;
        resource_size_t offset = 0;

        if (size && !nspm->uuid) {
                WARN_ON_ONCE(1);
                size = 0;
        }

        if (size && nspm->uuid) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_label_id label_id;
                struct resource *res;

                if (!ndd) {
                        size = 0;
                        goto out;
                }

                nd_label_gen_id(&label_id, nspm->uuid, 0);

                /* calculate a spa offset from the dpa allocation offset */
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0) {
                                offset = (res->start - nd_mapping->start)
                                        * nd_region->ndr_mappings;
                                goto out;
                        }

                WARN_ON_ONCE(1);
                size = 0;
        }

 out:
        res->start = nd_region->ndr_start + offset;
        res->end = res->start + size - 1;
}
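
/*
 * Worked example (assumed numbers): in a 2-way interleave whose dimm
 * mappings start at dpa 0x1000, a "pmem-<uuid>" extent at dpa 0x2000
 * yields offset = (0x2000 - 0x1000) * 2 = 0x2000, i.e. the namespace
 * spa range starts at ndr_start + 0x2000 because each dpa byte is
 * striped across both dimms.
 */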

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
        if (!uuid) {
                dev_dbg(dev, "%s: uuid not set\n", where);
                return true;
        }
        return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        int rc, i, id = -1;
        u8 *uuid = NULL;

        if (dev->driver || ndns->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
                id = nspm->id;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
                id = nsblk->id;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (uuid_not_set(uuid, dev, __func__))
                return -ENXIO;
        if (nd_region->ndr_mappings == 0) {
                dev_dbg(dev, "not associated with dimm(s)\n");
                return -ENXIO;
        }

        div_u64_rem(val, nd_region->align, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %ldK aligned\n", val,
                                nd_region->align / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set, or the base dimm for a blk
                 * region, need to be enabled for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_allocatable_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_resource(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        }

        /*
         * Try to delete the namespace if we deleted all of its
         * allocation, this is not the seed or 0th device for the
         * region, and it is not actively claimed by a btt, pfn, or dax
         * instance.
         */
        if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
                nd_device_unregister(dev, ND_ASYNC);

        return rc;
}
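
/*
 * Worked example (assumed numbers): writing 2G to a 2-way interleaved
 * pmem namespace with 1G currently allocated converts both values to
 * per-dimm terms (val = 1G, allocated = 512M), grows each dimm's
 * allocation by 512M, then sizes the namespace resource back up to
 * val * ndr_mappings = 2G.
 */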

static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        u8 **uuid = NULL;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = &nsblk->uuid;
        }

        if (rc == 0 && val == 0 && uuid) {
                /* setting size zero == 'delete namespace' */
                kfree(*uuid);
                *uuid = NULL;
        }

        dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_blk(dev)) {
                return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");
        return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
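
/*
 * Illustrative sketch (hypothetical check): claim drivers such as btt
 * size their metadata against this capacity before attaching:
 *
 *        if (nvdimm_namespace_capacity(ndns) < SZ_16M)
 *                return -ENXIO;
 */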

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
        int i;
        bool locked = false;
        struct device *dev = &ndns->dev;
        struct nd_region *nd_region = to_nd_region(dev->parent);

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
                        dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
                        locked = true;
                }
        }
        return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u8 *uuid = namespace_to_uuid(dev);

        if (IS_ERR(uuid))
                return PTR_ERR(uuid);
        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
        u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (list_empty(&nd_mapping->labels))
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, flags);
        nd_label_gen_id(&new_label_id, new_uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nd_label_ent *label_ent;
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);

                mutex_lock(&nd_mapping->lock);
                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                        struct nd_namespace_label *nd_label = label_ent->label;
                        struct nd_label_id label_id;

                        if (!nd_label)
                                continue;
                        nd_label_gen_id(&label_id, nd_label->uuid,
                                        nsl_get_flags(ndd, nd_label));
                        if (strcmp(old_label_id.id, label_id.id) == 0)
                                set_bit(ND_LABEL_REAP, &label_ent->flags);
                }
                mutex_unlock(&nd_mapping->lock);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;
        return 0;
}
1249
1250static ssize_t uuid_store(struct device *dev,
1251                struct device_attribute *attr, const char *buf, size_t len)
1252{
1253        struct nd_region *nd_region = to_nd_region(dev->parent);
1254        u8 *uuid = NULL;
1255        ssize_t rc = 0;
1256        u8 **ns_uuid;
1257
1258        if (is_namespace_pmem(dev)) {
1259                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1260
1261                ns_uuid = &nspm->uuid;
1262        } else if (is_namespace_blk(dev)) {
1263                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1264
1265                ns_uuid = &nsblk->uuid;
1266        } else
1267                return -ENXIO;
1268
1269        nd_device_lock(dev);
1270        nvdimm_bus_lock(dev);
1271        wait_nvdimm_bus_probe_idle(dev);
1272        if (to_ndns(dev)->claim)
1273                rc = -EBUSY;
1274        if (rc >= 0)
1275                rc = nd_uuid_store(dev, &uuid, buf, len);
1276        if (rc >= 0)
1277                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1278        if (rc >= 0)
1279                rc = nd_namespace_label_update(nd_region, dev);
1280        else
1281                kfree(uuid);
1282        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
1283                        buf[len - 1] == '\n' ? "" : "\n");
1284        nvdimm_bus_unlock(dev);
1285        nd_device_unlock(dev);
1286
1287        return rc < 0 ? rc : len;
1288}
1289static DEVICE_ATTR_RW(uuid);
1290
1291static ssize_t resource_show(struct device *dev,
1292                struct device_attribute *attr, char *buf)
1293{
1294        struct resource *res;
1295
1296        if (is_namespace_pmem(dev)) {
1297                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1298
1299                res = &nspm->nsio.res;
1300        } else if (is_namespace_io(dev)) {
1301                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1302
1303                res = &nsio->res;
1304        } else
1305                return -ENXIO;
1306
1307        /* no address to convey if the namespace has no allocation */
1308        if (resource_size(res) == 0)
1309                return -ENXIO;
1310        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
1311}
1312static DEVICE_ATTR_ADMIN_RO(resource);
1313
1314static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
1315        4096, 4104, 4160, 4224, 0 };
1316
1317static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
1318
1319static ssize_t sector_size_show(struct device *dev,
1320                struct device_attribute *attr, char *buf)
1321{
1322        if (is_namespace_blk(dev)) {
1323                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1324
1325                return nd_size_select_show(nsblk->lbasize,
1326                                blk_lbasize_supported, buf);
1327        }
1328
1329        if (is_namespace_pmem(dev)) {
1330                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1331
1332                return nd_size_select_show(nspm->lbasize,
1333                                pmem_lbasize_supported, buf);
1334        }
1335        return -ENXIO;
1336}
1337
1338static ssize_t sector_size_store(struct device *dev,
1339                struct device_attribute *attr, const char *buf, size_t len)
1340{
1341        struct nd_region *nd_region = to_nd_region(dev->parent);
1342        const unsigned long *supported;
1343        unsigned long *lbasize;
1344        ssize_t rc = 0;
1345
1346        if (is_namespace_blk(dev)) {
1347                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1348
1349                lbasize = &nsblk->lbasize;
1350                supported = blk_lbasize_supported;
1351        } else if (is_namespace_pmem(dev)) {
1352                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1353
1354                lbasize = &nspm->lbasize;
1355                supported = pmem_lbasize_supported;
1356        } else
1357                return -ENXIO;
1358
1359        nd_device_lock(dev);
1360        nvdimm_bus_lock(dev);
1361        if (to_ndns(dev)->claim)
1362                rc = -EBUSY;
1363        if (rc >= 0)
1364                rc = nd_size_select_store(dev, buf, lbasize, supported);
1365        if (rc >= 0)
1366                rc = nd_namespace_label_update(nd_region, dev);
1367        dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
1368                        buf, buf[len - 1] == '\n' ? "" : "\n");
1369        nvdimm_bus_unlock(dev);
1370        nd_device_unlock(dev);
1371
1372        return rc ? rc : len;
1373}
1374static DEVICE_ATTR_RW(sector_size);
1375
1376static ssize_t dpa_extents_show(struct device *dev,
1377                struct device_attribute *attr, char *buf)
1378{
1379        struct nd_region *nd_region = to_nd_region(dev->parent);
1380        struct nd_label_id label_id;
1381        int count = 0, i;
1382        u8 *uuid = NULL;
1383        u32 flags = 0;
1384
1385        nvdimm_bus_lock(dev);
1386        if (is_namespace_pmem(dev)) {
1387                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1388
1389                uuid = nspm->uuid;
1390                flags = 0;
1391        } else if (is_namespace_blk(dev)) {
1392                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1393
1394                uuid = nsblk->uuid;
1395                flags = NSLABEL_FLAG_LOCAL;
1396        }
1397
1398        if (!uuid)
1399                goto out;
1400
1401        nd_label_gen_id(&label_id, uuid, flags);
1402        for (i = 0; i < nd_region->ndr_mappings; i++) {
1403                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1404                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1405                struct resource *res;
1406
1407                for_each_dpa_resource(ndd, res)
1408                        if (strcmp(res->name, label_id.id) == 0)
1409                                count++;
1410        }
1411 out:
1412        nvdimm_bus_unlock(dev);
1413
1414        return sprintf(buf, "%d\n", count);
1415}
1416static DEVICE_ATTR_RO(dpa_extents);
1417
1418static int btt_claim_class(struct device *dev)
1419{
1420        struct nd_region *nd_region = to_nd_region(dev->parent);
1421        int i, loop_bitmask = 0;
1422
1423        for (i = 0; i < nd_region->ndr_mappings; i++) {
1424                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1425                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1426                struct nd_namespace_index *nsindex;
1427
1428                /*
1429                 * If any of the DIMMs do not support labels the only
1430                 * possible BTT format is v1.
1431                 */
1432                if (!ndd) {
1433                        loop_bitmask = 0;
1434                        break;
1435                }
1436
1437                nsindex = to_namespace_index(ndd, ndd->ns_current);
1438                if (nsindex == NULL)
1439                        loop_bitmask |= 1;
1440                else {
1441                        /* check whether existing labels are v1.1 or v1.2 */
1442                        if (__le16_to_cpu(nsindex->major) == 1
1443                                        && __le16_to_cpu(nsindex->minor) == 1)
1444                                loop_bitmask |= 2;
1445                        else
1446                                loop_bitmask |= 4;
1447                }
1448        }
1449        /*
1450         * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
1451         * block is found, a v1.1 label for any mapping will set bit 1, and a
1452         * v1.2 label will set bit 2.
1453         *
1454         * At the end of the loop, at most one of the three bits must be set.
1455         * If multiple bits were set, it means the different mappings disagree
1456         * about their labels, and this must be cleaned up first.
1457         *
1458         * If all the label index blocks are found to agree, nsindex of NULL
1459         * implies labels haven't been initialized yet, and when they will,
1460         * they will be of the 1.2 format, so we can assume BTT2.0
1461         *
1462         * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
1463         * found, we enforce BTT2.0
1464         *
1465         * If the loop was never entered, default to BTT1.1 (legacy namespaces)
1466         */
1467        switch (loop_bitmask) {
1468        case 0:
1469        case 2:
1470                return NVDIMM_CCLASS_BTT;
1471        case 1:
1472        case 4:
1473                return NVDIMM_CCLASS_BTT2;
1474        default:
1475                return -ENXIO;
1476        }
1477}
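
/*
 * Worked example (illustrative): in a two-DIMM region where both DIMMs
 * carry a current index block with v1.2 labels, each mapping sets bit 2,
 * so loop_bitmask == 4 and btt_claim_class() returns NVDIMM_CCLASS_BTT2.
 * If one DIMM instead held v1.1 labels, loop_bitmask would be 2 | 4 == 6,
 * the mappings disagree, and the result is -ENXIO.
 */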
1478
1479static ssize_t holder_show(struct device *dev,
1480                struct device_attribute *attr, char *buf)
1481{
1482        struct nd_namespace_common *ndns = to_ndns(dev);
1483        ssize_t rc;
1484
1485        nd_device_lock(dev);
1486        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1487        nd_device_unlock(dev);
1488
1489        return rc;
1490}
1491static DEVICE_ATTR_RO(holder);
1492
1493static int __holder_class_store(struct device *dev, const char *buf)
1494{
1495        struct nd_namespace_common *ndns = to_ndns(dev);
1496
1497        if (dev->driver || ndns->claim)
1498                return -EBUSY;
1499
1500        if (sysfs_streq(buf, "btt")) {
1501                int rc = btt_claim_class(dev);
1502
1503                if (rc < NVDIMM_CCLASS_NONE)
1504                        return rc;
1505                ndns->claim_class = rc;
1506        } else if (sysfs_streq(buf, "pfn"))
1507                ndns->claim_class = NVDIMM_CCLASS_PFN;
1508        else if (sysfs_streq(buf, "dax"))
1509                ndns->claim_class = NVDIMM_CCLASS_DAX;
1510        else if (sysfs_streq(buf, ""))
1511                ndns->claim_class = NVDIMM_CCLASS_NONE;
1512        else
1513                return -EINVAL;
1514
1515        return 0;
1516}
1517
1518static ssize_t holder_class_store(struct device *dev,
1519                struct device_attribute *attr, const char *buf, size_t len)
1520{
1521        struct nd_region *nd_region = to_nd_region(dev->parent);
1522        int rc;
1523
1524        nd_device_lock(dev);
1525        nvdimm_bus_lock(dev);
1526        wait_nvdimm_bus_probe_idle(dev);
1527        rc = __holder_class_store(dev, buf);
1528        if (rc >= 0)
1529                rc = nd_namespace_label_update(nd_region, dev);
1530        dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
1531        nvdimm_bus_unlock(dev);
1532        nd_device_unlock(dev);
1533
1534        return rc < 0 ? rc : len;
1535}
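
/*
 * Usage example (hypothetical sysfs path): selecting and reading back a
 * BTT claim class for an idle namespace:
 *
 *	echo "btt" > /sys/bus/nd/devices/namespace0.0/holder_class
 *	cat /sys/bus/nd/devices/namespace0.0/holder_class
 *
 * The write fails with -EBUSY while the namespace is active or already
 * claimed, and with -EINVAL for anything other than "btt", "pfn", "dax",
 * or the empty string.
 */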
1536
1537static ssize_t holder_class_show(struct device *dev,
1538                struct device_attribute *attr, char *buf)
1539{
1540        struct nd_namespace_common *ndns = to_ndns(dev);
1541        ssize_t rc;
1542
1543        nd_device_lock(dev);
1544        if (ndns->claim_class == NVDIMM_CCLASS_NONE)
1545                rc = sprintf(buf, "\n");
1546        else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
1547                        (ndns->claim_class == NVDIMM_CCLASS_BTT2))
1548                rc = sprintf(buf, "btt\n");
1549        else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
1550                rc = sprintf(buf, "pfn\n");
1551        else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
1552                rc = sprintf(buf, "dax\n");
1553        else
1554                rc = sprintf(buf, "<unknown>\n");
1555        nd_device_unlock(dev);
1556
1557        return rc;
1558}
1559static DEVICE_ATTR_RW(holder_class);
1560
1561static ssize_t mode_show(struct device *dev,
1562                struct device_attribute *attr, char *buf)
1563{
1564        struct nd_namespace_common *ndns = to_ndns(dev);
1565        struct device *claim;
1566        char *mode;
1567        ssize_t rc;
1568
1569        nd_device_lock(dev);
1570        claim = ndns->claim;
1571        if (claim && is_nd_btt(claim))
1572                mode = "safe";
1573        else if (claim && is_nd_pfn(claim))
1574                mode = "memory";
1575        else if (claim && is_nd_dax(claim))
1576                mode = "dax";
1577        else if (!claim && pmem_should_map_pages(dev))
1578                mode = "memory";
1579        else
1580                mode = "raw";
1581        rc = sprintf(buf, "%s\n", mode);
1582        nd_device_unlock(dev);
1583
1584        return rc;
1585}
1586static DEVICE_ATTR_RO(mode);
1587
1588static ssize_t force_raw_store(struct device *dev,
1589                struct device_attribute *attr, const char *buf, size_t len)
1590{
1591        bool force_raw;
1592        int rc = strtobool(buf, &force_raw);
1593
1594        if (rc)
1595                return rc;
1596
1597        to_ndns(dev)->force_raw = force_raw;
1598        return len;
1599}
1600
1601static ssize_t force_raw_show(struct device *dev,
1602                struct device_attribute *attr, char *buf)
1603{
1604        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1605}
1606static DEVICE_ATTR_RW(force_raw);
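
/*
 * Usage example (hypothetical sysfs path): strtobool() accepts the usual
 * boolean spellings ("1"/"0", "y"/"n", ...), so raw access can be forced
 * with:
 *
 *	echo 1 > /sys/bus/nd/devices/namespace0.0/force_raw
 */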
1607
1608static struct attribute *nd_namespace_attributes[] = {
1609        &dev_attr_nstype.attr,
1610        &dev_attr_size.attr,
1611        &dev_attr_mode.attr,
1612        &dev_attr_uuid.attr,
1613        &dev_attr_holder.attr,
1614        &dev_attr_resource.attr,
1615        &dev_attr_alt_name.attr,
1616        &dev_attr_force_raw.attr,
1617        &dev_attr_sector_size.attr,
1618        &dev_attr_dpa_extents.attr,
1619        &dev_attr_holder_class.attr,
1620        NULL,
1621};
1622
1623static umode_t namespace_visible(struct kobject *kobj,
1624                struct attribute *a, int n)
1625{
1626        struct device *dev = container_of(kobj, struct device, kobj);
1627
1628        if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
1629                return 0;
1630
1631        if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1632                if (a == &dev_attr_size.attr)
1633                        return 0644;
1634
1635                return a->mode;
1636        }
1637
1638        /* base is_namespace_io() attributes */
1639        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
1640            a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
1641            a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
1642            a == &dev_attr_resource.attr)
1643                return a->mode;
1644
1645        return 0;
1646}
1647
1648static struct attribute_group nd_namespace_attribute_group = {
1649        .attrs = nd_namespace_attributes,
1650        .is_visible = namespace_visible,
1651};
1652
1653static const struct attribute_group *nd_namespace_attribute_groups[] = {
1654        &nd_device_attribute_group,
1655        &nd_namespace_attribute_group,
1656        &nd_numa_attribute_group,
1657        NULL,
1658};
1659
1660static const struct device_type namespace_io_device_type = {
1661        .name = "nd_namespace_io",
1662        .release = namespace_io_release,
1663        .groups = nd_namespace_attribute_groups,
1664};
1665
1666static const struct device_type namespace_pmem_device_type = {
1667        .name = "nd_namespace_pmem",
1668        .release = namespace_pmem_release,
1669        .groups = nd_namespace_attribute_groups,
1670};
1671
1672static const struct device_type namespace_blk_device_type = {
1673        .name = "nd_namespace_blk",
1674        .release = namespace_blk_release,
1675        .groups = nd_namespace_attribute_groups,
1676};
1677
1678static bool is_namespace_pmem(const struct device *dev)
1679{
1680        return dev ? dev->type == &namespace_pmem_device_type : false;
1681}
1682
1683static bool is_namespace_blk(const struct device *dev)
1684{
1685        return dev ? dev->type == &namespace_blk_device_type : false;
1686}
1687
1688static bool is_namespace_io(const struct device *dev)
1689{
1690        return dev ? dev->type == &namespace_io_device_type : false;
1691}
1692
1693struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1694{
1695        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1696        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
1697        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
1698        struct nd_namespace_common *ndns = NULL;
1699        resource_size_t size;
1700
1701        if (nd_btt || nd_pfn || nd_dax) {
1702                if (nd_btt)
1703                        ndns = nd_btt->ndns;
1704                else if (nd_pfn)
1705                        ndns = nd_pfn->ndns;
1706                else if (nd_dax)
1707                        ndns = nd_dax->nd_pfn.ndns;
1708
1709                if (!ndns)
1710                        return ERR_PTR(-ENODEV);
1711
1712                /*
1713                 * Flush any in-progress probes / removals in the driver
1714                 * for the raw personality of this namespace.
1715                 */
1716                nd_device_lock(&ndns->dev);
1717                nd_device_unlock(&ndns->dev);
1718                if (ndns->dev.driver) {
1719                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1720                                        dev_name(dev));
1721                        return ERR_PTR(-EBUSY);
1722                }
1723                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
1724                                        "host (%s) vs claim (%s) mismatch\n",
1725                                        dev_name(dev),
1726                                        dev_name(ndns->claim)))
1727                        return ERR_PTR(-ENXIO);
1728        } else {
1729                ndns = to_ndns(dev);
1730                if (ndns->claim) {
1731                        dev_dbg(dev, "claimed by %s, failing probe\n",
1732                                dev_name(ndns->claim));
1733
1734                        return ERR_PTR(-ENXIO);
1735                }
1736        }
1737
1738        if (nvdimm_namespace_locked(ndns))
1739                return ERR_PTR(-EACCES);
1740
1741        size = nvdimm_namespace_capacity(ndns);
1742        if (size < ND_MIN_NAMESPACE_SIZE) {
1743                dev_dbg(&ndns->dev, "%pa is too small, must be at least %#x\n",
1744                                &size, ND_MIN_NAMESPACE_SIZE);
1745                return ERR_PTR(-ENODEV);
1746        }
1747
1748        /*
1749         * Note, alignment validation for fsdax and devdax mode
1750         * namespaces happens in nd_pfn_validate() where infoblock
1751         * padding parameters can be applied.
1752         */
1753        if (pmem_should_map_pages(dev)) {
1754                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
1755                struct resource *res = &nsio->res;
1756
1757                if (!IS_ALIGNED(res->start | (res->end + 1),
1758                                        memremap_compat_align())) {
1759                        dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
1760                        return ERR_PTR(-EOPNOTSUPP);
1761                }
1762        }
1763
1764        if (is_namespace_pmem(&ndns->dev)) {
1765                struct nd_namespace_pmem *nspm;
1766
1767                nspm = to_nd_namespace_pmem(&ndns->dev);
1768                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
1769                        return ERR_PTR(-ENODEV);
1770        } else if (is_namespace_blk(&ndns->dev)) {
1771                struct nd_namespace_blk *nsblk;
1772
1773                nsblk = to_nd_namespace_blk(&ndns->dev);
1774                if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
1775                        return ERR_PTR(-ENODEV);
1776                if (!nsblk->lbasize) {
1777                        dev_dbg(&ndns->dev, "sector size not set\n");
1778                        return ERR_PTR(-ENODEV);
1779                }
1780                if (!nd_namespace_blk_validate(nsblk))
1781                        return ERR_PTR(-ENODEV);
1782        }
1783
1784        return ndns;
1785}
1786EXPORT_SYMBOL(nvdimm_namespace_common_probe);
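
/*
 * Sketch of a typical caller, in the style of a claiming driver's probe
 * (not taken from this file):
 *
 *	ndns = nvdimm_namespace_common_probe(dev);
 *	if (IS_ERR(ndns))
 *		return PTR_ERR(ndns);
 *
 * On success the namespace is known to be unclaimed (or claimed by @dev),
 * unlocked, at least ND_MIN_NAMESPACE_SIZE, and to have its uuid set.
 */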
1787
1788int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
1789                resource_size_t size)
1790{
1791        if (is_namespace_blk(&ndns->dev))
1792                return 0;
1793        return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
1794}
1795EXPORT_SYMBOL_GPL(devm_namespace_enable);
1796
1797void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
1798{
1799        if (is_namespace_blk(&ndns->dev))
1800                return;
1801        devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
1802}
1803EXPORT_SYMBOL_GPL(devm_namespace_disable);
1804
1805static struct device **create_namespace_io(struct nd_region *nd_region)
1806{
1807        struct nd_namespace_io *nsio;
1808        struct device *dev, **devs;
1809        struct resource *res;
1810
1811        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1812        if (!nsio)
1813                return NULL;
1814
1815        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1816        if (!devs) {
1817                kfree(nsio);
1818                return NULL;
1819        }
1820
1821        dev = &nsio->common.dev;
1822        dev->type = &namespace_io_device_type;
1823        dev->parent = &nd_region->dev;
1824        res = &nsio->res;
1825        res->name = dev_name(&nd_region->dev);
1826        res->flags = IORESOURCE_MEM;
1827        res->start = nd_region->ndr_start;
1828        res->end = res->start + nd_region->ndr_size - 1;
1829
1830        devs[0] = dev;
1831        return devs;
1832}
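
/*
 * Note: the devs[] arrays in this file are NULL-terminated, which is why
 * two slots are allocated above for a single namespace (and count + 2 in
 * scan_labels() below); callers iterate until the first NULL entry.
 */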
1833
1834static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1835                u64 cookie, u16 pos)
1836{
1837        struct nd_namespace_label *found = NULL;
1838        int i;
1839
1840        for (i = 0; i < nd_region->ndr_mappings; i++) {
1841                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1842                struct nd_interleave_set *nd_set = nd_region->nd_set;
1843                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1844                struct nd_label_ent *label_ent;
1845                bool found_uuid = false;
1846
1847                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1848                        struct nd_namespace_label *nd_label = label_ent->label;
1849                        u16 position, nlabel;
1850
1851                        if (!nd_label)
1852                                continue;
1853                        position = nsl_get_position(ndd, nd_label);
1854                        nlabel = nsl_get_nlabel(ndd, nd_label);
1855
1856                        if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
1857                                continue;
1858
1859                        if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1860                                continue;
1861
1862                        if (!nsl_validate_type_guid(ndd, nd_label,
1863                                                    &nd_set->type_guid))
1864                                continue;
1865
1866                        if (found_uuid) {
1867                                dev_dbg(ndd->dev, "duplicate entry for uuid\n");
1868                                return false;
1869                        }
1870                        found_uuid = true;
1871                        if (nlabel != nd_region->ndr_mappings)
1872                                continue;
1873                        if (position != pos)
1874                                continue;
1875                        found = nd_label;
1876                        break;
1877                }
1878                if (found)
1879                        break;
1880        }
1881        return found != NULL;
1882}
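
/*
 * Example: a pmem namespace spanning a 4-way interleave set only counts
 * as complete when has_uuid_at_pos() succeeds for pos 0..3, i.e. every
 * DIMM contributes exactly one label with this uuid, a matching cookie,
 * and nlabel == 4; a duplicate uuid on any single DIMM fails the check.
 */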
1883
1884static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1885{
1886        int i;
1887
1888        if (!pmem_id)
1889                return -ENODEV;
1890
1891        for (i = 0; i < nd_region->ndr_mappings; i++) {
1892                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1893                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1894                struct nd_namespace_label *nd_label = NULL;
1895                u64 hw_start, hw_end, pmem_start, pmem_end;
1896                struct nd_label_ent *label_ent;
1897
1898                lockdep_assert_held(&nd_mapping->lock);
1899                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1900                        nd_label = label_ent->label;
1901                        if (!nd_label)
1902                                continue;
1903                        if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1904                                break;
1905                        nd_label = NULL;
1906                }
1907
1908                if (!nd_label) {
1909                        WARN_ON(1);
1910                        return -EINVAL;
1911                }
1912
1913                /*
1914                 * Check that this label is compliant with the dpa
1915                 * range published in NFIT
1916                 */
1917                hw_start = nd_mapping->start;
1918                hw_end = hw_start + nd_mapping->size;
1919                pmem_start = nsl_get_dpa(ndd, nd_label);
1920                pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
1921                if (pmem_start >= hw_start && pmem_start < hw_end
1922                                && pmem_end <= hw_end && pmem_end > hw_start)
1923                        /* pass */;
1924                else {
1925                        dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1926                                        dev_name(ndd->dev), nd_label->uuid);
1927                        return -EINVAL;
1928                }
1929
1930                /* move recently validated label to the front of the list */
1931                list_move(&label_ent->list, &nd_mapping->labels);
1932        }
1933        return 0;
1934}
1935
1936/**
1937 * create_namespace_pmem - validate interleave set labelling, retrieve label0
1938 * @nd_region: region with mappings to validate
1939 * @nd_mapping: container of dpa-resource-root + labels
1940 * @nd_label: target pmem namespace label to evaluate
1941 */
1942static struct device *create_namespace_pmem(struct nd_region *nd_region,
1943                                            struct nd_mapping *nd_mapping,
1944                                            struct nd_namespace_label *nd_label)
1945{
1946        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1947        struct nd_namespace_index *nsindex =
1948                to_namespace_index(ndd, ndd->ns_current);
1949        u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1950        u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1951        struct nd_label_ent *label_ent;
1952        struct nd_namespace_pmem *nspm;
1953        resource_size_t size = 0;
1954        struct resource *res;
1955        struct device *dev;
1956        int rc = 0;
1957        u16 i;
1958
1959        if (cookie == 0) {
1960                dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1961                return ERR_PTR(-ENXIO);
1962        }
1963
1964        if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
1965                dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1966                                nd_label->uuid);
1967                if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
1968                        return ERR_PTR(-EAGAIN);
1969
1970                dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1971                                nd_label->uuid);
1972        }
1973
1974        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1975        if (!nspm)
1976                return ERR_PTR(-ENOMEM);
1977
1978        nspm->id = -1;
1979        dev = &nspm->nsio.common.dev;
1980        dev->type = &namespace_pmem_device_type;
1981        dev->parent = &nd_region->dev;
1982        res = &nspm->nsio.res;
1983        res->name = dev_name(&nd_region->dev);
1984        res->flags = IORESOURCE_MEM;
1985
1986        for (i = 0; i < nd_region->ndr_mappings; i++) {
1987                if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1988                        continue;
1989                if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
1990                        continue;
1991                break;
1992        }
1993
1994        if (i < nd_region->ndr_mappings) {
1995                struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1996
1997                /*
1998                 * Give up if we don't find an instance of a uuid at each
1999                 * position (from 0 to nd_region->ndr_mappings - 1), or if we
2000                 * find a dimm with two instances of the same uuid.
2001                 */
2002                dev_err(&nd_region->dev, "%s missing label for %pUb\n",
2003                                nvdimm_name(nvdimm), nd_label->uuid);
2004                rc = -EINVAL;
2005                goto err;
2006        }
2007
2008        /*
2009         * Fix up each mapping's 'labels' to have the validated pmem label for
2010         * that position at labels[0], and NULL at labels[1].  In the process,
2011         * check that the namespace aligns with the interleave set.  We know
2012         * that it does not overlap with any blk namespaces by virtue of
2013         * the dimm being enabled (i.e. nd_label_reserve_dpa()
2014         * succeeded).
2015         */
2016        rc = select_pmem_id(nd_region, nd_label->uuid);
2017        if (rc)
2018                goto err;
2019
2020        /* Calculate total size and populate namespace properties from label0 */
2021        for (i = 0; i < nd_region->ndr_mappings; i++) {
2022                struct nd_namespace_label *label0;
2023                struct nvdimm_drvdata *ndd;
2024
2025                nd_mapping = &nd_region->mapping[i];
2026                label_ent = list_first_entry_or_null(&nd_mapping->labels,
2027                                typeof(*label_ent), list);
2028                label0 = label_ent ? label_ent->label : NULL;
2029
2030                if (!label0) {
2031                        WARN_ON(1);
2032                        continue;
2033                }
2034
2035                ndd = to_ndd(nd_mapping);
2036                size += nsl_get_rawsize(ndd, label0);
2037                if (nsl_get_position(ndd, label0) != 0)
2038                        continue;
2039                WARN_ON(nspm->alt_name || nspm->uuid);
2040                nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
2041                                         NSLABEL_NAME_LEN, GFP_KERNEL);
2042                nspm->uuid = kmemdup((void __force *) label0->uuid,
2043                                NSLABEL_UUID_LEN, GFP_KERNEL);
2044                nspm->lbasize = nsl_get_lbasize(ndd, label0);
2045                nspm->nsio.common.claim_class =
2046                        nsl_get_claim_class(ndd, label0);
2047        }
2048
2049        if (!nspm->alt_name || !nspm->uuid) {
2050                rc = -ENOMEM;
2051                goto err;
2052        }
2053
2054        nd_namespace_pmem_set_resource(nd_region, nspm, size);
2055
2056        return dev;
2057 err:
2058        namespace_pmem_release(dev);
2059        switch (rc) {
2060        case -EINVAL:
2061                dev_dbg(&nd_region->dev, "invalid label(s)\n");
2062                break;
2063        case -ENODEV:
2064                dev_dbg(&nd_region->dev, "label not found\n");
2065                break;
2066        default:
2067                dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2068                break;
2069        }
2070        return ERR_PTR(rc);
2071}
2072
2073struct resource *nsblk_add_resource(struct nd_region *nd_region,
2074                struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
2075                resource_size_t start)
2076{
2077        struct nd_label_id label_id;
2078        struct resource *res;
2079
2080        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
2081        res = krealloc(nsblk->res,
2082                        sizeof(void *) * (nsblk->num_resources + 1),
2083                        GFP_KERNEL);
2084        if (!res)
2085                return NULL;
2086        nsblk->res = (struct resource **) res;
2087        for_each_dpa_resource(ndd, res)
2088                if (strcmp(res->name, label_id.id) == 0
2089                                && res->start == start) {
2090                        nsblk->res[nsblk->num_resources++] = res;
2091                        return res;
2092                }
2093        return NULL;
2094}
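
/*
 * Note: the krealloc() above only grows the array of resource pointers;
 * the resources themselves belong to the nvdimm_drvdata's dpa resource
 * tree, which is why namespace_blk_release() frees only nsblk->res.
 */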
2095
2096static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
2097{
2098        struct nd_namespace_blk *nsblk;
2099        struct device *dev;
2100
2101        if (!is_nd_blk(&nd_region->dev))
2102                return NULL;
2103
2104        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2105        if (!nsblk)
2106                return NULL;
2107
2108        dev = &nsblk->common.dev;
2109        dev->type = &namespace_blk_device_type;
2110        nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2111        if (nsblk->id < 0) {
2112                kfree(nsblk);
2113                return NULL;
2114        }
2115        dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
2116        dev->parent = &nd_region->dev;
2117
2118        return &nsblk->common.dev;
2119}
2120
2121static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
2122{
2123        struct nd_namespace_pmem *nspm;
2124        struct resource *res;
2125        struct device *dev;
2126
2127        if (!is_memory(&nd_region->dev))
2128                return NULL;
2129
2130        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2131        if (!nspm)
2132                return NULL;
2133
2134        dev = &nspm->nsio.common.dev;
2135        dev->type = &namespace_pmem_device_type;
2136        dev->parent = &nd_region->dev;
2137        res = &nspm->nsio.res;
2138        res->name = dev_name(&nd_region->dev);
2139        res->flags = IORESOURCE_MEM;
2140
2141        nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2142        if (nspm->id < 0) {
2143                kfree(nspm);
2144                return NULL;
2145        }
2146        dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
2147        nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2148
2149        return dev;
2150}
2151
2152void nd_region_create_ns_seed(struct nd_region *nd_region)
2153{
2154        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2155
2156        if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
2157                return;
2158
2159        if (is_nd_blk(&nd_region->dev))
2160                nd_region->ns_seed = nd_namespace_blk_create(nd_region);
2161        else
2162                nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
2163
2164        /*
2165         * Seed creation failures are not fatal; provisioning is simply
2166         * disabled until memory becomes available.
2167         */
2168        if (!nd_region->ns_seed)
2169                dev_err(&nd_region->dev, "failed to create %s namespace\n",
2170                                is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
2171        else
2172                nd_device_register(nd_region->ns_seed);
2173}
2174
2175void nd_region_create_dax_seed(struct nd_region *nd_region)
2176{
2177        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2178        nd_region->dax_seed = nd_dax_create(nd_region);
2179        /*
2180         * Seed creation failures are not fatal; provisioning is simply
2181         * disabled until memory becomes available.
2182         */
2183        if (!nd_region->dax_seed)
2184                dev_err(&nd_region->dev, "failed to create dax namespace\n");
2185}
2186
2187void nd_region_create_pfn_seed(struct nd_region *nd_region)
2188{
2189        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2190        nd_region->pfn_seed = nd_pfn_create(nd_region);
2191        /*
2192         * Seed creation failures are not fatal; provisioning is simply
2193         * disabled until memory becomes available.
2194         */
2195        if (!nd_region->pfn_seed)
2196                dev_err(&nd_region->dev, "failed to create pfn namespace\n");
2197}
2198
2199void nd_region_create_btt_seed(struct nd_region *nd_region)
2200{
2201        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2202        nd_region->btt_seed = nd_btt_create(nd_region);
2203        /*
2204         * Seed creation failures are not fatal; provisioning is simply
2205         * disabled until memory becomes available.
2206         */
2207        if (!nd_region->btt_seed)
2208                dev_err(&nd_region->dev, "failed to create btt namespace\n");
2209}
2210
2211static int add_namespace_resource(struct nd_region *nd_region,
2212                struct nd_namespace_label *nd_label, struct device **devs,
2213                int count)
2214{
2215        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2216        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2217        int i;
2218
2219        for (i = 0; i < count; i++) {
2220                u8 *uuid = namespace_to_uuid(devs[i]);
2221                struct resource *res;
2222
2223                if (IS_ERR_OR_NULL(uuid)) {
2224                        WARN_ON(1);
2225                        continue;
2226                }
2227
2228                if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
2229                        continue;
2230                if (is_namespace_blk(devs[i])) {
2231                        res = nsblk_add_resource(nd_region, ndd,
2232                                        to_nd_namespace_blk(devs[i]),
2233                                        nsl_get_dpa(ndd, nd_label));
2234                        if (!res)
2235                                return -ENXIO;
2236                        nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
2237                } else {
2238                        dev_err(&nd_region->dev,
2239                                        "error: conflicting extents for uuid: %pUb\n",
2240                                        nd_label->uuid);
2241                        return -ENXIO;
2242                }
2243                break;
2244        }
2245
2246        return i;
2247}
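
/*
 * Return convention for add_namespace_resource(): i == count means no
 * existing device matched the label's uuid, so the caller should create
 * a new namespace; i < count means the label was folded into devs[i]; a
 * negative value is an error.
 */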
2248
2249static struct device *create_namespace_blk(struct nd_region *nd_region,
2250                struct nd_namespace_label *nd_label, int count)
2251{
2253        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2254        struct nd_interleave_set *nd_set = nd_region->nd_set;
2255        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2256        struct nd_namespace_blk *nsblk;
2257        char name[NSLABEL_NAME_LEN];
2258        struct device *dev = NULL;
2259        struct resource *res;
2260
2261        if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
2262                return ERR_PTR(-EAGAIN);
2263        if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
2264                return ERR_PTR(-EAGAIN);
2265
2266        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2267        if (!nsblk)
2268                return ERR_PTR(-ENOMEM);
2269        dev = &nsblk->common.dev;
2270        dev->type = &namespace_blk_device_type;
2271        dev->parent = &nd_region->dev;
2272        nsblk->id = -1;
2273        nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
2274        nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
2275        nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
2276        if (!nsblk->uuid)
2277                goto blk_err;
2278        nsl_get_name(ndd, nd_label, name);
2279        if (name[0]) {
2280                nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
2281                if (!nsblk->alt_name)
2282                        goto blk_err;
2283        }
2284        res = nsblk_add_resource(nd_region, ndd, nsblk,
2285                        nsl_get_dpa(ndd, nd_label));
2286        if (!res)
2287                goto blk_err;
2288        nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2289        return dev;
2290 blk_err:
2291        namespace_blk_release(dev);
2292        return ERR_PTR(-ENXIO);
2293}
2294
2295static int cmp_dpa(const void *a, const void *b)
2296{
2297        const struct device *dev_a = *(const struct device **) a;
2298        const struct device *dev_b = *(const struct device **) b;
2299        struct nd_namespace_blk *nsblk_a, *nsblk_b;
2300        struct nd_namespace_pmem *nspm_a, *nspm_b;
2301
2302        if (is_namespace_io(dev_a))
2303                return 0;
2304
2305        if (is_namespace_blk(dev_a)) {
2306                nsblk_a = to_nd_namespace_blk(dev_a);
2307                nsblk_b = to_nd_namespace_blk(dev_b);
2308                /* numeric compare; memcmp() is endian-dependent here */
2309                return nsblk_a->res[0]->start < nsblk_b->res[0]->start ? -1 :
2310                        nsblk_a->res[0]->start > nsblk_b->res[0]->start;
2311        }
2312
2313        nspm_a = to_nd_namespace_pmem(dev_a);
2314        nspm_b = to_nd_namespace_pmem(dev_b);
2315        /* ditto: numeric compare of the pmem resource starts */
2316        return nspm_a->nsio.res.start < nspm_b->nsio.res.start ? -1 :
2317                nspm_a->nsio.res.start > nspm_b->nsio.res.start;
2318}
2319
2320static struct device **scan_labels(struct nd_region *nd_region)
2321{
2322        int i, count = 0;
2323        struct device *dev, **devs = NULL;
2324        struct nd_label_ent *label_ent, *e;
2325        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2326        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2327        resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
2328
2329        /* "safe" because create_namespace_pmem() might list_move() label_ent */
2330        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
2331                struct nd_namespace_label *nd_label = label_ent->label;
2332                struct device **__devs;
2333                u32 flags;
2334
2335                if (!nd_label)
2336                        continue;
2337                flags = nsl_get_flags(ndd, nd_label);
2338                if (is_nd_blk(&nd_region->dev)
2339                                == !!(flags & NSLABEL_FLAG_LOCAL))
2340                        /* pass, region matches label type */;
2341                else
2342                        continue;
2343
2344                /* skip labels that describe extents outside of the region */
2345                if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
2346                    nsl_get_dpa(ndd, nd_label) > map_end)
2347                        continue;
2348
2349                i = add_namespace_resource(nd_region, nd_label, devs, count);
2350                if (i < 0)
2351                        goto err;
2352                if (i < count)
2353                        continue;
2354                __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
2355                if (!__devs)
2356                        goto err;
2357                memcpy(__devs, devs, sizeof(dev) * count);
2358                kfree(devs);
2359                devs = __devs;
2360
2361                if (is_nd_blk(&nd_region->dev))
2362                        dev = create_namespace_blk(nd_region, nd_label, count);
2363                else
2364                        dev = create_namespace_pmem(nd_region, nd_mapping,
2365                                                    nd_label);
2366
2367                if (IS_ERR(dev)) {
2368                        switch (PTR_ERR(dev)) {
2369                        case -EAGAIN:
2370                                /* skip invalid labels */
2371                                continue;
2372                        case -ENODEV:
2373                                /* fallthrough to seed creation */
2374                                break;
2375                        default:
2376                                goto err;
2377                        }
2378                } else
2379                        devs[count++] = dev;
2380
2381        }
2382
2383        dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2384                        count, is_nd_blk(&nd_region->dev)
2385                        ? "blk" : "pmem", count == 1 ? "" : "s");
2386
2387        if (count == 0) {
2388                /* Publish a zero-sized namespace for userspace to configure. */
2389                nd_mapping_free_labels(nd_mapping);
2390
2391                devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
2392                if (!devs)
2393                        goto err;
2394                if (is_nd_blk(&nd_region->dev)) {
2395                        struct nd_namespace_blk *nsblk;
2396
2397                        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2398                        if (!nsblk)
2399                                goto err;
2400                        dev = &nsblk->common.dev;
2401                        dev->type = &namespace_blk_device_type;
2402                } else {
2403                        struct nd_namespace_pmem *nspm;
2404
2405                        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2406                        if (!nspm)
2407                                goto err;
2408                        dev = &nspm->nsio.common.dev;
2409                        dev->type = &namespace_pmem_device_type;
2410                        nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2411                }
2412                dev->parent = &nd_region->dev;
2413                devs[count++] = dev;
2414        } else if (is_memory(&nd_region->dev)) {
2415                /* clean unselected labels */
2416                for (i = 0; i < nd_region->ndr_mappings; i++) {
2417                        struct list_head *l, *e;
2418                        LIST_HEAD(list);
2419                        int j;
2420
2421                        nd_mapping = &nd_region->mapping[i];
2422                        if (list_empty(&nd_mapping->labels)) {
2423                                WARN_ON(1);
2424                                continue;
2425                        }
2426
2427                        j = count;
2428                        list_for_each_safe(l, e, &nd_mapping->labels) {
2429                                if (!j--)
2430                                        break;
2431                                list_move_tail(l, &list);
2432                        }
2433                        nd_mapping_free_labels(nd_mapping);
2434                        list_splice_init(&list, &nd_mapping->labels);
2435                }
2436        }
2437
2438        if (count > 1)
2439                sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2440
2441        return devs;
2442
2443 err:
2444        if (devs) {
2445                for (i = 0; devs[i]; i++)
2446                        if (is_nd_blk(&nd_region->dev))
2447                                namespace_blk_release(devs[i]);
2448                        else
2449                                namespace_pmem_release(devs[i]);
2450                kfree(devs);
2451        }
2452        return NULL;
2453}
2454
2455static struct device **create_namespaces(struct nd_region *nd_region)
2456{
2457        struct nd_mapping *nd_mapping;
2458        struct device **devs;
2459        int i;
2460
2461        if (nd_region->ndr_mappings == 0)
2462                return NULL;
2463
2464        /* lock down all mappings while we scan labels */
2465        for (i = 0; i < nd_region->ndr_mappings; i++) {
2466                nd_mapping = &nd_region->mapping[i];
2467                mutex_lock_nested(&nd_mapping->lock, i);
2468        }
2469
2470        devs = scan_labels(nd_region);
2471
2472        for (i = 0; i < nd_region->ndr_mappings; i++) {
2473                int reverse = nd_region->ndr_mappings - 1 - i;
2474
2475                nd_mapping = &nd_region->mapping[reverse];
2476                mutex_unlock(&nd_mapping->lock);
2477        }
2478
2479        return devs;
2480}
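
/*
 * The mutex_lock_nested() annotation above gives each mapping lock its
 * own lockdep subclass (keyed by the mapping index), so holding all of
 * them at once is not flagged as recursive locking; the ordering is safe
 * because the locks are always acquired in ascending index order.
 */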
2481
2482static void deactivate_labels(void *region)
2483{
2484        struct nd_region *nd_region = region;
2485        int i;
2486
2487        for (i = 0; i < nd_region->ndr_mappings; i++) {
2488                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2489                struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2490                struct nvdimm *nvdimm = nd_mapping->nvdimm;
2491
2492                mutex_lock(&nd_mapping->lock);
2493                nd_mapping_free_labels(nd_mapping);
2494                mutex_unlock(&nd_mapping->lock);
2495
2496                put_ndd(ndd);
2497                nd_mapping->ndd = NULL;
2498                if (ndd)
2499                        atomic_dec(&nvdimm->busy);
2500        }
2501}
2502
2503static int init_active_labels(struct nd_region *nd_region)
2504{
2505        int i, rc = 0;
2506
2507        for (i = 0; i < nd_region->ndr_mappings; i++) {
2508                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2509                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2510                struct nvdimm *nvdimm = nd_mapping->nvdimm;
2511                struct nd_label_ent *label_ent;
2512                int count, j;
2513
2514                /*
2515                 * If the dimm is disabled then we may need to prevent
2516                 * the region from being activated.
2517                 */
2518                if (!ndd) {
2519                        if (test_bit(NDD_LOCKED, &nvdimm->flags))
2520                                /* fail, label data may be unreadable */;
2521                        else if (test_bit(NDD_LABELING, &nvdimm->flags))
2522                                /* fail, labels needed to disambiguate dpa */;
2523                        else
2524                                continue;
2525
2526                        dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2527                                        dev_name(&nd_mapping->nvdimm->dev),
2528                                        test_bit(NDD_LOCKED, &nvdimm->flags)
2529                                        ? "locked" : "disabled");
2530                        rc = -ENXIO;
2531                        goto out;
2532                }
2533                nd_mapping->ndd = ndd;
2534                atomic_inc(&nvdimm->busy);
2535                get_ndd(ndd);
2536
2537                count = nd_label_active_count(ndd);
2538                dev_dbg(ndd->dev, "count: %d\n", count);
2539                if (!count)
2540                        continue;
2541                for (j = 0; j < count; j++) {
2542                        struct nd_namespace_label *label;
2543
2544                        label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2545                        if (!label_ent)
2546                                break;
2547                        label = nd_label_active(ndd, j);
2548                        if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
2549                                u32 flags = nsl_get_flags(ndd, label);
2550
2551                                flags &= ~NSLABEL_FLAG_LOCAL;
2552                                nsl_set_flags(ndd, label, flags);
2553                        }
2554                        label_ent->label = label;
2555
2556                        mutex_lock(&nd_mapping->lock);
2557                        list_add_tail(&label_ent->list, &nd_mapping->labels);
2558                        mutex_unlock(&nd_mapping->lock);
2559                }
2560
2561                if (j < count)
2562                        break;
2563        }
2564
2565        if (i < nd_region->ndr_mappings)
2566                rc = -ENOMEM;
2567
2568out:
2569        if (rc) {
2570                deactivate_labels(nd_region);
2571                return rc;
2572        }
2573
2574        return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2575                                        nd_region);
2576}
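
/*
 * Note: on success, deactivate_labels() is registered as a devm action,
 * so the label references and nvdimm 'busy' counts taken above are
 * dropped automatically when the region driver detaches.
 */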
2577
2578int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2579{
2580        struct device **devs = NULL;
2581        int i, rc = 0, type;
2582
2583        *err = 0;
2584        nvdimm_bus_lock(&nd_region->dev);
2585        rc = init_active_labels(nd_region);
2586        if (rc) {
2587                nvdimm_bus_unlock(&nd_region->dev);
2588                return rc;
2589        }
2590
2591        type = nd_region_to_nstype(nd_region);
2592        switch (type) {
2593        case ND_DEVICE_NAMESPACE_IO:
2594                devs = create_namespace_io(nd_region);
2595                break;
2596        case ND_DEVICE_NAMESPACE_PMEM:
2597        case ND_DEVICE_NAMESPACE_BLK:
2598                devs = create_namespaces(nd_region);
2599                break;
2600        default:
2601                break;
2602        }
2603        nvdimm_bus_unlock(&nd_region->dev);
2604
2605        if (!devs)
2606                return -ENODEV;
2607
2608        for (i = 0; devs[i]; i++) {
2609                struct device *dev = devs[i];
2610                int id;
2611
2612                if (type == ND_DEVICE_NAMESPACE_BLK) {
2613                        struct nd_namespace_blk *nsblk;
2614
2615                        nsblk = to_nd_namespace_blk(dev);
2616                        id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2617                                        GFP_KERNEL);
2618                        nsblk->id = id;
2619                } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
2620                        struct nd_namespace_pmem *nspm;
2621
2622                        nspm = to_nd_namespace_pmem(dev);
2623                        id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2624                                        GFP_KERNEL);
2625                        nspm->id = id;
2626                } else
2627                        id = i;
2628
2629                if (id < 0)
2630                        break;
2631                dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2632                nd_device_register(dev);
2633        }
2634        if (i)
2635                nd_region->ns_seed = devs[0];
2636
2637        if (devs[i]) {
2638                int j;
2639
2640                for (j = i; devs[j]; j++) {
2641                        struct device *dev = devs[j];
2642
2643                        device_initialize(dev);
2644                        put_device(dev);
2645                }
2646                *err = j - i;
2647                /*
2648                 * If all of the namespaces we tried to register failed,
2649                 * i.e. none was assigned an id, fail region activation.
2650                 */
2651                if (i == 0)
2652                        rc = -ENODEV;
2653        }
2654        kfree(devs);
2655
2656        if (rc == -ENODEV)
2657                return rc;
2658
2659        return i;
2660}
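
/*
 * Return convention (as implemented above): a negative value is a fatal
 * error; otherwise the number of successfully registered namespaces is
 * returned and *err reports how many namespace devices failed to
 * register.
 */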
2661