linux/drivers/nvdimm/pfn_devs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

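/*
 * Overview: a "pfn" (page frame number) device reserves capacity for
 * the struct page array ("memmap") needed to make a pmem namespace
 * usable for DAX. PFN_MODE_RAM places the memmap in regular DRAM,
 * PFN_MODE_PMEM carves it out of the pmem capacity itself via a
 * vmem_altmap, and PFN_MODE_NONE leaves the device unconfigured.
 */
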
static void nd_pfn_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        dev_dbg(dev, "trace\n");
        nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
        ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
        kfree(nd_pfn->uuid);
        kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
        struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

        WARN_ON(!is_nd_pfn(dev));
        return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        switch (nd_pfn->mode) {
        case PFN_MODE_RAM:
                return sprintf(buf, "ram\n");
        case PFN_MODE_PMEM:
                return sprintf(buf, "pmem\n");
        default:
                return sprintf(buf, "none\n");
        }
}

static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc = 0;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        if (dev->driver)
                rc = -EBUSY;
        else {
                size_t n = len - 1;

                if (strncmp(buf, "pmem\n", n) == 0
                                || strncmp(buf, "pmem", n) == 0) {
                        nd_pfn->mode = PFN_MODE_PMEM;
                } else if (strncmp(buf, "ram\n", n) == 0
                                || strncmp(buf, "ram", n) == 0)
                        nd_pfn->mode = PFN_MODE_RAM;
                else if (strncmp(buf, "none\n", n) == 0
                                || strncmp(buf, "none", n) == 0)
                        nd_pfn->mode = PFN_MODE_NONE;
                else
                        rc = -EINVAL;
        }
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
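
/*
 * Example from userspace (hypothetical device name), a minimal sketch
 * of selecting the mode before the device is enabled:
 *
 *   # echo pmem > /sys/bus/nd/devices/pfn0.0/mode
 *
 * The write fails with -EBUSY once a driver is attached.
 */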

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
        alignments[0] = PAGE_SIZE;

        if (has_transparent_hugepage()) {
                alignments[1] = HPAGE_PMD_SIZE;
                if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
                        alignments[2] = HPAGE_PUD_SIZE;
        }

        return alignments;
}

/*
 * Use a PMD mapping, if supported, as the default alignment.
 */
static unsigned long nd_pfn_default_alignment(void)
{
        if (has_transparent_hugepage())
                return HPAGE_PMD_SIZE;
        return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
        ssize_t rc;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &nd_pfn->align,
                        nd_pfn_supported_alignments(aligns));
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
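
/*
 * For illustration, assuming an x86_64 kernel with transparent
 * hugepages (including PUD support): the supported alignments are
 * 4096 (PAGE_SIZE), 2097152 (HPAGE_PMD_SIZE) and 1073741824
 * (HPAGE_PUD_SIZE), i.e. the page table mapping sizes, so the
 * namespace can be mapped with larger TLB entries.
 */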

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nd_device_lock(dev);
        rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nd_device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = sprintf(buf, "%s\n", nd_pfn->ndns
                        ? dev_name(&nd_pfn->ndns->dev) : "");
        nvdimm_bus_unlock(dev);
        return rc;
}

static ssize_t namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nd_device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
                                + start_pad + offset);
        } else {
                /* no address to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nd_device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%llu\n", (unsigned long long)
                                resource_size(&nsio->res) - start_pad
                                - end_trunc - offset);
        } else {
                /* no size to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        nd_device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(size);
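
/*
 * Worked example (illustrative numbers): a 16 GiB namespace in
 * PFN_MODE_PMEM with 64-byte struct pages needs 64 bytes of memmap
 * per 4 KiB page, i.e. 16 GiB / 64 = 256 MiB, so 'size' reads
 * roughly 16 GiB less ~256 MiB of memmap, the info-block reserve,
 * and any start_pad/end_trunc padding.
 */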

static ssize_t supported_alignments_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        return nd_size_select_show(0,
                        nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
        &dev_attr_mode.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
        &dev_attr_align.attr,
        &dev_attr_resource.attr,
        &dev_attr_size.attr,
        &dev_attr_supported_alignments.attr,
        NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
        .attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
        &nd_pfn_attribute_group,
        &nd_device_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct device_type nd_pfn_device_type = {
        .name = "nd_pfn",
        .release = nd_pfn_release,
        .groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
        return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                struct nd_namespace_common *ndns)
{
        struct device *dev;

        if (!nd_pfn)
                return NULL;

        nd_pfn->mode = PFN_MODE_NONE;
        nd_pfn->align = nd_pfn_default_alignment();
        dev = &nd_pfn->dev;
        device_initialize(&nd_pfn->dev);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
                dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
                                dev_name(ndns->claim));
                put_device(dev);
                return NULL;
        }
        return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
        if (!nd_pfn)
                return NULL;

        nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
        if (nd_pfn->id < 0) {
                kfree(nd_pfn);
                return NULL;
        }

        dev = &nd_pfn->dev;
        dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
        dev->type = &nd_pfn_device_type;
        dev->parent = &nd_region->dev;

        return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        if (!is_memory(&nd_region->dev))
                return NULL;

        nd_pfn = nd_pfn_alloc(nd_region);
        dev = nd_pfn_devinit(nd_pfn, NULL);

        __nd_device_register(dev);
        return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        void *zero_page = page_address(ZERO_PAGE(0));
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        int num_bad, meta_num, rc, bb_present;
        sector_t first_bad, meta_start;
        struct nd_namespace_io *nsio;

        if (nd_pfn->mode != PFN_MODE_PMEM)
                return 0;

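        /*
         * The memmap area to scrub spans from just past the info block
         * (4K offset plus the superblock) up to the start of user data
         * at ->dataoff; meta_start/meta_num are in 512-byte sectors.
         */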
        nsio = to_nd_namespace_io(&ndns->dev);
        meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
        meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

        /*
         * Re-enable the namespace with the correct size so that we can
         * access the device memmap area.
         */
        devm_namespace_disable(&nd_pfn->dev, ndns);
        rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
        if (rc)
                return rc;

        do {
                unsigned long zero_len;
                u64 nsoff;

                bb_present = badblocks_check(&nd_region->bb, meta_start,
                                meta_num, &first_bad, &num_bad);
                if (bb_present) {
                        dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
                                        num_bad, first_bad);
                        nsoff = ALIGN_DOWN((nd_region->ndr_start
                                        + (first_bad << 9)) - nsio->res.start,
                                        PAGE_SIZE);
                        zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
                        while (zero_len) {
                                unsigned long chunk = min(zero_len, PAGE_SIZE);

                                rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
                                                        chunk, 0);
                                if (rc)
                                        break;

                                zero_len -= chunk;
                                nsoff += chunk;
                        }
                        if (rc) {
                                dev_err(&nd_pfn->dev,
                                        "error clearing %x badblocks at %llx\n",
                                        num_bad, first_bad);
                                return rc;
                        }
                }
        } while (bb_present);

        return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
        int i;
        unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        if (align == 0)
                return false;

        nd_pfn_supported_alignments(supported);
        for (i = 0; supported[i]; i++)
                if (align == supported[i])
                        return true;
        return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
        u64 checksum, offset;
        struct resource *res;
        enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
        unsigned long align, start_pad;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

        if (!pfn_sb || !ndns)
                return -ENODEV;

        if (!is_memory(nd_pfn->dev.parent))
                return -ENODEV;

        if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
                return -ENXIO;

        if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
                return -ENODEV;

        checksum = le64_to_cpu(pfn_sb->checksum);
        pfn_sb->checksum = 0;
        if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
                return -ENODEV;
        pfn_sb->checksum = cpu_to_le64(checksum);

        if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
                return -ENODEV;

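        /*
         * Fix up fields that post-date the original info-block format
         * so the rest of the code can assume current-format semantics.
         */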
        if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
                pfn_sb->start_pad = 0;
                pfn_sb->end_trunc = 0;
        }

        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
                pfn_sb->align = 0;

        if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
                pfn_sb->page_struct_size = cpu_to_le16(64);
                pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        }

        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
        case PFN_MODE_PMEM:
                break;
        default:
                return -ENXIO;
        }

        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
        start_pad = le32_to_cpu(pfn_sb->start_pad);
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);

        if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, page size mismatch %d\n",
                                le32_to_cpu(pfn_sb->page_size));
                return -EOPNOTSUPP;
        }

        if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, struct page size mismatch %d\n",
                                le16_to_cpu(pfn_sb->page_struct_size));
                return -EOPNOTSUPP;
        }

        /*
         * Check whether we support the alignment. For DAX, if the
         * superblock alignment does not match a supported alignment,
         * we won't initialize the device.
         */
        if (!nd_supported_alignment(align) &&
                        !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
                dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
                                "%ld:%ld\n", nd_pfn->align, align);
                return -EOPNOTSUPP;
        }

        if (!nd_pfn->uuid) {
                /*
                 * When probing a namespace via nd_pfn_probe() the uuid
                 * is NULL (see: nd_pfn_devinit()), so initialize the
                 * settings from the pfn_sb.
                 */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
                nd_pfn->align = align;
                nd_pfn->mode = mode;
        } else {
                /*
                 * When probing a pfn / dax instance we validate the
                 * live settings against the pfn_sb
                 */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;

                /*
                 * If the uuid validates, but other settings mismatch
                 * return EINVAL because userspace has managed to change
                 * the configuration without specifying new
                 * identification.
                 */
                if (nd_pfn->align != align || nd_pfn->mode != mode) {
                        dev_err(&nd_pfn->dev,
                                        "init failed, settings mismatch\n");
                        dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
                                        nd_pfn->align, align, nd_pfn->mode,
                                        mode);
                        return -EOPNOTSUPP;
                }
        }

        if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
                                align, nvdimm_namespace_capacity(ndns));
                return -EOPNOTSUPP;
        }

        /*
         * These warnings are verbose because they can only trigger in
         * the case where the physical address alignment of the
         * namespace has changed since the pfn superblock was
         * established.
         */
        nsio = to_nd_namespace_io(&ndns->dev);
        res = &nsio->res;
        if (offset >= resource_size(res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
                                dev_name(&ndns->dev));
                return -EOPNOTSUPP;
        }

        if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
                                offset, align);
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
                                memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource start misaligned\n");
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
                                memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource end misaligned\n");
                return -EOPNOTSUPP;
        }

        return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
        int rc;
        struct nd_pfn *nd_pfn;
        struct device *pfn_dev;
        struct nd_pfn_sb *pfn_sb;
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

        if (ndns->force_raw)
                return -ENODEV;

        switch (ndns->claim_class) {
        case NVDIMM_CCLASS_NONE:
        case NVDIMM_CCLASS_PFN:
                break;
        default:
                return -ENODEV;
        }

        nvdimm_bus_lock(&ndns->dev);
        nd_pfn = nd_pfn_alloc(nd_region);
        pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
        nvdimm_bus_unlock(&ndns->dev);
        if (!pfn_dev)
                return -ENOMEM;
        pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(pfn_dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, PFN_SIG);
        dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
        if (rc < 0) {
                nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
                put_device(pfn_dev);
        } else
                __nd_device_register(pfn_dev);

        return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
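
/*
 * Call-flow note (an assumption about callers outside this file): the
 * pmem namespace probe is expected to call nd_pfn_probe() to discover
 * a pre-existing info block; on success a pfn device is registered on
 * top of the namespace, otherwise the namespace is driven raw.
 */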

/*
 * We hotplug memory at sub-section granularity; pad the reserved area
 * from the previous sub-section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
        unsigned long base_pfn = PHYS_PFN(base);

        return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
        unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
        unsigned long base_pfn = PHYS_PFN(base);

        reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
        return reserve;
}
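
/*
 * Worked example (illustrative, assuming 4 KiB pages and 2 MiB
 * sub-sections): a namespace base of 0x100210000 is PFN 0x100210;
 * SUBSECTION_ALIGN_DOWN() rounds that to 0x100200, so 0x10 pad pages
 * are added to the info-block reserve pages computed from
 * nd_info_block_reserve().
 */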

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        struct range *range = &pgmap->range;
        struct vmem_altmap *altmap = &pgmap->altmap;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        u64 offset = le64_to_cpu(pfn_sb->dataoff);
        u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
        u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
        u32 reserve = nd_info_block_reserve();
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t base = nsio->res.start + start_pad;
        resource_size_t end = nsio->res.end - end_trunc;
        struct vmem_altmap __altmap = {
                .base_pfn = init_altmap_base(base),
                .reserve = init_altmap_reserve(base),
                .end_pfn = PHYS_PFN(end),
        };

        *range = (struct range) {
                .start = nsio->res.start + start_pad,
                .end = nsio->res.end - end_trunc,
        };
        pgmap->nr_range = 1;
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < reserve)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
                nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                        dev_info(&nd_pfn->dev,
                                        "number of pfns truncated from %lld to %ld\n",
                                        le64_to_cpu(nd_pfn->pfn_sb->npfns),
                                        nd_pfn->npfns);
                memcpy(altmap, &__altmap, sizeof(*altmap));
                altmap->free = PHYS_PFN(offset - reserve);
                altmap->alloc = 0;
                pgmap->flags |= PGMAP_ALTMAP_VALID;
        } else
                return -ENXIO;

        return 0;
}
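
/*
 * In PFN_MODE_PMEM the vmem_altmap built above lets the memmap be
 * allocated from the device range itself: the pages between the
 * info-block reserve and ->dataoff become the altmap's free pool
 * instead of consuming regular DRAM.
 */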

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t start, size;
        struct nd_region *nd_region;
        unsigned long npfns, align;
        u32 end_trunc;
        struct nd_pfn_sb *pfn_sb;
        phys_addr_t offset;
        const char *sig;
        u64 checksum;
        int rc;

        pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        if (is_nd_dax(&nd_pfn->dev))
                sig = DAX_SIG;
        else
                sig = PFN_SIG;

        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc == 0)
                return nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc != -ENODEV)
                return rc;

        /* no info block, do init */
        memset(pfn_sb, 0, sizeof(*pfn_sb));

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                return -ENXIO;
        }

        /*
         * Note, we use 64 here for the standard size of struct page;
         * debugging options may cause it to be larger, in which case
         * the implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
        start = nsio->res.start;
        size = resource_size(&nsio->res);
        npfns = PHYS_PFN(size - SZ_8K);
        align = max(nd_pfn->align, memremap_compat_align());

        /*
         * When @start is misaligned fail namespace creation. See
         * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
         * an option.
         */
        if (!IS_ALIGNED(start, memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
                                dev_name(&ndns->dev), &start,
                                memremap_compat_align());
                return -EINVAL;
        }
        end_trunc = start + size - ALIGN_DOWN(start + size, align);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 *
                 * Also make sure struct page is no larger than
                 * MAX_STRUCT_PAGE_SIZE (64 bytes): we want the reserved
                 * space to be large enough that it does not vary with
                 * the configured struct page size, but we also want to
                 * notice when new fields grow struct page.
                 */
                BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
                offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
                        - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, align) - start;
        else
                return -ENXIO;

        if (offset >= size) {
                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

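        /*
         * Worked example (illustrative): in PFN_MODE_PMEM with a
         * 2 MiB-aligned start, a 2 MiB align and a 16 GiB namespace,
         * the headroom SZ_8K + 64 * npfns is just over 256 MiB, so the
         * ALIGN() above puts ->dataoff at 258 MiB into the namespace.
         */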
        npfns = PHYS_PFN(size - offset - end_trunc);
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(4);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
        pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        rc = nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc)
                return rc;

        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        int rc;

        if (!nd_pfn->uuid || !nd_pfn->ndns)
                return -ENODEV;

        rc = nd_pfn_init(nd_pfn);
        if (rc)
                return rc;

        /* we need a valid pfn_sb before we can init a dev_pagemap */
        return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
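
/*
 * Typical usage (a sketch, not a verbatim excerpt from a caller): a
 * device probe fills a struct dev_pagemap, calls nvdimm_setup_pfn() to
 * derive the range and altmap from the info block, and then passes the
 * pgmap to devm_memremap_pages() to map the namespace for DAX.
 */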