linux/drivers/nvdimm/btt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
        LOG_NEW_ENT = 0,
        LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
        return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
        return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
                void *buf, size_t n, unsigned long flags)
{
        struct nd_btt *nd_btt = arena->nd_btt;
        struct nd_namespace_common *ndns = nd_btt->ndns;

        /* arena offsets may be shifted from the base of the device */
        offset = adjust_initial_offset(nd_btt, offset);
        return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
                void *buf, size_t n, unsigned long flags)
{
        struct nd_btt *nd_btt = arena->nd_btt;
        struct nd_namespace_common *ndns = nd_btt->ndns;

        /* arena offsets may be shifted from the base of the device */
        offset = adjust_initial_offset(nd_btt, offset);
        return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
        int ret;

        /*
         * infooff and info2off should always be at least 512B aligned.
         * We rely on that to make sure rw_bytes does error clearing
         * correctly, so make sure that is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
                "arena->infooff: %#llx is unaligned\n", arena->infooff);
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
                "arena->info2off: %#llx is unaligned\n", arena->info2off);

        ret = arena_write_bytes(arena, arena->info2off, super,
                        sizeof(struct btt_sb), 0);
        if (ret)
                return ret;

        return arena_write_bytes(arena, arena->infooff, super,
                        sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
        return arena_read_bytes(arena, arena->infooff, super,
                        sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
                unsigned long flags)
{
        u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

        if (unlikely(lba >= arena->external_nlba))
                dev_err_ratelimited(to_dev(arena),
                        "%s: lba %#x out of range (max: %#x)\n",
                        __func__, lba, arena->external_nlba);
        return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

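/*
 * On-media map entry flag encoding, as implied by the read/write helpers
 * below (Z is the 'trim/zero' bit, E is the 'error' bit in the top bits
 * of the 32-bit entry):
 *
 *   Z E   meaning
 *   0 0   initial state: the premap LBA is identity mapped
 *   0 1   the mapping is valid, but the data block has a media error
 *   1 0   the block was zeroed/discarded; reads return zeroes
 *   1 1   a 'normal', valid mapping
 *
 * Note the inversion: callers of btt_map_write() pass z_flag/e_flag with
 * 'normal' being both-zero, while the on-media format stores 'normal' as
 * both bits set (MAP_ENT_NORMAL).
 */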
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
                        u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
        u32 ze;
        __le32 mapping_le;

        /*
         * This 'mapping' is supposed to be just the LBA mapping, without
         * any flags set, so strip the flag bits.
         */
        mapping = ent_lba(mapping);

        ze = (z_flag << 1) + e_flag;
        switch (ze) {
        case 0:
                /*
                 * We want to set neither of the Z or E flags, and
                 * in the actual layout, this means setting the bit
                 * positions of both to '1' to indicate a 'normal'
                 * map entry
                 */
                mapping |= MAP_ENT_NORMAL;
                break;
        case 1:
                mapping |= (1 << MAP_ERR_SHIFT);
                break;
        case 2:
                mapping |= (1 << MAP_TRIM_SHIFT);
                break;
        default:
                /*
                 * The case where Z and E are both sent in as '1' could be
                 * construed as a valid 'normal' case, but we decide not to,
                 * to avoid confusion
                 */
                dev_err_ratelimited(to_dev(arena),
                        "Invalid use of Z and E flags\n");
                return -EIO;
        }

        mapping_le = cpu_to_le32(mapping);
        return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
                        int *trim, int *error, unsigned long rwb_flags)
{
        int ret;
        __le32 in;
        u32 raw_mapping, postmap, ze, z_flag, e_flag;
        u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

        if (unlikely(lba >= arena->external_nlba))
                dev_err_ratelimited(to_dev(arena),
                        "%s: lba %#x out of range (max: %#x)\n",
                        __func__, lba, arena->external_nlba);

        ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
        if (ret)
                return ret;

        raw_mapping = le32_to_cpu(in);

        z_flag = ent_z_flag(raw_mapping);
        e_flag = ent_e_flag(raw_mapping);
        ze = (z_flag << 1) + e_flag;
        postmap = ent_lba(raw_mapping);

        /* Reuse the {z,e}_flag variables for *trim and *error */
        z_flag = 0;
        e_flag = 0;

        switch (ze) {
        case 0:
                /* Initial state. Return postmap = premap */
                *mapping = lba;
                break;
        case 1:
                *mapping = postmap;
                e_flag = 1;
                break;
        case 2:
                *mapping = postmap;
                z_flag = 1;
                break;
        case 3:
                *mapping = postmap;
                break;
        default:
                return -EIO;
        }

        if (trim)
                *trim = z_flag;
        if (error)
                *error = e_flag;

        return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
                        struct log_group *log)
{
        return arena_read_bytes(arena,
                        arena->logoff + (lane * LOG_GRP_SIZE), log,
                        LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
                                int idx)
{
        char dirname[32];
        struct dentry *d;

        /* If for some reason, parent bttN was not created, exit */
        if (!parent)
                return;

        snprintf(dirname, 32, "arena%d", idx);
        d = debugfs_create_dir(dirname, parent);
        if (IS_ERR_OR_NULL(d))
                return;
        a->debugfs_dir = d;

        debugfs_create_x64("size", S_IRUGO, d, &a->size);
        debugfs_create_x64("external_lba_start", S_IRUGO, d,
                                &a->external_lba_start);
        debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
        debugfs_create_u32("internal_lbasize", S_IRUGO, d,
                                &a->internal_lbasize);
        debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
        debugfs_create_u32("external_lbasize", S_IRUGO, d,
                                &a->external_lbasize);
        debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
        debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
        debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
        debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
        debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
        debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
        debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
        debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
        debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
        debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
        debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
        debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
        int i = 0;
        struct arena_info *arena;

        btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
                                                debugfs_root);
        if (IS_ERR_OR_NULL(btt->debugfs_dir))
                return;

        list_for_each_entry(arena, &btt->arena_list, list) {
                arena_debugfs_init(arena, btt->debugfs_dir, i);
                i++;
        }
}

static u32 log_seq(struct log_group *log, int log_idx)
{
        return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the first slot's sequence number is 0 (a never-used group), it
 * marks that slot as old by setting its in-memory sequence number to 1.
 * It returns the index (0 or 1) of the older entry, or -EINVAL if the
 * pair of sequence numbers is invalid.
 *
 * TODO The logic feels a bit kludge-y. Make it better.
 */
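/*
 * Example: valid sequence numbers cycle 1->2->3->1, with 0 reserved for
 * an unused slot. For the pair (seq[0], seq[1]) == (1, 3), slot 1 holds
 * the older entry because 3 wrapped around to 1; for (2, 3), slot 0 is
 * older. Two equal sequence numbers, or a pair summing to more than 5
 * (which would require a value greater than 3), indicate corruption.
 */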
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
        int idx0 = a->log_index[0];
        int idx1 = a->log_index[1];
        int old;

        /*
         * the first ever time this is seen, the entry goes into [0]
         * the next time, the following logic works out to put this
         * (next) entry into [1]
         */
        if (log_seq(log, idx0) == 0) {
                log->ent[idx0].seq = cpu_to_le32(1);
                return 0;
        }

        if (log_seq(log, idx0) == log_seq(log, idx1))
                return -EINVAL;
        if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
                return -EINVAL;

        if (log_seq(log, idx0) < log_seq(log, idx1)) {
                if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
                        old = 0;
                else
                        old = 1;
        } else {
                if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
                        old = 1;
                else
                        old = 0;
        }

        return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
                        struct log_entry *ent, int old_flag)
{
        int ret;
        int old_ent, ret_ent;
        struct log_group log;

        ret = btt_log_group_read(arena, lane, &log);
        if (ret)
                return -EIO;

        old_ent = btt_log_get_old(arena, &log);
        if (old_ent < 0 || old_ent > 1) {
                dev_err(to_dev(arena),
                                "log corruption (%d): lane %d seq [%d, %d]\n",
                                old_ent, lane, log.ent[arena->log_index[0]].seq,
                                log.ent[arena->log_index[1]].seq);
                /* TODO set error state? */
                return -EIO;
        }

        ret_ent = (old_flag ? old_ent : (1 - old_ent));

        if (ent != NULL)
                memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

        return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
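/*
 * Each half of the entry is 8 bytes, written as an atomic, durable unit;
 * since 'seq' is the last field of struct log_entry, the entry's new
 * sequence number (and thus its validity) only becomes visible once the
 * second half has landed.
 */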
static int __btt_log_write(struct arena_info *arena, u32 lane,
                        u32 sub, struct log_entry *ent, unsigned long flags)
{
        int ret;
        u32 group_slot = arena->log_index[sub];
        unsigned int log_half = LOG_ENT_SIZE / 2;
        void *src = ent;
        u64 ns_off;

        ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
                (group_slot * LOG_ENT_SIZE);
        /* split the 16B write into atomic, durable halves */
        ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
        if (ret)
                return ret;

        ns_off += log_half;
        src += log_half;
        return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
                        struct log_entry *ent)
{
        int ret;

        ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
        if (ret)
                return ret;

        /* prepare the next free entry */
        arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
        if (++(arena->freelist[lane].seq) == 4)
                arena->freelist[lane].seq = 1;
        if (ent_e_flag(le32_to_cpu(ent->old_map)))
                arena->freelist[lane].has_err = 1;
        arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

        return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
        int ret = -EINVAL;
        void *zerobuf;
        size_t offset = 0;
        size_t chunk_size = SZ_2M;
        size_t mapsize = arena->logoff - arena->mapoff;

        zerobuf = kzalloc(chunk_size, GFP_KERNEL);
        if (!zerobuf)
                return -ENOMEM;

        /*
         * mapoff should always be at least 512B aligned. We rely on that to
         * make sure rw_bytes does error clearing correctly, so make sure that
         * is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
                "arena->mapoff: %#llx is unaligned\n", arena->mapoff);

        while (mapsize) {
                size_t size = min(mapsize, chunk_size);

                dev_WARN_ONCE(to_dev(arena), size < 512,
                        "chunk size: %#zx is unaligned\n", size);
                ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
                                size, 0);
                if (ret)
                        goto free;

                offset += size;
                mapsize -= size;
                cond_resched();
        }

 free:
        kfree(zerobuf);
        return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
        size_t logsize = arena->info2off - arena->logoff;
        size_t chunk_size = SZ_4K, offset = 0;
        struct log_entry ent;
        void *zerobuf;
        int ret;
        u32 i;

        zerobuf = kzalloc(chunk_size, GFP_KERNEL);
        if (!zerobuf)
                return -ENOMEM;
        /*
         * logoff should always be at least 512B aligned. We rely on that to
         * make sure rw_bytes does error clearing correctly, so make sure that
         * is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
                "arena->logoff: %#llx is unaligned\n", arena->logoff);

        while (logsize) {
                size_t size = min(logsize, chunk_size);

                dev_WARN_ONCE(to_dev(arena), size < 512,
                        "chunk size: %#zx is unaligned\n", size);
                ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
                                size, 0);
                if (ret)
                        goto free;

                offset += size;
                logsize -= size;
                cond_resched();
        }

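        /*
         * Seed each lane's log with an identity entry pointing at one of
         * the nfree reserved internal blocks (internal_nlba is
         * external_nlba + nfree), so that btt_freelist_init() later
         * reconstructs a freelist made up of exactly those spare blocks.
         */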
        for (i = 0; i < arena->nfree; i++) {
                ent.lba = cpu_to_le32(i);
                ent.old_map = cpu_to_le32(arena->external_nlba + i);
                ent.new_map = cpu_to_le32(arena->external_nlba + i);
                ent.seq = cpu_to_le32(LOG_SEQ_INIT);
                ret = __btt_log_write(arena, i, 0, &ent, 0);
                if (ret)
                        goto free;
        }

 free:
        kfree(zerobuf);
        return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
        return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

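/*
 * If the free block for this lane is known to contain a media error,
 * overwrite it with zeroes so that the underlying rw_bytes path clears
 * the poison before the write path hands the block out again.
 */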
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
        int ret = 0;

        if (arena->freelist[lane].has_err) {
                void *zero_page = page_address(ZERO_PAGE(0));
                u32 lba = arena->freelist[lane].block;
                u64 nsoff = to_namespace_offset(arena, lba);
                unsigned long len = arena->sector_size;

                mutex_lock(&arena->err_lock);

                while (len) {
                        unsigned long chunk = min(len, PAGE_SIZE);

                        ret = arena_write_bytes(arena, nsoff, zero_page,
                                chunk, 0);
                        if (ret)
                                break;
                        len -= chunk;
                        nsoff += chunk;
                        if (len == 0)
                                arena->freelist[lane].has_err = 0;
                }
                mutex_unlock(&arena->err_lock);
        }
        return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
        int new, ret;
        struct log_entry log_new;
        u32 i, map_entry, log_oldmap, log_newmap;

        arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
                                        GFP_KERNEL);
        if (!arena->freelist)
                return -ENOMEM;

        for (i = 0; i < arena->nfree; i++) {
                new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
                if (new < 0)
                        return new;

                /* old and new map entries with any flags stripped out */
                log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
                log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

                /* sub points to the next one to be overwritten */
                arena->freelist[i].sub = 1 - new;
                arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
                arena->freelist[i].block = log_oldmap;

                /*
                 * FIXME: if error clearing fails during init, we want to make
                 * the BTT read-only
                 */
                if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
                    !ent_normal(le32_to_cpu(log_new.old_map))) {
                        arena->freelist[i].has_err = 1;
                        ret = arena_clear_freelist_error(arena, i);
                        if (ret)
                                dev_err_ratelimited(to_dev(arena),
                                        "Unable to clear known errors\n");
                }

                /* This implies a newly created or untouched flog entry */
                if (log_oldmap == log_newmap)
                        continue;

                /* Check if map recovery is needed */
                ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
                                NULL, NULL, 0);
                if (ret)
                        return ret;

                /*
                 * The map_entry from btt_map_read is stripped of any flag
                 * bits, so use the stripped out versions from the log as well
                 * for testing whether recovery is needed. For restoration, use
                 * the 'raw' version of the log entries as that captured what
                 * we were going to write originally.
                 */
                if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
                        /*
                         * Last transaction wrote the flog, but wasn't able
                         * to complete the map write. So fix up the map.
                         */
                        ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
                                        le32_to_cpu(log_new.new_map), 0, 0, 0);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
        return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
                && (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
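/*
 * Only two slot layouts are accepted below: (0, 1), the current compact
 * layout, and (0, 2), which is assumed here to correspond to the older
 * padded on-media layout where valid entries alternated with padding
 * slots.
 */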
static int log_set_indices(struct arena_info *arena)
{
        bool idx_set = false, initial_state = true;
        int ret, log_index[2] = {-1, -1};
        u32 i, j, next_idx = 0;
        struct log_group log;
        u32 pad_count = 0;

        for (i = 0; i < arena->nfree; i++) {
                ret = btt_log_group_read(arena, i, &log);
                if (ret < 0)
                        return ret;

                for (j = 0; j < 4; j++) {
                        if (!idx_set) {
                                if (ent_is_padding(&log.ent[j])) {
                                        pad_count++;
                                        continue;
                                } else {
                                        /* Skip if index has been recorded */
                                        if ((next_idx == 1) &&
                                                (j == log_index[0]))
                                                continue;
                                        /* valid entry, record index */
                                        log_index[next_idx] = j;
                                        next_idx++;
                                }
                                if (next_idx == 2) {
                                        /* two valid entries found */
                                        idx_set = true;
                                } else if (next_idx > 2) {
                                        /* too many valid indices */
                                        return -ENXIO;
                                }
                        } else {
                                /*
                                 * once the indices have been set, just verify
                                 * that all subsequent log groups are either in
                                 * their initial state or follow the same
                                 * indices.
                                 */
                                if (j == log_index[0]) {
                                        /* entry must be 'valid' */
                                        if (ent_is_padding(&log.ent[j]))
                                                return -ENXIO;
                                } else if (j == log_index[1]) {
                                        ;
                                        /*
                                         * log_index[1] can be padding if the
                                         * lane never got used and it is still
                                         * in the initial state (three 'padding'
                                         * entries)
                                         */
                                } else {
                                        /* entry must be invalid (padding) */
                                        if (!ent_is_padding(&log.ent[j]))
                                                return -ENXIO;
                                }
                        }
                }
                /*
                 * If any of the log_groups have more than one valid,
                 * non-padding entry, then we are no longer in the
                 * initial_state
                 */
                if (pad_count < 3)
                        initial_state = false;
                pad_count = 0;
        }

        if (!initial_state && !idx_set)
                return -ENXIO;

        /*
         * If all the entries in the log were in the initial state,
         * assume new padding scheme
         */
        if (initial_state)
                log_index[1] = 1;

        /*
         * Only allow the known permutations of log/padding indices,
         * i.e. (0, 1), and (0, 2)
         */
        if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
                ; /* known index possibilities */
        else {
                dev_err(to_dev(arena), "Found an unknown padding scheme\n");
                return -ENXIO;
        }

        arena->log_index[0] = log_index[0];
        arena->log_index[1] = log_index[1];
        dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
        dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
        return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
        arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
        if (arena->rtt == NULL)
                return -ENOMEM;

        return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
        u32 i;

        arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
                                GFP_KERNEL);
        if (!arena->map_locks)
                return -ENOMEM;

        for (i = 0; i < arena->nfree; i++)
                spin_lock_init(&arena->map_locks[i].lock);

        return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
                                size_t start, size_t arena_off)
{
        struct arena_info *arena;
        u64 logsize, mapsize, datasize;
        u64 available = size;

        arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
        if (!arena)
                return NULL;
        arena->nd_btt = btt->nd_btt;
        arena->sector_size = btt->sector_size;
        mutex_init(&arena->err_lock);

        if (!size)
                return arena;

        arena->size = size;
        arena->external_lba_start = start;
        arena->external_lbasize = btt->lbasize;
        arena->internal_lbasize = roundup(arena->external_lbasize,
                                        INT_LBASIZE_ALIGNMENT);
        arena->nfree = BTT_DEFAULT_NFREE;
        arena->version_major = btt->nd_btt->version_major;
        arena->version_minor = btt->nd_btt->version_minor;

        if (available % BTT_PG_SIZE)
                available -= (available % BTT_PG_SIZE);

        /* Two pages are reserved for the super block and its copy */
        available -= 2 * BTT_PG_SIZE;

        /* The log takes a fixed amount of space based on nfree */
        logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
        available -= logsize;

        /* Calculate optimal split between map and data area */
        arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
                        arena->internal_lbasize + MAP_ENT_SIZE);
        arena->external_nlba = arena->internal_nlba - arena->nfree;

        mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
        datasize = available - mapsize;

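        /*
         * Resulting on-media layout, in order of increasing offset:
         *   [ info block | data | map | log | info block copy ]
         */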
        /* 'Absolute' values, relative to start of storage space */
        arena->infooff = arena_off;
        arena->dataoff = arena->infooff + BTT_PG_SIZE;
        arena->mapoff = arena->dataoff + datasize;
        arena->logoff = arena->mapoff + mapsize;
        arena->info2off = arena->logoff + logsize;

        /* Default log indices are (0,1) */
        arena->log_index[0] = 0;
        arena->log_index[1] = 1;
        return arena;
}

static void free_arenas(struct btt *btt)
{
        struct arena_info *arena, *next;

        list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
                list_del(&arena->list);
                kfree(arena->rtt);
                kfree(arena->map_locks);
                kfree(arena->freelist);
                debugfs_remove_recursive(arena->debugfs_dir);
                kfree(arena);
        }
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
                                u64 arena_off)
{
        arena->internal_nlba = le32_to_cpu(super->internal_nlba);
        arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
        arena->external_nlba = le32_to_cpu(super->external_nlba);
        arena->external_lbasize = le32_to_cpu(super->external_lbasize);
        arena->nfree = le32_to_cpu(super->nfree);
        arena->version_major = le16_to_cpu(super->version_major);
        arena->version_minor = le16_to_cpu(super->version_minor);

        arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
                        le64_to_cpu(super->nextoff));
        arena->infooff = arena_off;
        arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
        arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
        arena->logoff = arena_off + le64_to_cpu(super->logoff);
        arena->info2off = arena_off + le64_to_cpu(super->info2off);

        arena->size = (le64_to_cpu(super->nextoff) > 0)
                ? (le64_to_cpu(super->nextoff))
                : (arena->info2off - arena->infooff + BTT_PG_SIZE);

        arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
        int ret = 0;
        struct arena_info *arena;
        struct btt_sb *super;
        size_t remaining = btt->rawsize;
        u64 cur_nlba = 0;
        size_t cur_off = 0;
        int num_arenas = 0;

        super = kzalloc(sizeof(*super), GFP_KERNEL);
        if (!super)
                return -ENOMEM;

        while (remaining) {
                /* Alloc memory for arena */
                arena = alloc_arena(btt, 0, 0, 0);
                if (!arena) {
                        ret = -ENOMEM;
                        goto out_super;
                }

                arena->infooff = cur_off;
                ret = btt_info_read(arena, super);
                if (ret)
                        goto out;

                if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
                        if (remaining == btt->rawsize) {
                                btt->init_state = INIT_NOTFOUND;
                                dev_info(to_dev(arena), "No existing arenas\n");
                                goto out;
                        } else {
                                dev_err(to_dev(arena),
                                                "Found corrupted metadata!\n");
                                ret = -ENODEV;
                                goto out;
                        }
                }

                arena->external_lba_start = cur_nlba;
                parse_arena_meta(arena, super, cur_off);

                ret = log_set_indices(arena);
                if (ret) {
                        dev_err(to_dev(arena),
                                "Unable to deduce log/padding indices\n");
                        goto out;
                }

                ret = btt_freelist_init(arena);
                if (ret)
                        goto out;

                ret = btt_rtt_init(arena);
                if (ret)
                        goto out;

                ret = btt_maplocks_init(arena);
                if (ret)
                        goto out;

                list_add_tail(&arena->list, &btt->arena_list);

                remaining -= arena->size;
                cur_off += arena->size;
                cur_nlba += arena->external_nlba;
                num_arenas++;

                if (arena->nextoff == 0)
                        break;
        }
        btt->num_arenas = num_arenas;
        btt->nlba = cur_nlba;
        btt->init_state = INIT_READY;

        kfree(super);
        return ret;

 out:
        kfree(arena);
        free_arenas(btt);
 out_super:
        kfree(super);
        return ret;
}

static int create_arenas(struct btt *btt)
{
        size_t remaining = btt->rawsize;
        size_t cur_off = 0;

        while (remaining) {
                struct arena_info *arena;
                size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

                remaining -= arena_size;
                if (arena_size < ARENA_MIN_SIZE)
                        break;

                arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
                if (!arena) {
                        free_arenas(btt);
                        return -ENOMEM;
                }
                btt->nlba += arena->external_nlba;
                if (remaining >= ARENA_MIN_SIZE)
                        arena->nextoff = arena->size;
                else
                        arena->nextoff = 0;
                cur_off += arena_size;
                list_add_tail(&arena->list, &btt->arena_list);
        }

        return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
        int ret;
        u64 sum;
        struct btt_sb *super;
        struct nd_btt *nd_btt = arena->nd_btt;
        const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

        ret = btt_map_init(arena);
        if (ret)
                return ret;

        ret = btt_log_init(arena);
        if (ret)
                return ret;

        super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
        if (!super)
                return -ENOMEM;

        strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
        memcpy(super->uuid, nd_btt->uuid, 16);
        memcpy(super->parent_uuid, parent_uuid, 16);
        super->flags = cpu_to_le32(arena->flags);
        super->version_major = cpu_to_le16(arena->version_major);
        super->version_minor = cpu_to_le16(arena->version_minor);
        super->external_lbasize = cpu_to_le32(arena->external_lbasize);
        super->external_nlba = cpu_to_le32(arena->external_nlba);
        super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
        super->internal_nlba = cpu_to_le32(arena->internal_nlba);
        super->nfree = cpu_to_le32(arena->nfree);
        super->infosize = cpu_to_le32(sizeof(struct btt_sb));
        super->nextoff = cpu_to_le64(arena->nextoff);
        /*
         * Subtract arena->infooff (arena start) so numbers are relative
         * to 'this' arena
         */
        super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
        super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
        super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
        super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

        super->flags = 0;
        sum = nd_sb_checksum((struct nd_gen_sb *) super);
        super->checksum = cpu_to_le64(sum);

        ret = btt_info_write(arena, super);

        kfree(super);
        return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
        int ret = 0;
        struct arena_info *arena;

        mutex_lock(&btt->init_lock);
        list_for_each_entry(arena, &btt->arena_list, list) {
                ret = btt_arena_write_layout(arena);
                if (ret)
                        goto unlock;

                ret = btt_freelist_init(arena);
                if (ret)
                        goto unlock;

                ret = btt_rtt_init(arena);
                if (ret)
                        goto unlock;

                ret = btt_maplocks_init(arena);
                if (ret)
                        goto unlock;
        }

        btt->init_state = INIT_READY;

 unlock:
        mutex_unlock(&btt->init_lock);
        return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
        return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
                                struct arena_info **arena)
{
        struct arena_info *arena_list;
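        /* convert the 512B-unit sector into an LBA of btt->sector_size */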
        __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

        list_for_each_entry(arena_list, &btt->arena_list, list) {
                if (lba < arena_list->external_nlba) {
                        *arena = arena_list;
                        *premap = lba;
                        return 0;
                }
                lba -= arena_list->external_nlba;
        }

        return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
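/*
 * The index hash below maps all map entries sharing an L1 cacheline to
 * the same lock and stripes the entries across the nfree locks, so two
 * concurrent updates to entries in one cacheline always serialize on
 * the same lock.
 */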
static void lock_map(struct arena_info *arena, u32 premap)
                __acquires(&arena->map_locks[idx].lock)
{
        u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

        spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
                __releases(&arena->map_locks[idx].lock)
{
        u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

        spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
                        unsigned int off, u32 lba, u32 len)
{
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
        void *mem = kmap_atomic(page);

        ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
        kunmap_atomic(mem);

        return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
                        struct page *page, unsigned int off, u32 len)
{
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
        void *mem = kmap_atomic(page);

        ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
        kunmap_atomic(mem);

        return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
        void *mem = kmap_atomic(page);

        memset(mem + off, 0, len);
        kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
                        struct arena_info *arena, u32 postmap, int rw)
{
        unsigned int len = btt_meta_size(btt);
        u64 meta_nsoff;
        int ret = 0;

        if (bip == NULL)
                return 0;

        meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

        while (len) {
                unsigned int cur_len;
                struct bio_vec bv;
                void *mem;

                bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
                /*
                 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
                 * .bv_offset already adjusted for iter->bi_bvec_done, and we
                 * can use those directly
                 */

                cur_len = min(len, bv.bv_len);
                mem = kmap_atomic(bv.bv_page);
                if (rw)
                        ret = arena_write_bytes(arena, meta_nsoff,
                                        mem + bv.bv_offset, cur_len,
                                        NVDIMM_IO_ATOMIC);
                else
                        ret = arena_read_bytes(arena, meta_nsoff,
                                        mem + bv.bv_offset, cur_len,
                                        NVDIMM_IO_ATOMIC);

                kunmap_atomic(mem);
                if (ret)
                        return ret;

                len -= cur_len;
                meta_nsoff += cur_len;
                if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
                        return -EIO;
        }

        return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
                        struct arena_info *arena, u32 postmap, int rw)
{
        return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
                        struct page *page, unsigned int off, sector_t sector,
                        unsigned int len)
{
        int ret = 0;
        int t_flag, e_flag;
        struct arena_info *arena = NULL;
        u32 lane = 0, premap, postmap;

        while (len) {
                u32 cur_len;

                lane = nd_region_acquire_lane(btt->nd_region);

                ret = lba_to_arena(btt, sector, &premap, &arena);
                if (ret)
                        goto out_lane;

                cur_len = min(btt->sector_size, len);

                ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
                                NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_lane;

                /*
                 * We loop to make sure that the post map LBA didn't change
                 * from under us between writing the RTT and doing the actual
                 * read.
                 */
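                /*
                 * (The RTT entry published below is what btt_write_pg()
                 * polls before reusing the same postmap block, which is
                 * what makes this read-verify-retry scheme safe without a
                 * read-side lock.)
                 */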
                while (1) {
                        u32 new_map;
                        int new_t, new_e;

                        if (t_flag) {
                                zero_fill_data(page, off, cur_len);
                                goto out_lane;
                        }

                        if (e_flag) {
                                ret = -EIO;
                                goto out_lane;
                        }

                        arena->rtt[lane] = RTT_VALID | postmap;
                        /*
                         * Barrier to make sure this write is not reordered
                         * to do the verification map_read before the RTT store
                         */
                        barrier();

                        ret = btt_map_read(arena, premap, &new_map, &new_t,
                                                &new_e, NVDIMM_IO_ATOMIC);
                        if (ret)
                                goto out_rtt;

                        if ((postmap == new_map) && (t_flag == new_t) &&
                                        (e_flag == new_e))
                                break;

                        postmap = new_map;
                        t_flag = new_t;
                        e_flag = new_e;
                }

                ret = btt_data_read(arena, page, off, postmap, cur_len);
                if (ret) {
                        /* Media error - set the e_flag */
                        if (btt_map_write(arena, premap, postmap, 0, 1,
                                        NVDIMM_IO_ATOMIC))
                                dev_warn_ratelimited(to_dev(arena),
                                        "Error persistently tracking bad blocks at %#x\n",
                                        premap);
                        goto out_rtt;
                }

                if (bip) {
                        ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
                        if (ret)
                                goto out_rtt;
                }

                arena->rtt[lane] = RTT_INVALID;
                nd_region_release_lane(btt->nd_region, lane);

                len -= cur_len;
                off += cur_len;
                sector += btt->sector_size >> SECTOR_SHIFT;
        }

        return 0;

 out_rtt:
        arena->rtt[lane] = RTT_INVALID;
 out_lane:
        nd_region_release_lane(btt->nd_region, lane);
        return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
                u32 postmap)
{
        u64 nsoff = adjust_initial_offset(arena->nd_btt,
                        to_namespace_offset(arena, postmap));
        sector_t phys_sector = nsoff >> 9;

        return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

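/*
 * The write path orders its updates so that a crash at any point is
 * recoverable: data goes to a free block first, then the premap->postmap
 * translation is committed to the flog (btt_flog_write), and only then
 * is the map entry updated. A power failure between the flog and map
 * writes is detected and repaired by btt_freelist_init().
 */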
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
                        sector_t sector, struct page *page, unsigned int off,
                        unsigned int len)
{
        int ret = 0;
        struct arena_info *arena = NULL;
        u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
        struct log_entry log;
        int sub;

        while (len) {
                u32 cur_len;
                int e_flag;

 retry:
                lane = nd_region_acquire_lane(btt->nd_region);

                ret = lba_to_arena(btt, sector, &premap, &arena);
                if (ret)
                        goto out_lane;
                cur_len = min(btt->sector_size, len);

                if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
                        ret = -EIO;
                        goto out_lane;
                }

                if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
                        arena->freelist[lane].has_err = 1;

                if (mutex_is_locked(&arena->err_lock)
                                || arena->freelist[lane].has_err) {
                        nd_region_release_lane(btt->nd_region, lane);

                        ret = arena_clear_freelist_error(arena, lane);
                        if (ret)
                                return ret;

                        /* OK to acquire a different lane/free block */
                        goto retry;
                }

                new_postmap = arena->freelist[lane].block;

                /* Wait if the new block is being read from */
                for (i = 0; i < arena->nfree; i++)
                        while (arena->rtt[i] == (RTT_VALID | new_postmap))
                                cpu_relax();

                if (new_postmap >= arena->internal_nlba) {
                        ret = -EIO;
                        goto out_lane;
                }

                ret = btt_data_write(arena, new_postmap, page, off, cur_len);
                if (ret)
                        goto out_lane;

                if (bip) {
                        ret = btt_rw_integrity(btt, bip, arena, new_postmap,
                                                WRITE);
                        if (ret)
                                goto out_lane;
                }

                lock_map(arena, premap);
                ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
                                NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_map;
                if (old_postmap >= arena->internal_nlba) {
                        ret = -EIO;
                        goto out_map;
                }
                if (e_flag)
                        set_e_flag(old_postmap);

                log.lba = cpu_to_le32(premap);
                log.old_map = cpu_to_le32(old_postmap);
                log.new_map = cpu_to_le32(new_postmap);
                log.seq = cpu_to_le32(arena->freelist[lane].seq);
                sub = arena->freelist[lane].sub;
                ret = btt_flog_write(arena, lane, sub, &log);
                if (ret)
                        goto out_map;

                ret = btt_map_write(arena, premap, new_postmap, 0, 0,
                        NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_map;

                unlock_map(arena, premap);
                nd_region_release_lane(btt->nd_region, lane);

                if (e_flag) {
                        ret = arena_clear_freelist_error(arena, lane);
                        if (ret)
                                return ret;
                }

                len -= cur_len;
                off += cur_len;
                sector += btt->sector_size >> SECTOR_SHIFT;
        }

        return 0;

 out_map:
        unlock_map(arena, premap);
 out_lane:
        nd_region_release_lane(btt->nd_region, lane);
        return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
                        struct page *page, unsigned int len, unsigned int off,
                        unsigned int op, sector_t sector)
{
        int ret;

        if (!op_is_write(op)) {
                ret = btt_read_pg(btt, bip, page, off, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                ret = btt_write_pg(btt, bip, sector, page, off, len);
        }

        return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct btt *btt = bio->bi_disk->private_data;
        struct bvec_iter iter;
        unsigned long start;
        struct bio_vec bvec;
        int err = 0;
        bool do_acct;

        if (!bio_integrity_prep(bio))
                return BLK_QC_T_NONE;

        do_acct = blk_queue_io_stat(bio->bi_disk->queue);
        if (do_acct)
                start = bio_start_io_acct(bio);
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;

                if (len > PAGE_SIZE || len < btt->sector_size ||
                                len % btt->sector_size) {
                        dev_err_ratelimited(&btt->nd_btt->dev,
                                "unaligned bio segment (len: %d)\n", len);
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }

                err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
                                  bio_op(bio), iter.bi_sector);
                if (err) {
                        dev_err(&btt->nd_btt->dev,
                                        "io error in %s sector %lld, len %d\n",
                                        (op_is_write(bio_op(bio))) ? "WRITE" :
                                        "READ",
                                        (unsigned long long) iter.bi_sector, len);
1477                        bio->bi_status = errno_to_blk_status(err);
1478                        break;
1479                }
1480        }
1481        if (do_acct)
1482                bio_end_io_acct(bio, start);
1483
1484        bio_endio(bio);
1485        return BLK_QC_T_NONE;
1486}
1487
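    /* synchronous single-page I/O, reached via the rw_page block op */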
1488static int btt_rw_page(struct block_device *bdev, sector_t sector,
1489                struct page *page, unsigned int op)
1490{
1491        struct btt *btt = bdev->bd_disk->private_data;
1492        int rc;
1493        unsigned int len;
1494
1495        len = hpage_nr_pages(page) * PAGE_SIZE;
1496        rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
1497        if (rc == 0)
1498                page_endio(page, op_is_write(op), 0);
1499
1500        return rc;
1501}
1502
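    /* fabricated CHS geometry for legacy HDIO_GETGEO users */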
1504static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
1505{
1506        /* some standard values */
1507        geo->heads = 1 << 6;
1508        geo->sectors = 1 << 5;
1509        geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1510        return 0;
1511}
1512
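    /* rw_page lets the kernel do page I/O here without building bios */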
1513static const struct block_device_operations btt_fops = {
1514        .owner =                THIS_MODULE,
1515        .rw_page =              btt_rw_page,
1516        .getgeo =               btt_getgeo,
1517        .revalidate_disk =      nvdimm_revalidate_disk,
1518};
1519
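    /* allocate, configure, and register the gendisk fronting this btt */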
1520static int btt_blk_init(struct btt *btt)
1521{
1522        struct nd_btt *nd_btt = btt->nd_btt;
1523        struct nd_namespace_common *ndns = nd_btt->ndns;
1524
1525        /* create a new disk and request queue for btt */
1526        btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE);
1527        if (!btt->btt_queue)
1528                return -ENOMEM;
1529
1530        btt->btt_disk = alloc_disk(0);
1531        if (!btt->btt_disk) {
1532                blk_cleanup_queue(btt->btt_queue);
1533                return -ENOMEM;
1534        }
1535
1536        nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
1537        btt->btt_disk->first_minor = 0;
1538        btt->btt_disk->fops = &btt_fops;
1539        btt->btt_disk->private_data = btt;
1540        btt->btt_disk->queue = btt->btt_queue;
1541        btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
1542        btt->btt_disk->queue->backing_dev_info->capabilities |=
1543                        BDI_CAP_SYNCHRONOUS_IO;
1544
1545        blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
1546        blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
1547        blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
1548
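            /*
             * If the format reserves per-sector metadata, expose it via
             * the block layer integrity framework.
             */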
1549        if (btt_meta_size(btt)) {
1550                int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1551
1552                if (rc) {
1553                        /* the disk was never added; del_gendisk() is not needed */
1554                        put_disk(btt->btt_disk);
1555                        blk_cleanup_queue(btt->btt_queue);
1556                        return rc;
1557                }
1558        }
1559        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1560        device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
1561        btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1562        revalidate_disk(btt->btt_disk);
1563
1564        return 0;
1565}
1566
1567static void btt_blk_cleanup(struct btt *btt)
1568{
1569        del_gendisk(btt->btt_disk);
1570        put_disk(btt->btt_disk);
1571        blk_cleanup_queue(btt->btt_queue);
1572}
1573
1574/**
1575 * btt_init - initialize a block translation table for the given device
1576 * @nd_btt:     device with BTT geometry and backing device info
1577 * @rawsize:    raw size in bytes of the backing device
1578 * @lbasize:    lba size of the backing device
1579 * @uuid:       A uuid for the backing device - this is stored on media
1580 * @nd_region:  parent region of the backing device
1581 *
1582 * Initialize a Block Translation Table on a backing device to provide
1583 * single sector power fail atomicity.
1584 *
1585 * Context:
1586 * Might sleep.
1587 *
1588 * Returns:
1589 * Pointer to a new struct btt on success, NULL on failure.
1590 */
1591static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
1592                u32 lbasize, u8 *uuid, struct nd_region *nd_region)
1593{
1594        int ret;
1595        struct btt *btt;
1596        struct nd_namespace_io *nsio;
1597        struct device *dev = &nd_btt->dev;
1598
1599        btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
1600        if (!btt)
1601                return NULL;
1602
1603        btt->nd_btt = nd_btt;
1604        btt->rawsize = rawsize;
1605        btt->lbasize = lbasize;
1606        btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
1607        INIT_LIST_HEAD(&btt->arena_list);
1608        mutex_init(&btt->init_lock);
1609        btt->nd_region = nd_region;
1610        nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
1611        btt->phys_bb = &nsio->bb;
1612
1613        ret = discover_arenas(btt);
1614        if (ret) {
1615                dev_err(dev, "init: error in discover_arenas: %d\n", ret);
1616                return NULL;
1617        }
1618
1619        if (btt->init_state != INIT_READY && nd_region->ro) {
1620                dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
1621                                dev_name(&nd_region->dev));
1622                return NULL;
1623        } else if (btt->init_state != INIT_READY) {
1624                btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
1625                        ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
1626                dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
1627                                btt->num_arenas, rawsize);
1628
1629                ret = create_arenas(btt);
1630                if (ret) {
1631                        dev_info(dev, "init: create_arenas: %d\n", ret);
1632                        return NULL;
1633                }
1634
1635                ret = btt_meta_init(btt);
1636                if (ret) {
1637                        dev_err(dev, "init: error in meta_init: %d\n", ret);
1638                        return NULL;
1639                }
1640        }
1641
1642        ret = btt_blk_init(btt);
1643        if (ret) {
1644                dev_err(dev, "init: error in blk_init: %d\n", ret);
1645                return NULL;
1646        }
1647
1648        btt_debugfs_init(btt);
1649
1650        return btt;
1651}
1652
1653/**
1654 * btt_fini - de-initialize a BTT
1655 * @btt:        the BTT handle that was generated by btt_init
1656 *
1657 * De-initialize a Block Translation Table on device removal
1658 *
1659 * Context:
1660 * Might sleep.
1661 */
1662static void btt_fini(struct btt *btt)
1663{
1664        if (btt) {
1665                btt_blk_cleanup(btt);
1666                free_arenas(btt);
1667                debugfs_remove_recursive(btt->debugfs_dir);
1668        }
1669}
1670
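    /*
     * Called when a btt device claims a namespace: validate the
     * configuration, size the arena space, and build the in-core btt.
     */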
1671int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
1672{
1673        struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
1674        struct nd_region *nd_region;
1675        struct btt_sb *btt_sb;
1676        struct btt *btt;
1677        size_t size, rawsize;
1678        int rc;
1679
1680        if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
1681                dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
1682                return -ENODEV;
1683        }
1684
1685        btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
1686        if (!btt_sb)
1687                return -ENOMEM;
1688
1689        size = nvdimm_namespace_capacity(ndns);
1690        rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
1691        if (rc)
1692                return rc;
1693
1694        /*
1695         * A negative return here is fine: it just means there was no
1696         * existing BTT and we are creating a new one.  The call is still
1697         * needed so that the version-dependent fields in nd_btt are set
1698         * correctly based on the holder class.
1699         */
1700        nd_btt_version(nd_btt, ndns, btt_sb);
1701
1702        rawsize = size - nd_btt->initial_offset;
1703        if (rawsize < ARENA_MIN_SIZE) {
1704                dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
1705                                dev_name(&ndns->dev),
1706                                ARENA_MIN_SIZE + nd_btt->initial_offset);
1707                return -ENXIO;
1708        }
1709        nd_region = to_nd_region(nd_btt->dev.parent);
1710        btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
1711                        nd_region);
1712        if (!btt)
1713                return -ENOMEM;
1714        nd_btt->btt = btt;
1715
1716        return 0;
1717}
1718EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
1719
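    /* undo nvdimm_namespace_attach_btt() on device teardown */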
1720int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
1721{
1722        struct btt *btt = nd_btt->btt;
1723
1724        btt_fini(btt);
1725        nd_btt->btt = NULL;
1726
1727        return 0;
1728}
1729EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
1730
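    /* create the debugfs root that per-btt directories hang off of */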
1731static int __init nd_btt_init(void)
1732{
1733        int rc = 0;
1734
1735        debugfs_root = debugfs_create_dir("btt", NULL);
1736        if (IS_ERR_OR_NULL(debugfs_root))
1737                rc = -ENXIO;
1738
1739        return rc;
1740}
1741
1742static void __exit nd_btt_exit(void)
1743{
1744        debugfs_remove_recursive(debugfs_root);
1745}
1746
1747MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
1748MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
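    /* modpost expects a MODULE_DESCRIPTION() */
    MODULE_DESCRIPTION("Block Translation Table");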
1749MODULE_LICENSE("GPL v2");
1750module_init(nd_btt_init);
1751module_exit(nd_btt_exit);
1752