// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
        LOG_NEW_ENT = 0,
        LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
        return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
        return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
                void *buf, size_t n, unsigned long flags)
{
        struct nd_btt *nd_btt = arena->nd_btt;
        struct nd_namespace_common *ndns = nd_btt->ndns;

        /* arena offsets may be shifted from the base of the device */
        offset = adjust_initial_offset(nd_btt, offset);
        return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
                void *buf, size_t n, unsigned long flags)
{
        struct nd_btt *nd_btt = arena->nd_btt;
        struct nd_namespace_common *ndns = nd_btt->ndns;

        /* arena offsets may be shifted from the base of the device */
        offset = adjust_initial_offset(nd_btt, offset);
        return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
        int ret;

        /*
         * infooff and info2off should always be at least 512B aligned.
         * We rely on that to make sure rw_bytes does error clearing
         * correctly, so make sure that is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
                "arena->infooff: %#llx is unaligned\n", arena->infooff);
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
                "arena->info2off: %#llx is unaligned\n", arena->info2off);

        ret = arena_write_bytes(arena, arena->info2off, super,
                        sizeof(struct btt_sb), 0);
        if (ret)
                return ret;

        return arena_write_bytes(arena, arena->infooff, super,
                        sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
        return arena_read_bytes(arena, arena->infooff, super,
                        sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
                unsigned long flags)
{
        u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

        if (unlikely(lba >= arena->external_nlba))
                dev_err_ratelimited(to_dev(arena),
                        "%s: lba %#x out of range (max: %#x)\n",
                        __func__, lba, arena->external_nlba);
        return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
                        u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
        u32 ze;
        __le32 mapping_le;

        /*
         * This 'mapping' is supposed to be just the LBA mapping, without
         * any flags set, so strip the flag bits.
         */
        mapping = ent_lba(mapping);

        ze = (z_flag << 1) + e_flag;
        switch (ze) {
        case 0:
                /*
                 * We want to set neither of the Z or E flags, and
                 * in the actual layout, this means setting the bit
                 * positions of both to '1' to indicate a 'normal'
                 * map entry
                 */
                mapping |= MAP_ENT_NORMAL;
                break;
        case 1:
                mapping |= (1 << MAP_ERR_SHIFT);
                break;
        case 2:
                mapping |= (1 << MAP_TRIM_SHIFT);
                break;
        default:
                /*
                 * The case where Z and E are both sent in as '1' could be
                 * construed as a valid 'normal' case, but we decide not to,
                 * to avoid confusion
                 */
                dev_err_ratelimited(to_dev(arena),
                        "Invalid use of Z and E flags\n");
                return -EIO;
        }

        mapping_le = cpu_to_le32(mapping);
        return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}
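
/*
 * Map entry encoding, per the MAP_* definitions in btt.h: bit 31 is the
 * 'Z' (zero/trim) flag, bit 30 is the 'E' (error) flag, and the low 30
 * bits hold the postmap LBA. For example, a normal mapping to postmap
 * block 5 is stored as 0xc0000005 (both flag bits set), while a raw
 * entry of all-zeroes (Z == E == 0) is the initial identity mapping
 * that btt_map_read() below resolves to postmap == premap.
 */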

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
                        int *trim, int *error, unsigned long rwb_flags)
{
        int ret;
        __le32 in;
        u32 raw_mapping, postmap, ze, z_flag, e_flag;
        u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

        if (unlikely(lba >= arena->external_nlba))
                dev_err_ratelimited(to_dev(arena),
                        "%s: lba %#x out of range (max: %#x)\n",
                        __func__, lba, arena->external_nlba);

        ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
        if (ret)
                return ret;

        raw_mapping = le32_to_cpu(in);

        z_flag = ent_z_flag(raw_mapping);
        e_flag = ent_e_flag(raw_mapping);
        ze = (z_flag << 1) + e_flag;
        postmap = ent_lba(raw_mapping);

        /* Reuse the {z,e}_flag variables for *trim and *error */
        z_flag = 0;
        e_flag = 0;

        switch (ze) {
        case 0:
                /* Initial state. Return postmap = premap */
                *mapping = lba;
                break;
        case 1:
                *mapping = postmap;
                e_flag = 1;
                break;
        case 2:
                *mapping = postmap;
                z_flag = 1;
                break;
        case 3:
                *mapping = postmap;
                break;
        default:
                return -EIO;
        }

        if (trim)
                *trim = z_flag;
        if (error)
                *error = e_flag;

        return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
                        struct log_group *log)
{
        return arena_read_bytes(arena,
                        arena->logoff + (lane * LOG_GRP_SIZE), log,
                        LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
                                int idx)
{
        char dirname[32];
        struct dentry *d;

        /* If for some reason, parent bttN was not created, exit */
        if (!parent)
                return;

        snprintf(dirname, 32, "arena%d", idx);
        d = debugfs_create_dir(dirname, parent);
        if (IS_ERR_OR_NULL(d))
                return;
        a->debugfs_dir = d;

        debugfs_create_x64("size", S_IRUGO, d, &a->size);
        debugfs_create_x64("external_lba_start", S_IRUGO, d,
                                &a->external_lba_start);
        debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
        debugfs_create_u32("internal_lbasize", S_IRUGO, d,
                                &a->internal_lbasize);
        debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
        debugfs_create_u32("external_lbasize", S_IRUGO, d,
                                &a->external_lbasize);
        debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
        debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
        debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
        debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
        debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
        debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
        debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
        debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
        debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
        debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
        debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
        debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
        int i = 0;
        struct arena_info *arena;

        btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
                                                debugfs_root);
        if (IS_ERR_OR_NULL(btt->debugfs_dir))
                return;

        list_for_each_entry(arena, &btt->arena_list, list) {
                arena_debugfs_init(arena, btt->debugfs_dir, i);
                i++;
        }
}

static u32 log_seq(struct log_group *log, int log_idx)
{
        return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the sequence number to
 * find the 'older' entry. If the log group is still in its initial state,
 * it also stamps the first slot's sequence number to mark it in use.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: the logic feels a bit kludgey; simplify it.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
        int idx0 = a->log_index[0];
        int idx1 = a->log_index[1];
        int old;

        /*
         * The very first time this is seen, the entry goes into [0];
         * the next time, the logic below works out to put this (next)
         * entry into [1].
         */
        if (log_seq(log, idx0) == 0) {
                log->ent[idx0].seq = cpu_to_le32(1);
                return 0;
        }

        if (log_seq(log, idx0) == log_seq(log, idx1))
                return -EINVAL;
        if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
                return -EINVAL;

        if (log_seq(log, idx0) < log_seq(log, idx1)) {
                if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
                        old = 0;
                else
                        old = 1;
        } else {
                if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
                        old = 1;
                else
                        old = 0;
        }

        return old;
}
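
/*
 * Illustration of the arithmetic above: sequence numbers cycle
 * 1 -> 2 -> 3 -> 1, so for the pair (seq[idx0], seq[idx1]) == (1, 2)
 * slot idx0 is older, while for (1, 3) slot idx1 is older because 1
 * follows 3 after the wraparound. Equal sequence numbers, or a sum
 * greater than 5 (i.e. anything other than {1,2}, {2,3} or {3,1}),
 * cannot occur in a consistent log and are rejected.
 */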

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
                        struct log_entry *ent, int old_flag)
{
        int ret;
        int old_ent, ret_ent;
        struct log_group log;

        ret = btt_log_group_read(arena, lane, &log);
        if (ret)
                return -EIO;

        old_ent = btt_log_get_old(arena, &log);
        if (old_ent < 0 || old_ent > 1) {
                dev_err(to_dev(arena),
                                "log corruption (%d): lane %d seq [%d, %d]\n",
                                old_ent, lane,
                                le32_to_cpu(log.ent[arena->log_index[0]].seq),
                                le32_to_cpu(log.ent[arena->log_index[1]].seq));
                /* TODO set error state? */
                return -EIO;
        }

        ret_ent = (old_flag ? old_ent : (1 - old_ent));

        if (ent != NULL)
                memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

        return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
                        u32 sub, struct log_entry *ent, unsigned long flags)
{
        int ret;
        u32 group_slot = arena->log_index[sub];
        unsigned int log_half = LOG_ENT_SIZE / 2;
        void *src = ent;
        u64 ns_off;

        ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
                (group_slot * LOG_ENT_SIZE);
        /* split the 16B write into atomic, durable halves */
        ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
        if (ret)
                return ret;

        ns_off += log_half;
        src += log_half;
        return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
                        struct log_entry *ent)
{
        int ret;

        ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
        if (ret)
                return ret;

        /* prepare the next free entry */
        arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
        if (++(arena->freelist[lane].seq) == 4)
                arena->freelist[lane].seq = 1;
        if (ent_e_flag(le32_to_cpu(ent->old_map)))
                arena->freelist[lane].has_err = 1;
        arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

        return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
        int ret = -EINVAL;
        void *zerobuf;
        size_t offset = 0;
        size_t chunk_size = SZ_2M;
        size_t mapsize = arena->logoff - arena->mapoff;

        zerobuf = kzalloc(chunk_size, GFP_KERNEL);
        if (!zerobuf)
                return -ENOMEM;

        /*
         * mapoff should always be at least 512B aligned. We rely on that to
         * make sure rw_bytes does error clearing correctly, so make sure that
         * is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
                "arena->mapoff: %#llx is unaligned\n", arena->mapoff);

        while (mapsize) {
                size_t size = min(mapsize, chunk_size);

                dev_WARN_ONCE(to_dev(arena), size < 512,
                        "chunk size: %#zx is unaligned\n", size);
                ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
                                size, 0);
                if (ret)
                        goto free;

                offset += size;
                mapsize -= size;
                cond_resched();
        }

 free:
        kfree(zerobuf);
        return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
        size_t logsize = arena->info2off - arena->logoff;
        size_t chunk_size = SZ_4K, offset = 0;
        struct log_entry ent;
        void *zerobuf;
        int ret;
        u32 i;

        zerobuf = kzalloc(chunk_size, GFP_KERNEL);
        if (!zerobuf)
                return -ENOMEM;
        /*
         * logoff should always be at least 512B aligned. We rely on that to
         * make sure rw_bytes does error clearing correctly, so make sure that
         * is the case.
         */
        dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
                "arena->logoff: %#llx is unaligned\n", arena->logoff);

        while (logsize) {
                size_t size = min(logsize, chunk_size);

                dev_WARN_ONCE(to_dev(arena), size < 512,
                        "chunk size: %#zx is unaligned\n", size);
                ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
                                size, 0);
                if (ret)
                        goto free;

                offset += size;
                logsize -= size;
                cond_resched();
        }

        for (i = 0; i < arena->nfree; i++) {
                ent.lba = cpu_to_le32(i);
                ent.old_map = cpu_to_le32(arena->external_nlba + i);
                ent.new_map = cpu_to_le32(arena->external_nlba + i);
                ent.seq = cpu_to_le32(LOG_SEQ_INIT);
                ret = __btt_log_write(arena, i, 0, &ent, 0);
                if (ret)
                        goto free;
        }

 free:
        kfree(zerobuf);
        return ret;
}
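
/*
 * After btt_log_init, lane i's flog contains { lba = i, old_map =
 * new_map = external_nlba + i, seq = LOG_SEQ_INIT }, i.e. the nfree
 * internal blocks past the end of the external LBA space form the
 * initial free list, one block per lane.
 */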

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
        return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
        int ret = 0;

        if (arena->freelist[lane].has_err) {
                void *zero_page = page_address(ZERO_PAGE(0));
                u32 lba = arena->freelist[lane].block;
                u64 nsoff = to_namespace_offset(arena, lba);
                unsigned long len = arena->sector_size;

                mutex_lock(&arena->err_lock);

                while (len) {
                        unsigned long chunk = min(len, PAGE_SIZE);

                        ret = arena_write_bytes(arena, nsoff, zero_page,
                                chunk, 0);
                        if (ret)
                                break;
                        len -= chunk;
                        nsoff += chunk;
                        if (len == 0)
                                arena->freelist[lane].has_err = 0;
                }
                mutex_unlock(&arena->err_lock);
        }
        return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
        int new, ret;
        struct log_entry log_new;
        u32 i, map_entry, log_oldmap, log_newmap;

        arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
                                        GFP_KERNEL);
        if (!arena->freelist)
                return -ENOMEM;

        for (i = 0; i < arena->nfree; i++) {
                new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
                if (new < 0)
                        return new;

                /* old and new map entries with any flags stripped out */
                log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
                log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

                /* sub points to the next one to be overwritten */
                arena->freelist[i].sub = 1 - new;
                arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
                arena->freelist[i].block = log_oldmap;

                /*
                 * FIXME: if error clearing fails during init, we want to make
                 * the BTT read-only
                 */
                if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
                    !ent_normal(le32_to_cpu(log_new.old_map))) {
                        arena->freelist[i].has_err = 1;
                        ret = arena_clear_freelist_error(arena, i);
                        if (ret)
                                dev_err_ratelimited(to_dev(arena),
                                        "Unable to clear known errors\n");
                }

                /* This implies a newly created or untouched flog entry */
                if (log_oldmap == log_newmap)
                        continue;

                /* Check if map recovery is needed */
                ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
                                NULL, NULL, 0);
                if (ret)
                        return ret;

                /*
                 * The map_entry from btt_map_read is stripped of any flag
                 * bits, so also use the stripped-out versions from the log
                 * for testing whether recovery is needed. For restoration,
                 * use the 'raw' version of the log entries as that captured
                 * what we were going to write originally.
                 */
                if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
                        /*
                         * Last transaction wrote the flog, but wasn't able
                         * to complete the map write. So fix up the map.
                         */
                        ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
                                        le32_to_cpu(log_new.new_map), 0, 0, 0);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
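
/*
 * Example of the recovery case above: if a lane's flog says
 * { lba = 5, old_map = 100, new_map = 200 } but map[5] still reads 100,
 * the last transaction committed its flog entry and then crashed before
 * updating the map, so replaying the map write with 200 completes it.
 */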

static bool ent_is_padding(struct log_entry *ent)
{
        return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
                && (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
        bool idx_set = false, initial_state = true;
        int ret, log_index[2] = {-1, -1};
        u32 i, j, next_idx = 0;
        struct log_group log;
        u32 pad_count = 0;

        for (i = 0; i < arena->nfree; i++) {
                ret = btt_log_group_read(arena, i, &log);
                if (ret < 0)
                        return ret;

                for (j = 0; j < 4; j++) {
                        if (!idx_set) {
                                if (ent_is_padding(&log.ent[j])) {
                                        pad_count++;
                                        continue;
                                } else {
                                        /* Skip if index has been recorded */
                                        if ((next_idx == 1) &&
                                                (j == log_index[0]))
                                                continue;
                                        /* valid entry, record index */
                                        log_index[next_idx] = j;
                                        next_idx++;
                                }
                                if (next_idx == 2) {
                                        /* two valid entries found */
                                        idx_set = true;
                                } else if (next_idx > 2) {
                                        /* too many valid indices */
                                        return -ENXIO;
                                }
                        } else {
                                /*
                                 * once the indices have been set, just verify
                                 * that all subsequent log groups are either in
                                 * their initial state or follow the same
                                 * indices.
                                 */
                                if (j == log_index[0]) {
                                        /* entry must be 'valid' */
                                        if (ent_is_padding(&log.ent[j]))
                                                return -ENXIO;
                                } else if (j == log_index[1]) {
                                        /*
                                         * log_index[1] can be padding if the
                                         * lane never got used and it is still
                                         * in the initial state (three 'padding'
                                         * entries)
                                         */
                                } else {
                                        /* entry must be invalid (padding) */
                                        if (!ent_is_padding(&log.ent[j]))
                                                return -ENXIO;
                                }
                        }
                }
                /*
                 * If any of the log_groups have more than one valid,
                 * non-padding entry, then we are no longer in the
                 * initial_state
                 */
                if (pad_count < 3)
                        initial_state = false;
                pad_count = 0;
        }

        if (!initial_state && !idx_set)
                return -ENXIO;

        /*
         * If all the entries in the log were in the initial state,
         * assume new padding scheme
         */
        if (initial_state)
                log_index[1] = 1;

        /*
         * Only allow the known permutations of log/padding indices,
         * i.e. (0, 1), and (0, 2)
         */
        if ((log_index[0] != 0) ||
                        ((log_index[1] != 1) && (log_index[1] != 2))) {
                dev_err(to_dev(arena), "Found an unknown padding scheme\n");
                return -ENXIO;
        }

        arena->log_index[0] = log_index[0];
        arena->log_index[1] = log_index[1];
        dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
        dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
        return 0;
}
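
/*
 * The two permutations correspond to the known on-media layouts: the
 * current format packs the two log entries of a group into slots (0, 1)
 * with padding in (2, 3), while an older format (which padded each
 * entry out to 32 bytes) effectively placed them in slots (0, 2) with
 * padding in (1, 3). See the log_group description in btt.h.
 */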

static int btt_rtt_init(struct arena_info *arena)
{
        arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
        if (arena->rtt == NULL)
                return -ENOMEM;

        return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
        u32 i;

        arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
                                GFP_KERNEL);
        if (!arena->map_locks)
                return -ENOMEM;

        for (i = 0; i < arena->nfree; i++)
                spin_lock_init(&arena->map_locks[i].lock);

        return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
                                size_t start, size_t arena_off)
{
        struct arena_info *arena;
        u64 logsize, mapsize, datasize;
        u64 available = size;

        arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
        if (!arena)
                return NULL;
        arena->nd_btt = btt->nd_btt;
        arena->sector_size = btt->sector_size;
        mutex_init(&arena->err_lock);

        if (!size)
                return arena;

        arena->size = size;
        arena->external_lba_start = start;
        arena->external_lbasize = btt->lbasize;
        arena->internal_lbasize = roundup(arena->external_lbasize,
                                        INT_LBASIZE_ALIGNMENT);
        arena->nfree = BTT_DEFAULT_NFREE;
        arena->version_major = btt->nd_btt->version_major;
        arena->version_minor = btt->nd_btt->version_minor;

        if (available % BTT_PG_SIZE)
                available -= (available % BTT_PG_SIZE);

        /* Two pages are reserved for the super block and its copy */
        available -= 2 * BTT_PG_SIZE;

        /* The log takes a fixed amount of space based on nfree */
        logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
        available -= logsize;

        /* Calculate optimal split between map and data area */
        arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
                        arena->internal_lbasize + MAP_ENT_SIZE);
        arena->external_nlba = arena->internal_nlba - arena->nfree;

        mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
        datasize = available - mapsize;

        /* 'Absolute' values, relative to start of storage space */
        arena->infooff = arena_off;
        arena->dataoff = arena->infooff + BTT_PG_SIZE;
        arena->mapoff = arena->dataoff + datasize;
        arena->logoff = arena->mapoff + mapsize;
        arena->info2off = arena->logoff + logsize;

        /* Default log indices are (0,1) */
        arena->log_index[0] = 0;
        arena->log_index[1] = 1;
        return arena;
}
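
/*
 * Resulting arena layout (offsets relative to arena_off; sizes per the
 * BTT_PG_SIZE, MAP_ENT_SIZE and LOG_GRP_SIZE definitions in btt.h):
 *
 *   infooff    dataoff              mapoff          logoff     info2off
 *   +---------+--------------------+---------------+----------+---------+
 *   |  info   |  data blocks       |  map          |  log     |  info2  |
 *   |  (4K)   |  internal_nlba     |  4B per       |  64B per |  (4K)   |
 *   |         |  internal blocks   |  external LBA |  lane    |         |
 *   +---------+--------------------+---------------+----------+---------+
 */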

static void free_arenas(struct btt *btt)
{
        struct arena_info *arena, *next;

        list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
                list_del(&arena->list);
                kfree(arena->rtt);
                kfree(arena->map_locks);
                kfree(arena->freelist);
                debugfs_remove_recursive(arena->debugfs_dir);
                kfree(arena);
        }
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
                                u64 arena_off)
{
        arena->internal_nlba = le32_to_cpu(super->internal_nlba);
        arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
        arena->external_nlba = le32_to_cpu(super->external_nlba);
        arena->external_lbasize = le32_to_cpu(super->external_lbasize);
        arena->nfree = le32_to_cpu(super->nfree);
        arena->version_major = le16_to_cpu(super->version_major);
        arena->version_minor = le16_to_cpu(super->version_minor);

        arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
                        le64_to_cpu(super->nextoff));
        arena->infooff = arena_off;
        arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
        arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
        arena->logoff = arena_off + le64_to_cpu(super->logoff);
        arena->info2off = arena_off + le64_to_cpu(super->info2off);

        arena->size = (le64_to_cpu(super->nextoff) > 0)
                ? (le64_to_cpu(super->nextoff))
                : (arena->info2off - arena->infooff + BTT_PG_SIZE);

        arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
        int ret = 0;
        struct arena_info *arena;
        struct btt_sb *super;
        size_t remaining = btt->rawsize;
        u64 cur_nlba = 0;
        size_t cur_off = 0;
        int num_arenas = 0;

        super = kzalloc(sizeof(*super), GFP_KERNEL);
        if (!super)
                return -ENOMEM;

        while (remaining) {
                /* Alloc memory for arena */
                arena = alloc_arena(btt, 0, 0, 0);
                if (!arena) {
                        ret = -ENOMEM;
                        goto out_super;
                }

                arena->infooff = cur_off;
                ret = btt_info_read(arena, super);
                if (ret)
                        goto out;

                if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
                        if (remaining == btt->rawsize) {
                                btt->init_state = INIT_NOTFOUND;
                                dev_info(to_dev(arena), "No existing arenas\n");
                                goto out;
                        } else {
                                dev_err(to_dev(arena),
                                                "Found corrupted metadata!\n");
                                ret = -ENODEV;
                                goto out;
                        }
                }

                arena->external_lba_start = cur_nlba;
                parse_arena_meta(arena, super, cur_off);

                ret = log_set_indices(arena);
                if (ret) {
                        dev_err(to_dev(arena),
                                "Unable to deduce log/padding indices\n");
                        goto out;
                }

                ret = btt_freelist_init(arena);
                if (ret)
                        goto out;

                ret = btt_rtt_init(arena);
                if (ret)
                        goto out;

                ret = btt_maplocks_init(arena);
                if (ret)
                        goto out;

                list_add_tail(&arena->list, &btt->arena_list);

                remaining -= arena->size;
                cur_off += arena->size;
                cur_nlba += arena->external_nlba;
                num_arenas++;

                if (arena->nextoff == 0)
                        break;
        }
        btt->num_arenas = num_arenas;
        btt->nlba = cur_nlba;
        btt->init_state = INIT_READY;

        kfree(super);
        return ret;

 out:
        kfree(arena);
        free_arenas(btt);
 out_super:
        kfree(super);
        return ret;
}

static int create_arenas(struct btt *btt)
{
        size_t remaining = btt->rawsize;
        size_t cur_off = 0;

        while (remaining) {
                struct arena_info *arena;
                size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

                remaining -= arena_size;
                if (arena_size < ARENA_MIN_SIZE)
                        break;

                arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
                if (!arena) {
                        free_arenas(btt);
                        return -ENOMEM;
                }
                btt->nlba += arena->external_nlba;
                if (remaining >= ARENA_MIN_SIZE)
                        arena->nextoff = arena->size;
                else
                        arena->nextoff = 0;
                cur_off += arena_size;
                list_add_tail(&arena->list, &btt->arena_list);
        }

        return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
        int ret;
        u64 sum;
        struct btt_sb *super;
        struct nd_btt *nd_btt = arena->nd_btt;
        const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

        ret = btt_map_init(arena);
        if (ret)
                return ret;

        ret = btt_log_init(arena);
        if (ret)
                return ret;

        super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
        if (!super)
                return -ENOMEM;

        strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
        memcpy(super->uuid, nd_btt->uuid, 16);
        memcpy(super->parent_uuid, parent_uuid, 16);
        super->flags = cpu_to_le32(arena->flags);
        super->version_major = cpu_to_le16(arena->version_major);
        super->version_minor = cpu_to_le16(arena->version_minor);
        super->external_lbasize = cpu_to_le32(arena->external_lbasize);
        super->external_nlba = cpu_to_le32(arena->external_nlba);
        super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
        super->internal_nlba = cpu_to_le32(arena->internal_nlba);
        super->nfree = cpu_to_le32(arena->nfree);
        super->infosize = cpu_to_le32(sizeof(struct btt_sb));
        super->nextoff = cpu_to_le64(arena->nextoff);
        /*
         * Subtract arena->infooff (arena start) so numbers are relative
         * to 'this' arena
         */
        super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
        super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
        super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
        super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

        super->flags = 0;
        sum = nd_sb_checksum((struct nd_gen_sb *) super);
        super->checksum = cpu_to_le64(sum);

        ret = btt_info_write(arena, super);

        kfree(super);
        return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
        int ret = 0;
        struct arena_info *arena;

        mutex_lock(&btt->init_lock);
        list_for_each_entry(arena, &btt->arena_list, list) {
                ret = btt_arena_write_layout(arena);
                if (ret)
                        goto unlock;

                ret = btt_freelist_init(arena);
                if (ret)
                        goto unlock;

                ret = btt_rtt_init(arena);
                if (ret)
                        goto unlock;

                ret = btt_maplocks_init(arena);
                if (ret)
                        goto unlock;
        }

        btt->init_state = INIT_READY;

 unlock:
        mutex_unlock(&btt->init_lock);
        return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
        return btt->lbasize - btt->sector_size;
}
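
/*
 * For example (assuming a BTT configured with a 4104-byte external LBA
 * size over 4096-byte sectors), this leaves 8 bytes of per-sector
 * metadata for the block integrity (DIF/DIX) machinery used below.
 */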

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
                                struct arena_info **arena)
{
        struct arena_info *arena_list;
        __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

        list_for_each_entry(arena_list, &btt->arena_list, list) {
                if (lba < arena_list->external_nlba) {
                        *arena = arena_list;
                        *premap = lba;
                        return 0;
                }
                lba -= arena_list->external_nlba;
        }

        return -EIO;
}
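
/*
 * Example: with 512-byte sectors the external LBA equals the bio
 * sector; with 4096-byte sectors, sector 80 becomes external LBA 10
 * (80 * 512 / 4096), which is then located by subtracting each arena's
 * external_nlba until it falls within one.
 */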

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
                __acquires(&arena->map_locks[idx].lock)
{
        u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

        spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
                __releases(&arena->map_locks[idx].lock)
{
        u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

        spin_unlock(&arena->map_locks[idx].lock);
}
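
/*
 * With 4-byte map entries and (typically) 64-byte cachelines, each
 * lock above covers a stripe of 16 adjacent map entries, so writers
 * touching nearby premap LBAs serialize on the same lock while writers
 * to distant LBAs proceed in parallel.
 */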

static int btt_data_read(struct arena_info *arena, struct page *page,
                        unsigned int off, u32 lba, u32 len)
{
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
        void *mem = kmap_atomic(page);

        ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
        kunmap_atomic(mem);

        return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
                        struct page *page, unsigned int off, u32 len)
{
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
        void *mem = kmap_atomic(page);

        ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
        kunmap_atomic(mem);

        return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
        void *mem = kmap_atomic(page);

        memset(mem + off, 0, len);
        kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
                        struct arena_info *arena, u32 postmap, int rw)
{
        unsigned int len = btt_meta_size(btt);
        u64 meta_nsoff;
        int ret = 0;

        if (bip == NULL)
                return 0;

        meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

        while (len) {
                unsigned int cur_len;
                struct bio_vec bv;
                void *mem;

                bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
                /*
                 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
                 * .bv_offset already adjusted for iter->bi_bvec_done, and we
                 * can use those directly
                 */

                cur_len = min(len, bv.bv_len);
                mem = kmap_atomic(bv.bv_page);
                if (rw)
                        ret = arena_write_bytes(arena, meta_nsoff,
                                        mem + bv.bv_offset, cur_len,
                                        NVDIMM_IO_ATOMIC);
                else
                        ret = arena_read_bytes(arena, meta_nsoff,
                                        mem + bv.bv_offset, cur_len,
                                        NVDIMM_IO_ATOMIC);

                kunmap_atomic(mem);
                if (ret)
                        return ret;

                len -= cur_len;
                meta_nsoff += cur_len;
                if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
                        return -EIO;
        }

        return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
                        struct arena_info *arena, u32 postmap, int rw)
{
        return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
                        struct page *page, unsigned int off, sector_t sector,
                        unsigned int len)
{
        int ret = 0;
        int t_flag, e_flag;
        struct arena_info *arena = NULL;
        u32 lane = 0, premap, postmap;

        while (len) {
                u32 cur_len;

                lane = nd_region_acquire_lane(btt->nd_region);

                ret = lba_to_arena(btt, sector, &premap, &arena);
                if (ret)
                        goto out_lane;

                cur_len = min(btt->sector_size, len);

                ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
                                NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_lane;

                /*
                 * We loop to make sure that the post map LBA didn't change
                 * from under us between writing the RTT and doing the actual
                 * read.
                 */
                while (1) {
                        u32 new_map;
                        int new_t, new_e;

                        if (t_flag) {
                                zero_fill_data(page, off, cur_len);
                                goto out_lane;
                        }

                        if (e_flag) {
                                ret = -EIO;
                                goto out_lane;
                        }

                        arena->rtt[lane] = RTT_VALID | postmap;
                        /*
                         * Barrier to make sure this write is not reordered
                         * to do the verification map_read before the RTT store
                         */
                        barrier();

                        ret = btt_map_read(arena, premap, &new_map, &new_t,
                                                &new_e, NVDIMM_IO_ATOMIC);
                        if (ret)
                                goto out_rtt;

                        if ((postmap == new_map) && (t_flag == new_t) &&
                                        (e_flag == new_e))
                                break;

                        postmap = new_map;
                        t_flag = new_t;
                        e_flag = new_e;
                }

                ret = btt_data_read(arena, page, off, postmap, cur_len);
                if (ret) {
                        /* Media error - set the e_flag */
                        if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
                                dev_warn_ratelimited(to_dev(arena),
                                        "Error persistently tracking bad blocks at %#x\n",
                                        premap);
                        goto out_rtt;
                }

                if (bip) {
                        ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
                        if (ret)
                                goto out_rtt;
                }

                arena->rtt[lane] = RTT_INVALID;
                nd_region_release_lane(btt->nd_region, lane);

                len -= cur_len;
                off += cur_len;
                sector += btt->sector_size >> SECTOR_SHIFT;
        }

        return 0;

 out_rtt:
        arena->rtt[lane] = RTT_INVALID;
 out_lane:
        nd_region_release_lane(btt->nd_region, lane);
        return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
                u32 postmap)
{
        u64 nsoff = adjust_initial_offset(arena->nd_btt,
                        to_namespace_offset(arena, postmap));
        sector_t phys_sector = nsoff >> 9;

        return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}
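
/*
 * Write path overview: pick the lane's current free block, wait for any
 * readers of that block to drain from the RTT, write the new data (and
 * integrity metadata) to it, then commit the transaction under the map
 * lock by writing the flog entry followed by the new map entry.
 */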

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
                        sector_t sector, struct page *page, unsigned int off,
                        unsigned int len)
{
        int ret = 0;
        struct arena_info *arena = NULL;
        u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
        struct log_entry log;
        int sub;

        while (len) {
                u32 cur_len;
                int e_flag;

 retry:
                lane = nd_region_acquire_lane(btt->nd_region);

                ret = lba_to_arena(btt, sector, &premap, &arena);
                if (ret)
                        goto out_lane;
                cur_len = min(btt->sector_size, len);

                if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
                        ret = -EIO;
                        goto out_lane;
                }

                if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
                        arena->freelist[lane].has_err = 1;

                if (mutex_is_locked(&arena->err_lock)
                                || arena->freelist[lane].has_err) {
                        nd_region_release_lane(btt->nd_region, lane);

                        ret = arena_clear_freelist_error(arena, lane);
                        if (ret)
                                return ret;

                        /* OK to acquire a different lane/free block */
                        goto retry;
                }

                new_postmap = arena->freelist[lane].block;

                /* Wait if the new block is being read from */
                for (i = 0; i < arena->nfree; i++)
                        while (arena->rtt[i] == (RTT_VALID | new_postmap))
                                cpu_relax();

                if (new_postmap >= arena->internal_nlba) {
                        ret = -EIO;
                        goto out_lane;
                }

                ret = btt_data_write(arena, new_postmap, page, off, cur_len);
                if (ret)
                        goto out_lane;

                if (bip) {
                        ret = btt_rw_integrity(btt, bip, arena, new_postmap,
                                                WRITE);
                        if (ret)
                                goto out_lane;
                }

                lock_map(arena, premap);
                ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
                                NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_map;
                if (old_postmap >= arena->internal_nlba) {
                        ret = -EIO;
                        goto out_map;
                }
                if (e_flag)
                        set_e_flag(old_postmap);

                log.lba = cpu_to_le32(premap);
                log.old_map = cpu_to_le32(old_postmap);
                log.new_map = cpu_to_le32(new_postmap);
                log.seq = cpu_to_le32(arena->freelist[lane].seq);
                sub = arena->freelist[lane].sub;
                ret = btt_flog_write(arena, lane, sub, &log);
                if (ret)
                        goto out_map;

                ret = btt_map_write(arena, premap, new_postmap, 0, 0,
                        NVDIMM_IO_ATOMIC);
                if (ret)
                        goto out_map;

                unlock_map(arena, premap);
                nd_region_release_lane(btt->nd_region, lane);

                if (e_flag) {
                        ret = arena_clear_freelist_error(arena, lane);
                        if (ret)
                                return ret;
                }

                len -= cur_len;
                off += cur_len;
                sector += btt->sector_size >> SECTOR_SHIFT;
        }

        return 0;

 out_map:
        unlock_map(arena, premap);
 out_lane:
        nd_region_release_lane(btt->nd_region, lane);
        return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
                        struct page *page, unsigned int len, unsigned int off,
                        unsigned int op, sector_t sector)
{
        int ret;

        if (!op_is_write(op)) {
                ret = btt_read_pg(btt, bip, page, off, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                ret = btt_write_pg(btt, bip, sector, page, off, len);
        }

        return ret;
}

static blk_qc_t btt_submit_bio(struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct btt *btt = bio->bi_bdev->bd_disk->private_data;
        struct bvec_iter iter;
        unsigned long start;
        struct bio_vec bvec;
        int err = 0;
        bool do_acct;

        if (!bio_integrity_prep(bio))
                return BLK_QC_T_NONE;

        do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
        if (do_acct)
                start = bio_start_io_acct(bio);
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;

                if (len > PAGE_SIZE || len < btt->sector_size ||
                                len % btt->sector_size) {
                        dev_err_ratelimited(&btt->nd_btt->dev,
                                "unaligned bio segment (len: %d)\n", len);
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }

                err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
                                  bio_op(bio), iter.bi_sector);
                if (err) {
                        dev_err(&btt->nd_btt->dev,
                                        "io error in %s sector %lld, len %d\n",
                                        (op_is_write(bio_op(bio))) ? "WRITE" :
                                        "READ",
                                        (unsigned long long) iter.bi_sector, len);
1478                        bio->bi_status = errno_to_blk_status(err);
1479                        break;
1480                }
1481        }
1482        if (do_acct)
1483                bio_end_io_acct(bio, start);
1484
1485        bio_endio(bio);
1486        return BLK_QC_T_NONE;
1487}
1488
1489static int btt_rw_page(struct block_device *bdev, sector_t sector,
1490                struct page *page, unsigned int op)
1491{
1492        struct btt *btt = bdev->bd_disk->private_data;
1493        int rc;
1494
1495        rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
1496        if (rc == 0)
1497                page_endio(page, op_is_write(op), 0);
1498
1499        return rc;
1500}
1501
1503static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
1504{
1505        /* fabricated geometry: 64 heads * 32 sectors = 2^11 sectors/cylinder, hence >> 11 */
1506        geo->heads = 1 << 6;
1507        geo->sectors = 1 << 5;
1508        geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1509        return 0;
1510}
1511
1512static const struct block_device_operations btt_fops = {
1513        .owner =                THIS_MODULE,
1514        .submit_bio =           btt_submit_bio,
1515        .rw_page =              btt_rw_page,
1516        .getgeo =               btt_getgeo,
1517};
1518
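    /*
     * Allocate and register the request queue and gendisk that expose the
     * BTT as a block device.  When the btt format reserves per-block
     * metadata, a matching block-integrity profile is registered as well.
     */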
1519static int btt_blk_init(struct btt *btt)
1520{
1521        struct nd_btt *nd_btt = btt->nd_btt;
1522        struct nd_namespace_common *ndns = nd_btt->ndns;
1523
1524        /* create a new disk and request queue for btt */
1525        btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE);
1526        if (!btt->btt_queue)
1527                return -ENOMEM;
1528
1529        btt->btt_disk = alloc_disk(0);
1530        if (!btt->btt_disk) {
1531                blk_cleanup_queue(btt->btt_queue);
1532                return -ENOMEM;
1533        }
1534
1535        nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
1536        btt->btt_disk->first_minor = 0;
1537        btt->btt_disk->fops = &btt_fops;
1538        btt->btt_disk->private_data = btt;
1539        btt->btt_disk->queue = btt->btt_queue;
1540        btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
1541
1542        blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
1543        blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
1544        blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
1545
1546        if (btt_meta_size(btt)) {
1547                int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1548
1549                if (rc) {
1550                        /* disk not yet added; put_disk() alone is the correct unwind */
1551                        put_disk(btt->btt_disk);
1552                        blk_cleanup_queue(btt->btt_queue);
1553                        return rc;
1554                }
1555        }
1556        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); /* 512B sectors */
1557        device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
1558        btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1559        nvdimm_check_and_set_ro(btt->btt_disk);
1560
1561        return 0;
1562}
1563
1564static void btt_blk_cleanup(struct btt *btt)
1565{
1566        del_gendisk(btt->btt_disk);
1567        put_disk(btt->btt_disk);
1568        blk_cleanup_queue(btt->btt_queue);
1569}
1570
1571/**
1572 * btt_init - initialize a block translation table for the given device
1573 * @nd_btt:     device with BTT geometry and backing device info
1574 * @rawsize:    raw size in bytes of the backing device
1575 * @lbasize:    lba size of the backing device
1576 * @uuid:       A uuid for the backing device - this is stored on media
1577 * @nd_region:  parent region; lanes are acquired from it for parallel requests
1578 *
1579 * Initialize a Block Translation Table on a backing device to provide
1580 * single sector power fail atomicity.
1581 *
1582 * Context:
1583 * Might sleep.
1584 *
1585 * Returns:
1586 * Pointer to a new struct btt on success, NULL on failure.
1587 */
1588static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
1589                u32 lbasize, u8 *uuid, struct nd_region *nd_region)
1590{
1591        int ret;
1592        struct btt *btt;
1593        struct nd_namespace_io *nsio;
1594        struct device *dev = &nd_btt->dev;
1595
1596        btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
1597        if (!btt)
1598                return NULL;
1599
1600        btt->nd_btt = nd_btt;
1601        btt->rawsize = rawsize;
1602        btt->lbasize = lbasize;
1603        btt->sector_size = ((lbasize >= 4096) ? 4096 : 512); /* exported logical block size */
1604        INIT_LIST_HEAD(&btt->arena_list);
1605        mutex_init(&btt->init_lock);
1606        btt->nd_region = nd_region;
1607        nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
1608        btt->phys_bb = &nsio->bb; /* badblocks of the backing namespace */
1609
1610        ret = discover_arenas(btt);
1611        if (ret) {
1612                dev_err(dev, "init: error in discover_arenas: %d\n", ret);
1613                return NULL;
1614        }
1615
1616        if (btt->init_state != INIT_READY && nd_region->ro) {
1617                dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
1618                                dev_name(&nd_region->dev));
1619                return NULL;
1620        } else if (btt->init_state != INIT_READY) {
1621                btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
1622                        ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
1623                dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
1624                                btt->num_arenas, rawsize);
1625
1626                ret = create_arenas(btt);
1627                if (ret) {
1628                        dev_info(dev, "init: create_arenas: %d\n", ret);
1629                        return NULL;
1630                }
1631
1632                ret = btt_meta_init(btt);
1633                if (ret) {
1634                        dev_err(dev, "init: error in meta_init: %d\n", ret);
1635                        return NULL;
1636                }
1637        }
1638
1639        ret = btt_blk_init(btt);
1640        if (ret) {
1641                dev_err(dev, "init: error in blk_init: %d\n", ret);
1642                return NULL;
1643        }
1644
1645        btt_debugfs_init(btt);
1646
1647        return btt;
1648}
1649
1650/**
1651 * btt_fini - de-initialize a BTT
1652 * @btt:        the BTT handle that was generated by btt_init
1653 *
1654 * De-initialize a Block Translation Table on device removal
1655 *
1656 * Context:
1657 * Might sleep.
1658 */
1659static void btt_fini(struct btt *btt)
1660{
1661        if (btt) {
1662                btt_blk_cleanup(btt);
1663                free_arenas(btt);
1664                debugfs_remove_recursive(btt->debugfs_dir);
1665        }
1666}
1667
1668int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
1669{
1670        struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
1671        struct nd_region *nd_region;
1672        struct btt_sb *btt_sb;
1673        struct btt *btt;
1674        size_t size, rawsize;
1675        int rc;
1676
1677        if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
1678                dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
1679                return -ENODEV;
1680        }
1681
1682        btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
1683        if (!btt_sb)
1684                return -ENOMEM;
1685
1686        size = nvdimm_namespace_capacity(ndns);
1687        rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
1688        if (rc)
1689                return rc;
1690
1691        /*
1692         * If this returns < 0, that is ok as it just means there wasn't
1693         * an existing BTT, and we're creating a new one. We still need to
1694         * call this as we need the version dependent fields in nd_btt to be
1695         * set correctly based on the holder class
1696         */
1697        nd_btt_version(nd_btt, ndns, btt_sb);
1698
1699        rawsize = size - nd_btt->initial_offset;
1700        if (rawsize < ARENA_MIN_SIZE) {
1701                dev_dbg(&nd_btt->dev, "%s must be at least %llu bytes\n",
1702                                dev_name(&ndns->dev),
1703                                (unsigned long long)(ARENA_MIN_SIZE + nd_btt->initial_offset));
1704                return -ENXIO;
1705        }
1706        nd_region = to_nd_region(nd_btt->dev.parent);
1707        btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
1708                        nd_region);
1709        if (!btt)
1710                return -ENOMEM;
1711        nd_btt->btt = btt;
1712
1713        return 0;
1714}
1715EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
1716
1717int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
1718{
1719        struct btt *btt = nd_btt->btt;
1720
1721        btt_fini(btt);
1722        nd_btt->btt = NULL;
1723
1724        return 0;
1725}
1726EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
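
    /*
     * Usage sketch (hypothetical caller, modeled on the namespace drivers
     * that honor a BTT claim): attach once the claiming device is known
     * to be a BTT, detach on teardown.
     *
     *	if (is_nd_btt(ndns->claim))
     *		rc = nvdimm_namespace_attach_btt(ndns);
     *	...
     *	nvdimm_namespace_detach_btt(to_nd_btt(ndns->claim));
     */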
1727
1728static int __init nd_btt_init(void)
1729{
1730        int rc = 0;
1731
1732        debugfs_root = debugfs_create_dir("btt", NULL);
1733        if (IS_ERR_OR_NULL(debugfs_root))
1734                rc = -ENXIO;
1735
1736        return rc;
1737}
1738
1739static void __exit nd_btt_exit(void)
1740{
1741        debugfs_remove_recursive(debugfs_root);
1742}
1743
1744MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
1745MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
1746MODULE_LICENSE("GPL v2");
1747module_init(nd_btt_init);
1748module_exit(nd_btt_exit);
1749