linux/drivers/nvdimm/badrange.c
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

void badrange_init(struct badrange *badrange)
{
        INIT_LIST_HEAD(&badrange->list);
        spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);
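
/*
 * Example usage (illustrative sketch): a bus implementation embeds a
 * struct badrange and initializes it once at bus registration time, as
 * the nvdimm bus core does for its own nvdimm_bus->badrange member:
 *
 *         badrange_init(&nvdimm_bus->badrange);
 */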

static void append_badrange_entry(struct badrange *badrange,
                struct badrange_entry *bre, u64 addr, u64 length)
{
        lockdep_assert_held(&badrange->lock);
        bre->start = addr;
        bre->length = length;
        list_add_tail(&bre->list, &badrange->list);
}

static int alloc_and_append_badrange_entry(struct badrange *badrange,
                u64 addr, u64 length, gfp_t flags)
{
        struct badrange_entry *bre;

        bre = kzalloc(sizeof(*bre), flags);
        if (!bre)
                return -ENOMEM;

        append_badrange_entry(badrange, bre, addr, length);
        return 0;
}

static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
        struct badrange_entry *bre, *bre_new;

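        /*
         * The caller (badrange_add()) holds badrange->lock. Drop it
         * across the allocation so that GFP_KERNEL may sleep, then
         * retake it and re-check the list, which may have changed.
         */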
        spin_unlock(&badrange->lock);
        bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
        spin_lock(&badrange->lock);

        if (list_empty(&badrange->list)) {
                if (!bre_new)
                        return -ENOMEM;
                append_badrange_entry(badrange, bre_new, addr, length);
                return 0;
        }

        /*
         * There is a chance this is a duplicate; check for those first.
         * This will be the common case, as ARS_STATUS returns all known
         * errors in the SPA space and we cannot query it per region.
         */
        list_for_each_entry(bre, &badrange->list, list)
                if (bre->start == addr) {
                        /* If the length has changed, update this list entry */
                        if (bre->length != length)
                                bre->length = length;
                        kfree(bre_new);
                        return 0;
                }

        /*
         * If this is not a duplicate or a simple length update, add the
         * entry as is, as any overlapping ranges will get resolved when
         * the list is consumed and converted to badblocks.
         */
        if (!bre_new)
                return -ENOMEM;
        append_badrange_entry(badrange, bre_new, addr, length);

        return 0;
}

int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
        int rc;

        spin_lock(&badrange->lock);
        rc = add_badrange(badrange, addr, length);
        spin_unlock(&badrange->lock);

        return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);
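
/*
 * Example usage (illustrative sketch): an ARS result handler might record
 * each reported poison range against the bus-wide list; 'spa' and 'len'
 * are hypothetical variables holding the reported range:
 *
 *         if (badrange_add(&nvdimm_bus->badrange, spa, len))
 *                 dev_err(&nvdimm_bus->dev, "failed to record badrange\n");
 */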

void badrange_forget(struct badrange *badrange, phys_addr_t start,
                unsigned int len)
{
        struct list_head *badrange_list = &badrange->list;
        u64 clr_end = start + len - 1;
        struct badrange_entry *bre, *next;

        spin_lock(&badrange->lock);

        /*
         * [start, clr_end] is the badrange interval being cleared.
         * [bre->start, bre_end] is the badrange_list entry we're comparing
         * the above interval against. The badrange list entry may need
         * to be modified (update either start or length), deleted, or
         * split into two based on the overlap characteristics.
         */
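        /*
         * The four overlap cases handled below (C = range being cleared,
         * E = existing list entry):
         *
         *   1) E inside C:           C: |--------|    -> delete E
         *                            E:   |----|
         *   2) C covers head of E:   C: |----|        -> move E's start up
         *                            E:    |------|
         *   3) C covers tail of E:   C:     |----|    -> trim E's length
         *                            E: |------|
         *   4) C inside E:           C:   |--|        -> split E in two
         *                            E: |--------|
         */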

        list_for_each_entry_safe(bre, next, badrange_list, list) {
                u64 bre_end = bre->start + bre->length - 1;

                /* Skip intervals with no intersection */
                if (bre_end < start)
                        continue;
                if (bre->start > clr_end)
                        continue;
                /* Delete completely overlapped badrange entries */
                if ((bre->start >= start) && (bre_end <= clr_end)) {
                        list_del(&bre->list);
                        kfree(bre);
                        continue;
                }
                /* Adjust start point of partially cleared entries */
                if ((start <= bre->start) && (clr_end > bre->start)) {
                        bre->length -= clr_end - bre->start + 1;
                        bre->start = clr_end + 1;
                        continue;
                }
                /* Adjust bre->length for partial clearing at the tail end */
                if ((bre->start < start) && (bre_end <= clr_end)) {
                        /* bre->start remains the same */
                        bre->length = start - bre->start;
                        continue;
                }
                /*
                 * If clearing in the middle of an entry, we split it into
                 * two by modifying the current entry to represent one half
                 * of the split, and adding a new entry for the second half.
                 */
                if ((bre->start < start) && (bre_end > clr_end)) {
                        u64 new_start = clr_end + 1;
                        u64 new_len = bre_end - new_start + 1;

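                        /*
                         * GFP_NOWAIT because badrange->lock is held; if
                         * the allocation fails, the return value is
                         * ignored and the right-half entry is silently
                         * dropped, leaving only the left half tracked.
                         */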
                        /* Add new entry covering the right half */
                        alloc_and_append_badrange_entry(badrange, new_start,
                                        new_len, GFP_NOWAIT);
                        /* Adjust this entry to cover the left half */
                        bre->length = start - bre->start;
                        continue;
                }
        }
        spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
        dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
                        (u64) s * 512, (u64) num * 512);
        /* this isn't an error as the hardware will still throw an exception */
        if (badblocks_set(bb, s, num, 1))
                dev_info_once(bb->dev, "%s: failed for sector %llx\n",
                                __func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:         badblocks instance to populate
 * @ns_offset:  namespace offset where the error range begins (in bytes)
 * @len:        number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size).
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
        const unsigned int sector_size = 512;
        sector_t start_sector, end_sector;
        u64 num_sectors;
        u32 rem;

        start_sector = div_u64(ns_offset, sector_size);
        end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
        if (rem)
                end_sector++;
        num_sectors = end_sector - start_sector;

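        /*
         * badblocks_set() takes an 'int' sector count, so outsized
         * ranges must be chunked into INT_MAX-sized pieces.
         */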
        if (unlikely(num_sectors > (u64)INT_MAX)) {
                u64 remaining = num_sectors;
                sector_t s = start_sector;

                while (remaining) {
                        int done = min_t(u64, remaining, INT_MAX);

                        set_badblock(bb, s, done);
                        remaining -= done;
                        s += done;
                }
        } else {
                set_badblock(bb, start_sector, num_sectors);
        }
}
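
/*
 * Worked example (illustrative): for ns_offset = 1024 and len = 600,
 * start_sector = 1024 / 512 = 2, and (1024 + 600) / 512 = 3 remainder 88,
 * so end_sector is rounded up to 4. Sectors 2..3 (num_sectors = 2) are
 * marked bad, covering the entire byte range.
 */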

static void badblocks_populate(struct badrange *badrange,
                struct badblocks *bb, const struct range *range)
{
        struct badrange_entry *bre;

        if (list_empty(&badrange->list))
                return;

        list_for_each_entry(bre, &badrange->list, list) {
                u64 bre_end = bre->start + bre->length - 1;

                /* Discard intervals with no intersection */
                if (bre_end < range->start)
                        continue;
                if (bre->start > range->end)
                        continue;
                /* Deal with any overlap after start of the namespace */
                if (bre->start >= range->start) {
                        u64 start = bre->start;
                        u64 len;

                        if (bre_end <= range->end)
                                len = bre->length;
                        else
                                len = range->start + range_len(range)
                                        - bre->start;
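                        /* convert the SPA address to a namespace offset */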
                        __add_badblock_range(bb, start - range->start, len);
                        continue;
                }
                /*
                 * Deal with overlap for badrange starting before
                 * the namespace.
                 */
                if (bre->start < range->start) {
                        u64 len;

                        if (bre_end < range->end)
                                len = bre->start + bre->length - range->start;
                        else
                                len = range_len(range);
                        __add_badblock_range(bb, 0, len);
                }
        }
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @range: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
                struct badblocks *bb, const struct range *range)
{
        struct nvdimm_bus *nvdimm_bus;

        if (!is_memory(&nd_region->dev)) {
                dev_WARN_ONCE(&nd_region->dev, 1,
                                "%s only valid for pmem regions\n", __func__);
                return;
        }
        nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

        nvdimm_bus_lock(&nvdimm_bus->dev);
        badblocks_populate(&nvdimm_bus->badrange, bb, range);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
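
/*
 * Example usage (illustrative sketch): a pmem-style region driver populates
 * a badblocks instance for the namespace range it is enabling. Here 'dev',
 * 'nsio', and 'bb' are hypothetical stand-ins for the caller's namespace
 * device, I/O namespace, and badblocks objects:
 *
 *         struct range range = {
 *                 .start = nsio->res.start,
 *                 .end = nsio->res.end,
 *         };
 *
 *         nvdimm_badblocks_populate(to_nd_region(dev->parent), &bb, &range);
 */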