linux/drivers/nvdimm/blk.c
/*
 * NVDIMM Block Window Driver
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"

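/*
 * A block-window LBA may be larger than a standard sector; anything
 * beyond the 512- or 4096-byte data payload is per-sector out-of-band
 * metadata.
 */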
static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
{
        return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
}

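/* LBAs are stored on the DIMM padded out to INT_LBASIZE_ALIGNMENT */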
static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
{
        return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
}

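/* the data payload per LBA, i.e. the sector size seen by the block layer */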
static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
{
        return nsblk->lbasize - nsblk_meta_size(nsblk);
}

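/*
 * Translate a namespace-relative offset to a device offset.  A blk
 * namespace may be assembled from several discontiguous DIMM address
 * ranges, so walk the resources in order until the one containing
 * ns_offset is found.  Requests that straddle a resource boundary or
 * run past the namespace are rejected with SIZE_MAX.
 */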
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
                                resource_size_t ns_offset, unsigned int len)
{
        int i;

        for (i = 0; i < nsblk->num_resources; i++) {
                if (ns_offset < resource_size(nsblk->res[i])) {
                        if (ns_offset + len > resource_size(nsblk->res[i])) {
                                dev_WARN_ONCE(&nsblk->common.dev, 1,
                                        "illegal request\n");
                                return SIZE_MAX;
                        }
                        return nsblk->res[i]->start + ns_offset;
                }
                ns_offset -= resource_size(nsblk->res[i]);
        }

        dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
        return SIZE_MAX;
}

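/* retrieve the parent nd_blk_region that provides the do_io() routine */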
static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region;
        struct device *parent;

        parent = nsblk->common.dev.parent;
        nd_region = container_of(parent, struct nd_region, dev);
        return container_of(nd_region, struct nd_blk_region, nd_region);
}

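/*
 * Read or write the out-of-band metadata for one LBA by walking the
 * bio's integrity payload.  Without CONFIG_BLK_DEV_INTEGRITY this is a
 * no-op stub.
 */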
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
                struct bio_integrity_payload *bip, u64 lba, int rw)
{
        struct nd_blk_region *ndbr = to_ndbr(nsblk);
        unsigned int len = nsblk_meta_size(nsblk);
        resource_size_t dev_offset, ns_offset;
        u32 internal_lbasize, sector_size;
        int err = 0;

        internal_lbasize = nsblk_internal_lbasize(nsblk);
        sector_size = nsblk_sector_size(nsblk);
        ns_offset = lba * internal_lbasize + sector_size;
        dev_offset = to_dev_offset(nsblk, ns_offset, len);
        if (dev_offset == SIZE_MAX)
                return -EIO;

        while (len) {
                unsigned int cur_len;
                struct bio_vec bv;
                void *iobuf;

                bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
                /*
                 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
                 * .bv_offset already adjusted for iter->bi_bvec_done, and we
                 * can use those directly.
                 */

                cur_len = min(len, bv.bv_len);
                iobuf = kmap_atomic(bv.bv_page);
                err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
                                cur_len, rw);
                kunmap_atomic(iobuf);
                if (err)
                        return err;

                len -= cur_len;
                dev_offset += cur_len;
                bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
        }

        return err;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
                struct bio_integrity_payload *bip, u64 lba, int rw)
{
        return 0;
}
#endif

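/*
 * Perform the I/O for a single bvec, splitting it into sector-sized
 * chunks when an integrity payload requires interleaved metadata
 * transfers.
 */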
static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
                struct bio_integrity_payload *bip, struct page *page,
                unsigned int len, unsigned int off, int rw, sector_t sector)
{
        struct nd_blk_region *ndbr = to_ndbr(nsblk);
        resource_size_t dev_offset, ns_offset;
        u32 internal_lbasize, sector_size;
        int err = 0;
        void *iobuf;
        u64 lba;

        internal_lbasize = nsblk_internal_lbasize(nsblk);
        sector_size = nsblk_sector_size(nsblk);
        while (len) {
                unsigned int cur_len;

                /*
                 * If we don't have an integrity payload, we don't have to
                 * split the bvec into sectors, as this would cause unnecessary
                 * Block Window setup/move steps.  The do_io routine is capable
                 * of handling len <= PAGE_SIZE.
                 */
                cur_len = bip ? min(len, sector_size) : len;

                lba = div_u64(sector << SECTOR_SHIFT, sector_size);
                ns_offset = lba * internal_lbasize;
                dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
                if (dev_offset == SIZE_MAX)
                        return -EIO;

                iobuf = kmap_atomic(page);
                err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
                kunmap_atomic(iobuf);
                if (err)
                        return err;

                if (bip) {
                        err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
                        if (err)
                                return err;
                }
                len -= cur_len;
                off += cur_len;
                sector += sector_size >> SECTOR_SHIFT;
        }

        return err;
}

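/*
 * bio submission entry point: iterate over the bio's segments and hand
 * each one to nsblk_do_bvec(), accounting I/O statistics around the loop.
 */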
static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
{
        struct bio_integrity_payload *bip;
        struct nd_namespace_blk *nsblk;
        struct bvec_iter iter;
        unsigned long start;
        struct bio_vec bvec;
        int err = 0, rw;
        bool do_acct;

        /*
         * bio_integrity_enabled also checks if the bio already has an
         * integrity payload attached. If it does, we *don't* do a
         * bio_integrity_prep here - the payload has been generated by
         * another kernel subsystem, and we just pass it through.
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio->bi_error = -EIO;
                goto out;
        }

        bip = bio_integrity(bio);
        nsblk = q->queuedata;
        rw = bio_data_dir(bio);
        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;

                BUG_ON(len > PAGE_SIZE);
                err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
                                bvec.bv_offset, rw, iter.bi_sector);
                if (err) {
                        dev_dbg(&nsblk->common.dev,
                                        "io error in %s sector %lld, len %d\n",
                                        (rw == READ) ? "READ" : "WRITE",
                                        (unsigned long long) iter.bi_sector, len);
                        bio->bi_error = err;
                        break;
                }
        }
        if (do_acct)
                nd_iostat_end(bio, start);

 out:
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

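/*
 * Raw byte access published via ndns->rw_bytes and used by the BTT,
 * bypassing the block layer entirely.
 */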
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
                resource_size_t offset, void *iobuf, size_t n, int rw)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
        struct nd_blk_region *ndbr = to_ndbr(nsblk);
        resource_size_t dev_offset;

        if (unlikely(offset + n > nsblk->size)) {
                dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
                return -EFAULT;
        }

        dev_offset = to_dev_offset(nsblk, offset, n);
        if (dev_offset == SIZE_MAX)
                return -EIO;

        return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
}

static const struct block_device_operations nd_blk_fops = {
        .owner = THIS_MODULE,
        .revalidate_disk = nvdimm_revalidate_disk,
};

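/*
 * devm-managed teardown: these run automatically when the device is
 * unbound, so the attach path needs no explicit error-unwind for the
 * queue or disk.
 */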
static void nd_blk_release_queue(void *q)
{
        blk_cleanup_queue(q);
}

static void nd_blk_release_disk(void *disk)
{
        del_gendisk(disk);
        put_disk(disk);
}

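/*
 * Allocate a request queue and gendisk for the namespace.  Capacity is
 * the number of internal LBAs that fit in the namespace multiplied by
 * the data payload per LBA, and is published only after any integrity
 * profile has been registered.
 */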
static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
        struct device *dev = &nsblk->common.dev;
        resource_size_t available_disk_size;
        struct request_queue *q;
        struct gendisk *disk;
        u64 internal_nlba;

        internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
        available_disk_size = internal_nlba * nsblk_sector_size(nsblk);

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return -ENOMEM;
        if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
                return -ENOMEM;

        blk_queue_make_request(q, nd_blk_make_request);
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        q->queuedata = nsblk;

        disk = alloc_disk(0);
        if (!disk)
                return -ENOMEM;

        disk->first_minor       = 0;
        disk->fops              = &nd_blk_fops;
        disk->queue             = q;
        disk->flags             = GENHD_FL_EXT_DEVT;
        nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
        set_capacity(disk, 0);
        device_add_disk(dev, disk);

        if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
                return -ENOMEM;

        if (nsblk_meta_size(nsblk)) {
                int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));

                if (rc)
                        return rc;
        }

        set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
        revalidate_disk(disk);
        return 0;
}

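/*
 * Probe: attach to an existing BTT instance if this device is one,
 * otherwise give the BTT a chance to claim the namespace before falling
 * back to a raw block-window disk.
 */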
static int nd_blk_probe(struct device *dev)
{
        struct nd_namespace_common *ndns;
        struct nd_namespace_blk *nsblk;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        nsblk = to_nd_namespace_blk(&ndns->dev);
        nsblk->size = nvdimm_namespace_capacity(ndns);
        dev_set_drvdata(dev, nsblk);

        ndns->rw_bytes = nsblk_rw_bytes;
        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);
        else if (nd_btt_probe(dev, ndns) == 0) {
                /* we'll come back as btt-blk */
                return -ENXIO;
        } else
                return nsblk_attach_disk(nsblk);
}

static int nd_blk_remove(struct device *dev)
{
        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        return 0;
}

static struct nd_device_driver nd_blk_driver = {
        .probe = nd_blk_probe,
        .remove = nd_blk_remove,
        .drv = {
                .name = "nd_blk",
        },
        .type = ND_DRIVER_NAMESPACE_BLK,
};

static int __init nd_blk_init(void)
{
        return nd_driver_register(&nd_blk_driver);
}

static void __exit nd_blk_exit(void)
{
        driver_unregister(&nd_blk_driver.drv);
}

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
module_init(nd_blk_init);
module_exit(nd_blk_exit);