linux/drivers/lightnvm/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int num_lun;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int num_ch;
};

static void nvm_free(struct kref *ref);

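/*
 * Look up a target on @dev by its disk name. Callers iterate the target
 * list, so dev->mlock must be held.
 */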
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}

static bool nvm_target_exists(const char *name)
{
        struct nvm_dev *dev;
        struct nvm_target *tgt;
        bool ret = false;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                list_for_each_entry(tgt, &dev->targets, list) {
                        if (!strcmp(name, tgt->disk->disk_name)) {
                                ret = true;
                                mutex_unlock(&dev->mlock);
                                goto out;
                        }
                }
                mutex_unlock(&dev->mlock);
        }

out:
        up_write(&nvm_lock);
        return ret;
}

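/*
 * Reserve the LUN range [lun_begin, lun_end] in dev->lun_map, bit by bit.
 * On conflict, roll back the bits already taken and return -EBUSY.
 */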
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("nvm: lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

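/*
 * Tear down a target device mapping. If @clear is set, the LUNs the target
 * held are also released in the parent device's lun_map.
 */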
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->num_ch; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->num_lun; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.num_lun) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                        dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}

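/*
 * Carve out the LUN range [lun_begin, lun_end] of @dev as a target device.
 * Builds the per-channel offset maps used to translate between target and
 * device addresses, and derives the target geometry from the parent's.
 */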
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              u16 lun_begin, u16 lun_end,
                                              u16 op)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int num_lun = lun_end - lun_begin + 1;
        int luns_left = num_lun;
        int num_ch = num_lun / dev->geo.num_lun;
        int num_ch_mod = num_lun % dev->geo.num_lun;
        int bch = lun_begin / dev->geo.num_lun;
        int blun = lun_begin % dev->geo.num_lun;
        int lunid = 0;
        int lun_balanced = 1;
        int sec_per_lun, prev_num_lun;
        int i, j;

        num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_num_lun = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;
        for (i = 0; i < num_ch; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;

                if (lun_balanced && prev_num_lun != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->num_lun = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].ppa = 0;
                        luns[lunid].a.ch = i;
                        luns[lunid++].a.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->num_ch = num_ch;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        /* Inherit device geometry from parent */
        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.num_ch = num_ch;
        tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
        tgt_dev->geo.all_luns = num_lun;
        tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

        tgt_dev->geo.op = op;

        sec_per_lun = dev->geo.clba * dev->geo.num_chk;
        tgt_dev->geo.total_secs = num_lun * sec_per_lun;

        tgt_dev->q = dev->q;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
        return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        list_for_each_entry(tt, &nvm_tgt_types, list)
                if (!strcmp(name, tt->name))
                        return tt;

        return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        down_write(&nvm_tgtt_lock);
        tt = __nvm_find_target_type(name);
        up_write(&nvm_tgtt_lock);

        return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
                                 int lun_end)
{
        if (lun_begin > lun_end || lun_end >= geo->all_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        lun_begin, lun_end, geo->all_luns - 1);
                return -EINVAL;
        }

        return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
                               struct nvm_ioctl_create_simple *s)
{
        struct nvm_geo *geo = &dev->geo;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = geo->all_luns - 1;
        }

        return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
                                 struct nvm_ioctl_create_extended *e)
{
        if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
                e->lun_begin = 0;
                e->lun_end = dev->geo.all_luns - 1;
        }

        /* If op is not set, fall back to the target's default */
        if (e->op == 0xFFFF) {
                e->op = NVM_TARGET_DEFAULT_OP;
        } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
                pr_err("nvm: invalid over provisioning value\n");
                return -EINVAL;
        }

        return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

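/*
 * Create a target from an ioctl request: validate the configuration,
 * reserve the requested LUNs, build the target device, set up its gendisk
 * and request queue, and hand over to the target type's init routine.
 */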
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_extended e;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        unsigned int mdts;
        int ret;

        switch (create->conf.type) {
        case NVM_CONFIG_TYPE_SIMPLE:
                ret = __nvm_config_simple(dev, &create->conf.s);
                if (ret)
                        return ret;

                e.lun_begin = create->conf.s.lun_begin;
                e.lun_end = create->conf.s.lun_end;
                e.op = NVM_TARGET_DEFAULT_OP;
                break;
        case NVM_CONFIG_TYPE_EXTENDED:
                ret = __nvm_config_extended(dev, &create->conf.e);
                if (ret)
                        return ret;

                e = create->conf.e;
                break;
        default:
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }

        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
                pr_err("nvm: device is incompatible with target L2P type.\n");
                return -EINVAL;
        }

        if (nvm_target_exists(create->tgtname)) {
                pr_err("nvm: target name already exists (%s)\n",
                                                        create->tgtname);
                return -EINVAL;
        }

        ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
        if (!tgt_dev) {
                pr_err("nvm: could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = alloc_disk(0);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
        }
        blk_queue_make_request(tqueue, tt->make_rq);

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
        if (dev->geo.mdts) {
                mdts = min_t(u32, dev->geo.mdts,
                                (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
        }
        blk_queue_max_hw_sectors(tqueue, mdts);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        __module_get(tt->owner);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata, true);
err_init:
        blk_cleanup_queue(tqueue);
        tdisk->queue = NULL;
err_disk:
        put_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
        return ret;
}

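/*
 * Undo nvm_create_tgt(): remove the gendisk, clean up the request queue,
 * run the target type's exit hooks, release the target device mapping and
 * drop the module reference.
 */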
static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data, graceful);

        nvm_remove_tgt_dev(t->dev, 1);
        put_disk(tdisk);
        module_put(t->type->owner);

        list_del(&t->list);
        kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:     ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t = NULL;
        struct nvm_dev *dev;

        down_read(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                t = nvm_find_target(dev, remove->tgtname);
                if (t) {
                        mutex_unlock(&dev->mlock);
                        break;
                }
                mutex_unlock(&dev->mlock);
        }
        up_read(&nvm_lock);

        if (!t)
                return 1;

        __nvm_remove_target(t, true);
        kref_put(&dev->ref, nvm_free);

        return 0;
}

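/*
 * Allocate the device-wide reverse map (device to target addresses).
 * Channel and LUN offsets start out as -1 and are filled in when targets
 * claim LUNs in nvm_create_tgt_dev().
 */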
static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
                                                                GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.num_ch; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.num_lun;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->num_lun = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
        /* also free the channel array; falling through to err_chnls alone
         * would leak it
         */
        kfree(rmap->chnls);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.num_ch; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}

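/*
 * Helpers translating addresses between a target's view (channel/LUN
 * relative to the target) and the device's physical view, using the
 * per-target map and the device-wide reverse map.
 */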
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
        int lun_off = ch_map->lun_offs[p->a.lun];

        p->a.ch += ch_map->ch_off;
        p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
        int lun_roff = ch_rmap->lun_offs[p->a.lun];

        p->a.ch -= ch_rmap->ch_off;
        p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
        }
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

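/*
 * Register a target type (e.g. pblk) with the media manager so targets of
 * that type can be created. A target module would typically register from
 * its module init and unregister on exit, along these lines (illustrative
 * sketch; "mytgt" is a hypothetical target):
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		...
 *	};
 *
 *	return nvm_register_tgt_type(&tt_mytgt);	// in module init
 *	nvm_unregister_tgt_type(&tt_mytgt);		// in module exit
 */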
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (__nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_tgtt_lock);
        list_del(&tt->list);
        up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                                                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

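/*
 * Populate the PPA list of a request. A single address on a single-plane
 * device is stored inline; otherwise a DMA-able list is allocated and each
 * address is replicated across all planes.
 */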
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        plane_cnt = geo->pln_mode;
        rqd->nr_ppas *= plane_cnt;

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppa = ppas[i];
                        ppa.g.pl = pl_idx;
                        rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                }
        }

        return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
                        struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

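/*
 * Compute the 1.2-spec command flags for a request; 2.0 devices carry this
 * information elsewhere, so no flags are set for them.
 */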
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        if (rqd->is_seq)
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)
                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
        else if (rqd->opcode == NVM_OP_PWRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}

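/*
 * Submit an asynchronous I/O on behalf of a target. Addresses are converted
 * to the device format before submission, and converted back on the error
 * path or at completion time in nvm_end_io().
 */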
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with the right address format */
        ret = dev->ops->submit_io(dev, rqd);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);
        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /* In case of error, fail with the right address format */
        ret = dev->ops->submit_io_sync(dev, rqd);
        nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io_sync)
                return -ENODEV;

        rqd->flags = nvm_set_flags(&dev->geo, rqd);

        return dev->ops->submit_io_sync(dev, rqd);
}

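/*
 * Probe a page by reading it: a return of 0 means valid data, a positive
 * value is the device status (e.g. empty page), and a negative value is a
 * submission error.
 */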
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd = { NULL };
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
        int ret;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        bio_init(&bio, &bio_vec, 1);
        bio_add_page(&bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);

        rqd.bio = &bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.is_seq = 1;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

        ret = nvm_submit_io_sync_raw(dev, &rqd);

        /* The I/O is synchronous, so the page can be freed on both paths;
         * freeing it only on success would leak it on submission failure.
         */
        __free_page(page);
        if (ret)
                return ret;

        return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
                             struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, pg, pl;

        /* sense first page */
        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) /* valid data */
                meta->state = NVM_CHK_ST_OPEN;
        else if (ret > 0) {
                /*
                 * If the page is empty, the chunk is free. On a CRC/ECC
                 * status, treat the chunk as open and scan it; anything
                 * else is an actual io error.
                 */
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                        meta->state = NVM_CHK_ST_FREE;
                        return 0;
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        goto scan;
                default:
                        return -ret; /* other io error */
                }
        }

        /* sense last page */
        ppa.g.pg = geo->num_pg - 1;
        ppa.g.pl = geo->num_pln - 1;

        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) { /* Chunk fully written */
                meta->state = NVM_CHK_ST_CLOSED;
                meta->wp = geo->clba;
                return 0;
        } else if (ret > 0) {
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        break;
                default:
                        return -ret; /* other io error */
                }
        }

scan:
        /*
         * The chunk is open, so we scan sequentially to update the write
         * pointer. We make the assumption that targets write data across
         * all planes before moving to the next page.
         */
        for (pg = 0; pg < geo->num_pg; pg++) {
                for (pl = 0; pl < geo->num_pln; pl++) {
                        ppa.g.pg = pg;
                        ppa.g.pl = pl;

                        ret = nvm_bb_chunk_sense(dev, ppa);
                        if (ret < 0) /* io error */
                                return ret;
                        else if (ret == 0) {
                                meta->wp += geo->ws_min;
                        } else if (ret > 0) {
                                switch (ret) {
                                case NVM_RSP_ERR_EMPTYPAGE:
                                        return 0;
                                case NVM_RSP_ERR_FAILCRC:
                                case NVM_RSP_ERR_FAILECC:
                                case NVM_RSP_WARN_HIGHECC:
                                        meta->wp += geo->ws_min;
                                        break;
                                default:
                                        return -ret; /* other io error */
                                }
                        }
                }
        }

        return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
                           u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, blk, pl, offset, blktype;

        for (blk = 0; blk < geo->num_chk; blk++) {
                offset = blk * geo->pln_mode;
                blktype = blks[offset];

                for (pl = 0; pl < geo->pln_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                ppa.g.blk = blk;

                meta->wp = 0;
                meta->type = NVM_CHK_TP_W_SEQ;
                meta->wi = 0;
                meta->slba = generic_to_dev_addr(dev, ppa).ppa;
                meta->cnlb = dev->geo.clba;

                if (blktype == NVM_BLK_T_FREE) {
                        ret = nvm_bb_chunk_scan(dev, ppa, meta);
                        if (ret)
                                return ret;
                } else {
                        meta->state = NVM_CHK_ST_OFFLINE;
                }

                meta++;
        }

        return 0;
}

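/*
 * Emulate the 2.0 chunk metadata report on a 1.2 device: walk the bad block
 * tables of the LUNs starting at @slba and fold them into chunk metadata
 * entries.
 */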
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
                           int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int ch, lun, nr_blks;
        int ret = 0;

        ppa.ppa = slba;
        ppa = dev_to_generic_addr(dev, ppa);

        if (ppa.g.blk != 0)
                return -EINVAL;

        if ((nchks % geo->num_chk) != 0)
                return -EINVAL;

        nr_blks = geo->num_chk * geo->pln_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
                for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
                        struct ppa_addr ppa_gen, ppa_dev;

                        if (!nchks)
                                goto done;

                        ppa_gen.ppa = 0;
                        ppa_gen.g.ch = ch;
                        ppa_gen.g.lun = lun;
                        ppa_dev = generic_to_dev_addr(dev, ppa_gen);

                        ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
                        if (ret)
                                goto done;

                        ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
                                                                        meta);
                        if (ret)
                                goto done;

                        meta += geo->num_chk;
                        nchks -= geo->num_chk;
                }
        }
done:
        kfree(blks);
        return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        if (dev->geo.version == NVM_OCSSD_SPEC_12)
                return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

        return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (dev->geo.version == NVM_OCSSD_SPEC_20)
                return 0;

        if (nr_ppas > NVM_MAX_VLBA) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

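/*
 * Initialize the per-device core state: the LUN allocation bitmap, the
 * target and area lists, the locks and the reverse map.
 */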
static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret;

        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free(struct kref *ref)
{
        struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        if (dev->rmap)
                nvm_unregister_map(dev);

        kfree(dev->lun_map);
        kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
                                geo->major_ver_id, geo->minor_ver_id,
                                geo->vmnt);

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
                        dev->name, dev->geo.ws_min, dev->geo.ws_opt,
                        dev->geo.num_chk, dev->geo.all_luns,
                        dev->geo.num_ch);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        struct nvm_dev *dev;

        dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
        if (dev)
                kref_init(&dev->ref);

        return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

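/*
 * Register a device with the media manager. The caller (typically an NVMe
 * host driver) allocates the device and fills in the queue and ops first,
 * roughly as follows (illustrative sketch; my_dev_ops is hypothetical):
 *
 *	dev = nvm_alloc_dev(node);
 *	dev->q = q;
 *	dev->ops = &my_dev_ops;
 *	ret = nvm_register(dev);
 *
 * On any failure the device reference is dropped, which frees the device.
 */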
int nvm_register(struct nvm_dev *dev)
{
        int ret, exp_pool_size;

        if (!dev->q || !dev->ops) {
                kref_put(&dev->ref, nvm_free);
                return -EINVAL;
        }

        ret = nvm_init(dev);
        if (ret) {
                kref_put(&dev->ref, nvm_free);
                return ret;
        }

        exp_pool_size = max_t(int, PAGE_SIZE,
                              (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
        exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

        dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
                                                  exp_pool_size);
        if (!dev->dma_pool) {
                pr_err("nvm: could not create dma pool\n");
                kref_put(&dev->ref, nvm_free);
                return -ENOMEM;
        }

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t, false);
                kref_put(&dev->ref, nvm_free);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        int ret;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        kref_get(&dev->ref);
        ret = nvm_create_tgt(dev, create);
        if (ret)
                kref_put(&dev->ref, nvm_free);

        return ret;
}

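/*
 * NVM_INFO ioctl: report the lightnvm version and the registered target
 * types.
 */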
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_tgtt_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_tgtt_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                /* Stop before indexing past the end of devices->info; the
                 * previous "i > 31" check allowed one out-of-bounds write.
                 */
                if (i >= ARRAY_SIZE(devices->info)) {
                        pr_err("nvm: max %zu devices can be reported.\n",
                               ARRAY_SIZE(devices->info));
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
            create.conf.e.rsv != 0) {
                pr_err("nvm: reserved config field in use\n");
                return -EINVAL;
        }

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("nvm: flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        return 0;
}

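/*
 * Dispatch ioctls issued on /dev/lightnvm/control. All commands require
 * CAP_SYS_ADMIN.
 */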
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);