#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

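/*
 * Mapping between a target's compact view of the device and the parent
 * device itself. nvm_ch_map describes one channel slice (channel offset plus
 * per-LUN offsets); nvm_dev_map collects a target's channel slices and also
 * serves as the device-wide reverse map (dev->rmap).
 */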
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

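/*
 * Tear down a target device's channel/LUN mapping. When @clear is set, the
 * target's LUNs are also released in the parent device's lun_map.
 */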
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.luns_per_chnl) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							    dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

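/*
 * Build the nvm_tgt_dev for the LUN range [lun_begin, lun_end]: a target
 * geometry derived from the parent, a forward channel/LUN offset map, and
 * the list of generic addresses for the LUNs the target owns. The reverse
 * map in dev->rmap is updated with the same offsets.
 */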
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	/* a partially filled last channel still counts as a channel */
	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* the target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

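/*
 * Create a target instance from an ioctl request: reserve the requested LUN
 * range, build the target device view, set up a request queue and gendisk,
 * and hand over to the target type's init().
 */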
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_sysfs:
	/* the disk is already visible at this point; unregister it first */
	del_gendisk(tdisk);
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}
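/**
 * nvm_remove_tgt - remove a target from a device
 * @dev:	parent device
 * @remove:	ioctl structure with the target name to remove
 *
 * Returns 0 on success and 1 if no target with that name was found on @dev.
 */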
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

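/*
 * Allocate the device-wide reverse map (dev->rmap). Channel and LUN offsets
 * start out as -1 and are filled in as targets claim their LUN ranges.
 */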
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
							GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

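/*
 * Address translation helpers: a target sees a compact address space that
 * starts at channel 0/LUN 0. The forward map (tgt_dev->map) turns target
 * addresses into device addresses; the reverse map (dev->rmap) undoes it.
 */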
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

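/*
 * Rebase a table of device-linear sector addresses (e.g. L2P entries read
 * from the device) so they are relative to the target's first LUN. Entries
 * are little-endian 64-bit values; zero entries are left alone.
 */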
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries, int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* nvm_tgt_types is guarded by nvm_tgtt_lock, not nvm_lock */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		   int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.end_io = nvm_end_io_sync;
	rqd.private = &wait;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	ret = nvm_submit_io(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}
	wait_for_completion_io(&wait);

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

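/*
 * Reserve a contiguous range of @len sectors in the device's linear address
 * space and return its start through @lba. dev->area_list is kept sorted by
 * begin sector, so the first gap large enough can be found by walking it.
 */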
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

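/*
 * Fill out the ppa list of @rqd. A single address without plane expansion is
 * stored inline in ppa_addr; otherwise a DMA-able list is allocated. When
 * @vblk is set, every address is replicated across all planes of the
 * configured plane mode.
 */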
int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert addresses back to the target's view before completion */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
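/*
 * Fold a bad block table from its per-plane representation down to one entry
 * per virtual block: a block is reported bad if any of its planes is marked
 * factory bad or grown bad.
 */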
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

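	/* The lower page table encoding consists of a list of bytes, where
	 * each has a lower and an upper half. The first half byte maintains
	 * the increment value and every value after is an offset added to
	 * the previous incrementation value.
	 */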
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper half byte */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower half byte */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

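/*
 * Initialize core structures from the identify data: the device geometry and
 * values derived from it, the LUN bitmap, the lower-page table and the
 * device-wide reverse map.
 */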
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device geometry values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with the configured media managers */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility with the old media managers */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("nvm: max %zu devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* strip known flags; reject anything left over */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);