/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

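/* Size of the in-memory L2P table: one entry per logical sector. A compact
 * ppa format (under 32 bits) allows 4-byte entries; otherwise 8 bytes are
 * used per entry.
 */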
static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	return entry_size * pblk->rl.nr_secs;
}

#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

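/* Allocate the L2P table and mark every entry as empty (unmapped) until
 * recovery or user writes populate it.
 */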
static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = vmalloc(map_size);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

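/* The write buffer is a power-of-two ring buffer sized to hold at least the
 * sectors that can be in flight to the device (pgs_in_buffer).
 */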
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

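/* Pre-compute bit offsets and masks for pblk's internal ppa layout:
 * sector (lowest bits), then plane, channel, LUN, page and block (highest
 * bits). Extracting a field is then a mask plus a shift. Requires
 * power-of-two channel and LUN counts.
 */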
static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

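/* The slab caches are global, shared by all pblk instances on the host;
 * pblk_lock serializes their creation.
 */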
static int pblk_init_global_caches(struct pblk *pblk)
{
	down_write(&pblk_lock);
	pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static void pblk_free_global_caches(struct pblk *pblk)
{
	kmem_cache_destroy(pblk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
}

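/* Set up the runtime machinery: mempools (mostly sized to the LUN count),
 * the workqueues, the internal ppa format and the write buffer.
 */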
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	/* Internal bios can be at most the sectors signaled by the device. */
	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
									0);
	if (!pblk->page_bio_pool)
		goto free_global_caches;

	pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
							pblk_ws_cache);
	if (!pblk->gen_ws_pool)
		goto free_page_bio_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_gen_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_g_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_g_rq_cache);
	if (!pblk->e_rq_pool)
		goto free_r_rq_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_ppaf(pblk))
		goto free_r_end_wq;

	if (pblk_rwb_init(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
	mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
	mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_destroy(pblk->page_bio_pool);
free_global_caches:
	pblk_free_global_caches(pblk);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_destroy(pblk->page_bio_pool);
	mempool_destroy(pblk->gen_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->e_rq_pool);
	mempool_destroy(pblk->w_rq_pool);

	pblk_free_global_caches(pblk);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}

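/* Map the flat luns[] array onto pblk's internal LUN ordering, striping
 * across channels: with e.g. 2 channels of 4 LUNs, internal indexes 0..3
 * visit (ch0,lun0), (ch1,lun0), (ch0,lun1), (ch1,lun1), so consecutive
 * writes spread over channels. Bad block tables are fetched per LUN.
 */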
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

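/* On a factory-initialized target the L2P table starts empty; otherwise it
 * is recovered from the device. Either way, a data line must be available
 * before user I/O can start.
 */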
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

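/* See comment over struct line_emeta definition. emeta is split into three
 * in-media regions, each rounded up to sector granularity: the header plus
 * bad block bitmap, the lba list for the line's data sectors, and the valid
 * sector count (vsc) list covering all lines.
 */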
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

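/* Reserve 20% of the free blocks for over-provisioning: the user-visible
 * capacity is 80% of the free sectors, e.g. 1000 free blocks expose 800
 * blocks' worth of sectors while all 1000 stay under rate-limiter control.
 */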
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}

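/* Size and allocate the per-line metadata: derive the write unit from the
 * geometry, grow smeta/emeta one plane-page at a time until their payloads
 * fit, and classify each line as free or bad from the discovered bad block
 * tables.
 */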
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->nr_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->sec_per_blk);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	pblk->gc.gc_enabled = 0;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");