// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicolas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK  32     /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE    128

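/* Access the IBLOCK backend's private structure from a generic se_device. */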
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;
}

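/*
 * Open and claim the block device named by udev_path=, then copy its
 * request-queue limits into the se_device attributes.
 */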
static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
        int ret = -ENOMEM;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (ret) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                 ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
        else
                dev->dev_flags |= DF_READ_ONLY;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");

        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as
         * the smaller WRITE_SAME(10) only has a two-byte block count.
         */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
        if (max_write_zeroes_sectors)
                dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = &ib_dev->ibd_bio_set;

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 &bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_exit(&ib_dev->ibd_bio_set);
out:
        return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        bioset_exit(&ib_dev->ibd_bio_set);
}

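/*
 * Return the last addressable LBA, rescaled when the exported block_size
 * differs from the backing device's logical block size: e.g. a 4096-byte
 * device exported with a 512-byte block size presents eight times as many
 * logical blocks.
 */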
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        u32 block_size = bdev_logical_block_size(bd);
        unsigned long long blocks_long =
                div_u64(i_size_read(bd->bd_inode), block_size) - 1;

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

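/*
 * Called from each bio's completion; the final completion (ibr->pending
 * reaching zero) reports SAM status back to the target core.
 */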
static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!refcount_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        if (bio->bi_status) {
                pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
               int op_flags)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to,
         * we'll loop later on until we have handled the whole request.
         */
        if (sg_num > BIO_MAX_PAGES)
                sg_num = BIO_MAX_PAGES;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
        bio_set_op_attrs(bio, op, op_flags);

        return bio;
}

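/* Drain the bio list, batching submissions under a single blk plug. */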
static void iblock_submit_bios(struct bio_list *list)
{
        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;

        if (bio->bi_status)
                pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

        if (cmd) {
                if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * before flushing the actual cache.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
        return 0;
}

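/* Translate a SCSI UNMAP descriptor into a block-layer discard request. */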
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct se_device *dev = cmd->se_dev;
        int ret;

        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev, nolb),
                                   GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

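/*
 * WRITE SAME fast path: when the single-block payload is all zeroes,
 * offload to blkdev_issue_zeroout() rather than writing the buffer once
 * per logical block.
 */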
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
        unsigned char *buf, *not_zero;
        int ret;

        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        /*
         * Fall back to block_execute_write_same() slow path
         */
        not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));

        if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        ret = blkdev_issue_zeroout(bdev,
                        target_to_linux_sector(dev, cmd->t_task_lba),
                        target_to_linux_sector(dev,
                                sbc_get_write_same_sectors(cmd)),
                        GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

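/*
 * WRITE SAME slow path: replicate the single-block payload across the
 * whole LBA range with a chain of bios.
 */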
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        sector_t sectors = target_to_linux_sector(dev,
                                sbc_get_write_same_sectors(cmd));

        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                       " block_size: %u\n", cmd->t_data_nents, sg->length,
                       cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        if (bdev_write_zeroes_sectors(bdev)) {
                if (!iblock_execute_zero_out(bdev, cmd))
                        return 0;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
                                             0);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= sg->length >> SECTOR_SHIFT;
        }

        iblock_submit_bios(&list);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

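/*
 * configfs device parameters: udev_path=<path>, readonly=<0|1>, and
 * force=<0|1> (accepted but currently ignored below).
 */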
enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                       " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                          SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                 ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                              bdevname(bd, buf));
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, " UDEV PATH: %s",
                              ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, " ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
                              MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
                              "CLAIMED: IBLOCK");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

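/*
 * Allocate a bio_integrity_payload and fill it from the command's T10 PI
 * scatterlist so the block layer carries protection information with the
 * data bio.
 */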
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
                 struct sg_mapping_iter *miter)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO,
                        min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                                  (bi->interval_exp - SECTOR_SHIFT));

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {

                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
                         miter->page, len, offset_in_page(miter->addr));

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}

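/*
 * Main READ/WRITE path: map the command's scatterlist into one or more
 * bios, attaching PI where configured, and submit them in batches of up
 * to IBLOCK_MAX_BIO_PER_TASK.
 */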
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned bio_cnt;
        int i, rc, op, op_flags = 0;
        struct sg_mapping_iter prot_miter;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
                /*
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
                op = REQ_OP_WRITE;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                op_flags = REQ_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                                op_flags = REQ_FUA;
                }
        } else {
                op = REQ_OP_READ;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
                               op == REQ_OP_READ ? SG_MITER_FROM_SG :
                                                   SG_MITER_TO_SG);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entries, then we're going to need
                 *      to handle multiple segments per bio.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                                if (rc)
                                        goto fail_put_bios;
                        }

                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num, op,
                                             op_flags);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

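/*
 * LBPPBE: log2(logical blocks per physical block), as reported in
 * READ CAPACITY(16).
 */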
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw = iblock_execute_rw,
        .execute_sync_cache = iblock_execute_sync_cache,
        .execute_write_same = iblock_execute_write_same,
        .execute_unmap = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
        .name = "iblock",
        .inquiry_prod = "IBLOCK",
        .inquiry_rev = IBLOCK_VERSION,
        .owner = THIS_MODULE,
        .attach_hba = iblock_attach_hba,
        .detach_hba = iblock_detach_hba,
        .alloc_device = iblock_alloc_device,
        .configure_device = iblock_configure_device,
        .destroy_device = iblock_destroy_device,
        .free_device = iblock_free_device,
        .parse_cdb = iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type = sbc_get_device_type,
        .get_blocks = iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe = iblock_get_lbppbe,
        .get_io_min = iblock_get_io_min,
        .get_io_opt = iblock_get_io_opt,
        .get_write_cache = iblock_get_write_cache,
        .tb_dev_attrib_attrs = sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
        return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
        target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);