linux/drivers/target/target_core_iblock.c
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK  32     /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE    128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}

static struct se_subsystem_api iblock_template;

/* iblock_attach_hba(): (Part of se_subsystem_api_t template) */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        int ret = -ENOMEM;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameter for IBLOCK\n");
                return -EINVAL;
        }

        ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        /*
         * Check if the underlying struct block_device request_queue supports
         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
         * in ATA, in which case we need to report TPE=1.
         */
        if (blk_queue_discard(q)) {
                dev->dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;

                /*
                 * Currently hardcoded to 1 in Linux/SCSI code.
                 */
                dev->dev_attrib.max_unmap_block_desc_count = 1;
                dev->dev_attrib.unmap_granularity =
                                q->limits.discard_granularity >> 9;
                dev->dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;

                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }
        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF, as the
         * smaller WRITE_SAME(10) only has a two-byte block count.
         */
        dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = ib_dev->ibd_bio_set;

                if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_free(ib_dev->ibd_bio_set);
        ib_dev->ibd_bio_set = NULL;
out:
        return ret;
}

static void iblock_free_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);

        kfree(ib_dev);
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}
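
/*
 * Worked example (illustrative, not part of the original source): a backing
 * device with 4096-byte logical blocks and i_size_read() == 1 GiB gives
 * blocks_long = (2^30 / 4096) - 1 = 262143.  If the exported
 * dev_attrib.block_size is 512, the 4096 -> 512 case applies and
 * blocks_long <<= 3 returns 2097144 as the last-LBA value in 512-byte units.
 */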

static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!atomic_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        /*
         * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0.
         */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                err = -EIO;

        if (err != 0) {
                pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to;
         * we'll loop later on until we have handled the whole request.
         */
        if (sg_num > BIO_MAX_PAGES)
                sg_num = BIO_MAX_PAGES;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;

        return bio;
}

static void iblock_submit_bios(struct bio_list *list, int rw)
{
        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;

        if (err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);

        if (cmd) {
                if (err)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(WRITE_FLUSH, bio);
        return 0;
}

static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
                sector_t lba, sector_t nolb)
{
        struct block_device *bdev = priv;
        int ret;

        ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;

        return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        sector_t lba = cmd->t_task_lba;
        sector_t nolb = sbc_get_write_same_sectors(cmd);
        int ret;

        ret = iblock_do_unmap(cmd, bdev, lba, nolb);
        if (ret)
                return ret;

        target_complete_cmd(cmd, GOOD);
        return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        sector_t block_lba = cmd->t_task_lba;
        sector_t sectors = sbc_get_write_same_sectors(cmd);

        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        atomic_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1);
                        if (!bio)
                                goto fail_put_bios;

                        atomic_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sectors -= 1;
        }

        iblock_submit_bios(&list, WRITE);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
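
/*
 * Worked example (illustrative, not part of the original source): for a
 * WRITE SAME with dev_attrib.block_size == 512 and NUMBER OF BLOCKS == 8,
 * the single 512-byte payload in t_data_sg[0] is added to the bio eight
 * times, block_lba advancing by one 512-byte sector per iteration, so the
 * same block is replicated across LBAs t_task_lba .. t_task_lba + 7.
 */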

enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};
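
/*
 * Example (illustrative, not part of the original source) of a configfs
 * control string accepted by iblock_set_configfs_dev_params() below, with
 * options separated by ',' or '\n' and the device path being hypothetical:
 *
 *      udev_path=/dev/sdb,readonly=1
 */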

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                                ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == ib_dev) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct scatterlist *sg;
        int i, rc;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
        if (!bip) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return -ENOMEM;
        }

        bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
                         dev->prot_length;
        bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

                rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
                                            sg->offset);
                if (rc != sg->length) {
                        pr_err("bio_integrity_add_page() failed: %d\n", rc);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
                         sg_page(sg), sg->length, sg->offset);
        }

        return 0;
}
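
/*
 * Example (illustrative, not part of the original source): with 512-byte
 * logical blocks, a 4 KiB data transfer covers 8 blocks; assuming the usual
 * 8-byte T10-DIF tuple in dev->prot_length, bip_iter.bi_size is set to
 * 8 * 8 = 64 bytes of protection information for the bio.
 */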

static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        struct iblock_req *ibr;
        struct bio *bio, *bio_start;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        sector_t block_lba;
        unsigned bio_cnt;
        int rw = 0;
        int i;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
                /*
                 * Force writethrough using WRITE_FUA if a volatile write cache
                 * is not enabled, or if the initiator set the Force Unit Access bit.
                 */
                if (q->flush_flags & REQ_FUA) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                rw = WRITE_FUA;
                        else if (!(q->flush_flags & REQ_FLUSH))
                                rw = WRITE_FUA;
                        else
                                rw = WRITE;
                } else {
                        rw = WRITE;
                }
        } else {
                rw = READ;
        }

        /*
         * Convert the blocksize advertised to the initiator to the 512 byte
         * units unconditionally used by the Linux block layer.
         */
        if (dev->dev_attrib.block_size == 4096)
                block_lba = (cmd->t_task_lba << 3);
        else if (dev->dev_attrib.block_size == 2048)
                block_lba = (cmd->t_task_lba << 2);
        else if (dev->dev_attrib.block_size == 1024)
                block_lba = (cmd->t_task_lba << 1);
        else if (dev->dev_attrib.block_size == 512)
                block_lba = cmd->t_task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->dev_attrib.block_size);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
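
        /*
         * Illustrative arithmetic (not part of the original source): with a
         * 4096-byte exported block size, SCSI LBA 100 becomes block layer
         * sector 100 << 3 == 800, i.e. byte offset 800 * 512 == 409600.
         */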

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                atomic_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents);
        if (!bio)
                goto fail_free_ibr;

        bio_start = bio;
        bio_list_init(&list);
        bio_list_add(&list, bio);

        atomic_set(&ibr->pending, 2);
        bio_cnt = 1;

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list, rw);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num);
                        if (!bio)
                                goto fail_put_bios;

                        atomic_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type) {
                int rc = iblock_alloc_bip(cmd, bio_start);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list, rw);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}
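
/*
 * Example (illustrative, not part of the original source): a 512e drive with
 * 4096-byte physical and 512-byte logical sectors gives logs_per_phys = 8,
 * so the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT reported in READ
 * CAPACITY (16) data is ilog2(8) = 3.
 */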

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
        .execute_write_same_unmap = iblock_execute_write_same_unmap,
        .execute_unmap          = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return q->flush_flags & REQ_FLUSH;
}

DEF_TB_DEFAULT_ATTRIBS(iblock);

static struct configfs_attribute *iblock_backend_dev_attrs[] = {
        &iblock_dev_attrib_emulate_model_alias.attr,
        &iblock_dev_attrib_emulate_dpo.attr,
        &iblock_dev_attrib_emulate_fua_write.attr,
        &iblock_dev_attrib_emulate_fua_read.attr,
        &iblock_dev_attrib_emulate_write_cache.attr,
        &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
        &iblock_dev_attrib_emulate_tas.attr,
        &iblock_dev_attrib_emulate_tpu.attr,
        &iblock_dev_attrib_emulate_tpws.attr,
        &iblock_dev_attrib_emulate_caw.attr,
        &iblock_dev_attrib_emulate_3pc.attr,
        &iblock_dev_attrib_pi_prot_type.attr,
        &iblock_dev_attrib_hw_pi_prot_type.attr,
        &iblock_dev_attrib_pi_prot_format.attr,
        &iblock_dev_attrib_enforce_pr_isids.attr,
        &iblock_dev_attrib_is_nonrot.attr,
        &iblock_dev_attrib_emulate_rest_reord.attr,
        &iblock_dev_attrib_force_pr_aptpl.attr,
        &iblock_dev_attrib_hw_block_size.attr,
        &iblock_dev_attrib_block_size.attr,
        &iblock_dev_attrib_hw_max_sectors.attr,
        &iblock_dev_attrib_optimal_sectors.attr,
        &iblock_dev_attrib_hw_queue_depth.attr,
        &iblock_dev_attrib_queue_depth.attr,
        &iblock_dev_attrib_max_unmap_lba_count.attr,
        &iblock_dev_attrib_max_unmap_block_desc_count.attr,
        &iblock_dev_attrib_unmap_granularity.attr,
        &iblock_dev_attrib_unmap_granularity_alignment.attr,
        &iblock_dev_attrib_max_write_same_len.attr,
        NULL,
};

static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
        .configure_device       = iblock_configure_device,
        .free_device            = iblock_free_device,
        .parse_cdb              = iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe             = iblock_get_lbppbe,
        .get_io_min             = iblock_get_io_min,
        .get_io_opt             = iblock_get_io_opt,
        .get_write_cache        = iblock_get_write_cache,
};

static int __init iblock_module_init(void)
{
        struct target_backend_cits *tbc = &iblock_template.tb_cits;

        target_core_setup_sub_cits(&iblock_template);
        tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;

        return transport_subsystem_register(&iblock_template);
}

static void __exit iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);