linux/drivers/target/target_core_rd.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
        if (!rd_host)
                return -ENOMEM;

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_VERSION);

        return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

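/*
 * Walk an array of scatterlist tables, free every backing page and the
 * scatterlist arrays themselves, and return the number of pages freed.
 */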
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 sg_table_count)
{
        struct page *pg;
        struct scatterlist *sg;
        u32 i, j, page_count = 0, sg_per_table;

        for (i = 0; i < sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }
                kfree(sg);
        }

        kfree(sg_table);
        return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
                                          rd_dev->sg_table_count);

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}


/*
 * rd_allocate_sgl_table(): allocate the scatterlist tables and backing
 * pages for a ramdisk device, chaining consecutive tables together and
 * filling every page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
                                 u32 total_sg_needed, unsigned char init_payload)
{
        u32 i = 0, j, page_offset = 0, sg_per_table;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct page *pg;
        struct scatterlist *sg;
        unsigned char *p;

        while (total_sg_needed) {
                unsigned int chain_entry = 0;

                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg)
                        return -ENOMEM;

                sg_init_table(sg, sg_per_table + chain_entry);

                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;

                        p = kmap(pg);
                        memset(p, init_payload, PAGE_SIZE);
                        kunmap(pg);
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        return 0;
}

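/*
 * Allocate the data area for a ramdisk device: one rd_dev_sg_table per
 * RD_MAX_ALLOCATION_SIZE worth of scatterlist entries, with every backing
 * page zero-filled.  NULLIO devices get no backing pages at all.
 */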
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        struct rd_dev_sg_table *sg_table;
        u32 sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                       rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;
        sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
        if (!sg_table)
                return -ENOMEM;

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, rd_dev->rd_page_count,
                 rd_dev->sg_table_count);

        return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
        u32 page_count;

        if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
                return;

        page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
                                          rd_dev->sg_prot_count);

        pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
                 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        rd_dev->sg_prot_array = NULL;
        rd_dev->sg_prot_count = 0;
}

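/*
 * Allocate the protection (T10-PI) metadata area for the device; unlike the
 * zero-filled data area, these pages are initialised to 0xff.
 */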
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        int rc;

        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
        /*
         * prot_length is the number of DIF bytes per block (8 for T10-PI).
         *
         * Pages needed = (rd_page_count * PAGE_SIZE / block_size) blocks
         *                * prot_length bytes per block / PAGE_SIZE
         *              = rd_page_count * prot_length / block_size
         *
         * The PAGE_SIZE factors cancel; one extra page is added as padding.
         */
        total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;
        sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
        if (!sg_table)
                return -ENOMEM;

        rd_dev->sg_prot_array = sg_table;
        rd_dev->sg_prot_count = sg_tables;

        rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
        if (rc)
                return rc;

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
                 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

        return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
        if (!rd_dev)
                return NULL;

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

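/*
 * Configure a device once rd_pages= has been supplied: build the backing
 * store and advertise the hardware limits of the ramdisk backend.
 */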
static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
        dev->dev_attrib.is_nonrot = 1;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

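/*
 * Device teardown: rd_destroy_device() releases the backing pages, while
 * rd_free_device() frees the rd_dev itself only after an RCU grace period
 * has elapsed.
 */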
static void rd_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct rd_dev *rd_dev = RD_DEV(dev);

        kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

static void rd_destroy_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
}

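/*
 * Map a page index to the rd_dev_sg_table covering it: each table spans a
 * fixed number of pages, so divide to find the table and then verify the
 * page falls inside its start/end offsets.
 */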
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_prot_count) {
                sg_table = &rd_dev->sg_prot_array[i];
                if ((sg_table->page_start_offset <= page) &&
                     (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

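/*
 * Locate the protection scatterlist backing cmd->t_task_lba, verify the DIF
 * tuples when pi_prot_verify is enabled, and copy protection data between
 * the command and the ramdisk prot space.
 */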
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *prot_table;
        struct scatterlist *prot_sg;
        u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
        u32 prot_offset, prot_page;
        u32 prot_npages __maybe_unused;
        u64 tmp;
        sense_reason_t rc = 0;

        tmp = cmd->t_task_lba * se_dev->prot_length;
        prot_offset = do_div(tmp, PAGE_SIZE);
        prot_page = tmp;

        prot_table = rd_get_prot_table(dev, prot_page);
        if (!prot_table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

        if (se_dev->dev_attrib.pi_prot_verify) {
                if (is_read)
                        rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                            prot_sg, prot_offset);
                else
                        rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                            cmd->t_prot_sg, 0);
        }
        if (!rc)
                sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

        return rc;
}

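/*
 * Service a READ or WRITE by memcpy()ing between the command's scatterlist
 * and the ramdisk's backing pages, walking from table to table as the
 * transfer crosses page and table boundaries.  NULLIO devices complete
 * immediately without touching any data.
 */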
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
{
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;
        sense_reason_t rc;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
                rc = rd_do_prot_rw(cmd, false);
                if (rc)
                        return rc;
        }

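        /*
         * Iterate over the command's scatterlist with sg_miter and copy in
         * PAGE_SIZE-bounded chunks: src_len tracks what remains of the
         * current ramdisk page, and once it is exhausted we advance to the
         * next page, or to the first entry of the next chained table.
         */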
        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                                SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
                rc = rd_do_prot_rw(cmd, true);
                if (rc)
                        return rc;
        }

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

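/*
 * Parse the configfs control string: "rd_pages=<count>" sets the backing
 * store size in pages and "rd_nullio=1" turns the device into a NULLIO
 * (no data transfer) backstore.
 */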
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        match_int(args, &arg);
                        if (arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return count;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
                        "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

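/*
 * Report the device capacity as the highest addressable LBA:
 * (rd_page_count * PAGE_SIZE / block_size) - 1.
 */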
static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        if (!dev->dev_attrib.pi_prot_type)
                return 0;

        return rd_build_prot_space(rd_dev, dev->prot_length,
                                   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

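/*
 * Backend operations registered with the target core; "rd_mcp" is the
 * memcpy-based ramdisk backstore exposed through configfs.
 */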
static const struct target_backend_ops rd_mcp_ops = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .destroy_device         = rd_destroy_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
        return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
        target_backend_unregister(&rd_mcp_ops);
}