linux/drivers/target/target_core_user.c
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace instead.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
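
/*
 * From userspace's side the flow is roughly: open the UIO device,
 * mmap() TCMU_RING_SIZE bytes, then loop: read() the uio fd to wait
 * for a kick, process new entries between cmd_tail and cmd_head,
 * write each entry's completion status, advance cmd_tail, and
 * write() to the uio fd, which lands in tcmu_irqcontrol() below.
 */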

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define DATA_BLOCK_BITS 256
#define DATA_BLOCK_SIZE 4096

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
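
/*
 * With the sizes above, each device's shared region is 64KiB of
 * mailbox + command ring (16 pages) followed by a 1MiB data area
 * (256 blocks of 4KiB), so TCMU_RING_SIZE is 1088KiB in total.
 */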
  74
  75static struct device *tcmu_root_device;
  76
  77struct tcmu_hba {
  78        u32 host_id;
  79};
  80
  81#define TCMU_CONFIG_LEN 256
  82
  83struct tcmu_dev {
  84        struct se_device se_dev;
  85
  86        char *name;
  87        struct se_hba *hba;
  88
  89#define TCMU_DEV_BIT_OPEN 0
  90#define TCMU_DEV_BIT_BROKEN 1
  91        unsigned long flags;
  92
  93        struct uio_info uio_info;
  94
  95        struct tcmu_mailbox *mb_addr;
  96        size_t dev_size;
  97        u32 cmdr_size;
  98        u32 cmdr_last_cleaned;
  99        /* Offset of data ring from start of mb */
 100        /* Must add data_off and mb_addr to get the address */
 101        size_t data_off;
 102        size_t data_size;
 103
 104        DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
 105
 106        wait_queue_head_t wait_cmdr;
 107        /* TODO should this be a mutex? */
 108        spinlock_t cmdr_lock;
 109
 110        struct idr commands;
 111        spinlock_t commands_lock;
 112
 113        struct timer_list timeout;
 114
 115        char dev_config[TCMU_CONFIG_LEN];
 116};
 117
 118#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
 119
 120#define CMDR_OFF sizeof(struct tcmu_mailbox)
 121
 122struct tcmu_cmd {
 123        struct se_cmd *se_cmd;
 124        struct tcmu_dev *tcmu_dev;
 125
 126        uint16_t cmd_id;
 127
 128        /* Can't use se_cmd when cleaning up expired cmds, because if
 129           cmd has been completed then accessing se_cmd is off limits */
 130        DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
 131
 132        unsigned long deadline;
 133
 134#define TCMU_CMD_BIT_EXPIRED 0
 135        unsigned long flags;
 136};
 137
 138static struct kmem_cache *tcmu_cmd_cache;
 139
 140/* multicast group */
 141enum tcmu_multicast_groups {
 142        TCMU_MCGRP_CONFIG,
 143};
 144
 145static const struct genl_multicast_group tcmu_mcgrps[] = {
 146        [TCMU_MCGRP_CONFIG] = { .name = "config", },
 147};
 148
 149/* Our generic netlink family */
 150static struct genl_family tcmu_genl_family = {
 151        .id = GENL_ID_GENERATE,
 152        .hdrsize = 0,
 153        .name = "TCM-USER",
 154        .version = 1,
 155        .maxattr = TCMU_ATTR_MAX,
 156        .mcgrps = tcmu_mcgrps,
 157        .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
 158        .netnsok = true,
 159};
 160
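/*
 * Allocate a tcmu_cmd and reserve a command id for it. idr_preload()
 * pre-allocates idr nodes so the idr_alloc() under the spinlock can
 * safely use GFP_NOWAIT; ids are capped at USHRT_MAX so they fit the
 * entry's 16-bit cmd_id field.
 */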
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        int cmd_id;

        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
        if (!tcmu_cmd)
                return NULL;

        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
        tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

        idr_preload(GFP_KERNEL);
        spin_lock_irq(&udev->commands_lock);
        cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
                USHRT_MAX, GFP_NOWAIT);
        spin_unlock_irq(&udev->commands_lock);
        idr_preload_end();

        if (cmd_id < 0) {
                kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
                return NULL;
        }
        tcmu_cmd->cmd_id = cmd_id;

        return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
        unsigned long offset = offset_in_page(vaddr);

        size = round_up(size+offset, PAGE_SIZE);
        vaddr -= offset;

        while (size) {
                flush_dcache_page(virt_to_page(vaddr));
                /* Advance to the next page, or we flush page 0 repeatedly */
                vaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
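/*
 * Example: with size 16, head 2 and tail 10 the ring has wrapped, so
 * spc_used() returns 16 + (2 - 10) = 8 and spc_free() returns
 * 16 - 8 - 1 = 7; one byte is always left unused so that a full ring
 * can be told apart from an empty one.
 */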
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
        int diff = head - tail;

        if (diff >= 0)
                return diff;
        else
                return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
        /* Keep 1 byte unused or we can't tell full from empty */
        return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
        return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
                           struct tcmu_dev *udev)
{
        struct iovec *iovec;

        if (*iov_cnt != 0)
                (*iov)++;
        (*iov_cnt)++;

        iovec = *iov;
        memset(iovec, 0, sizeof(struct iovec));
}

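/*
 * Advance a ring index by 'used' bytes, wrapping at 'size'. The
 * smp_store_release() orders all prior writes to the entry before the
 * index update, so the other side never observes the new index before
 * the data it covers is visible.
 */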
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset(struct tcmu_dev *dev,
                int block, int remaining)
{
        return dev->data_off + block * DATA_BLOCK_SIZE +
                DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
{
        return (size_t)iov->iov_base + iov->iov_len;
}

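/*
 * Walk the sg list, allocating free DATA_BLOCK_SIZE blocks from
 * data_bitmap as needed and describing them with iovecs in the command
 * entry. Consecutive blocks are merged into one iovec when their
 * offsets are contiguous. For writes (copy_data) the payload is also
 * copied into the data area.
 */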
static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
        struct scatterlist *data_sg, unsigned int data_nents,
        struct iovec **iov, int *iov_cnt, bool copy_data)
{
        int i, block;
        int block_remaining = 0;
        void *from, *to;
        size_t copy_bytes, to_offset;
        struct scatterlist *sg;

        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                from = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
                                block = find_first_zero_bit(udev->data_bitmap,
                                                DATA_BLOCK_BITS);
                                block_remaining = DATA_BLOCK_SIZE;
                                set_bit(block, udev->data_bitmap);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        to_offset = get_block_offset(udev, block,
                                        block_remaining);
                        to = (void *)udev->mb_addr + to_offset;
                        if (*iov_cnt != 0 &&
                            to_offset == iov_tail(udev, *iov)) {
                                (*iov)->iov_len += copy_bytes;
                        } else {
                                new_iov(iov, iov_cnt, udev);
                                (*iov)->iov_base = (void __user *) to_offset;
                                (*iov)->iov_len = copy_bytes;
                        }
                        if (copy_data) {
                                memcpy(to, from + sg->length - sg_remaining,
                                        copy_bytes);
                                tcmu_flush_dcache_range(to, copy_bytes);
                        }
                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
                }
                kunmap_atomic(from - sg->offset);
        }
}

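/*
 * cmd->data_bitmap holds exactly the blocks this command allocated (it
 * was computed by XORing the device bitmap before and after queueing),
 * so XORing it into the device bitmap again clears just those blocks.
 */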
static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
{
        bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
                   DATA_BLOCK_BITS);
}

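/*
 * Counterpart of alloc_and_scatter_data_area() for reads: walk the
 * command's block bitmap in ascending order and copy the data area
 * contents back into the sg list.
 */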
static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
                struct scatterlist *data_sg, unsigned int data_nents)
{
        int i, block;
        int block_remaining = 0;
        void *from, *to;
        size_t copy_bytes, from_offset;
        struct scatterlist *sg;

        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
                                block = find_first_bit(cmd_bitmap,
                                                DATA_BLOCK_BITS);
                                block_remaining = DATA_BLOCK_SIZE;
                                clear_bit(block, cmd_bitmap);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        from_offset = get_block_offset(udev, block,
                                        block_remaining);
                        from = (void *) udev->mb_addr + from_offset;
                        tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from,
                                        copy_bytes);

                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
        }
}

static inline size_t spc_bitmap_free(unsigned long *bitmap)
{
        return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
                        bitmap_weight(bitmap, DATA_BLOCK_BITS));
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
        struct tcmu_mailbox *mb = udev->mb_addr;
        size_t space, cmd_needed;
        u32 cmd_head;

        tcmu_flush_dcache_range(mb, sizeof(*mb));

        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

        /*
         * If cmd end-of-ring space is too small then we need space for a NOP plus
         * original cmd - cmds are internally contiguous.
         */
        if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
                cmd_needed = cmd_size;
        else
                cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

        space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
        if (space < cmd_needed) {
                pr_debug("no cmd space: %u %u %u\n", cmd_head,
                       udev->cmdr_last_cleaned, udev->cmdr_size);
                return false;
        }

        space = spc_bitmap_free(udev->data_bitmap);
        if (space < data_needed) {
                pr_debug("no data space: only %zu available, but ask for %zu\n",
                                space, data_needed);
                return false;
        }

        return true;
}

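/*
 * Queue a command onto the ring: size the entry (header plus a
 * worst-case iov array plus the rounded-up CDB), wait for ring space,
 * insert a PAD entry if the command would wrap, fill in the entry and
 * its iovecs, copy the CDB, publish the new cmd_head and kick
 * userspace via uio_event_notify().
 */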
static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
        size_t base_command_size, command_size;
        struct tcmu_mailbox *mb;
        struct tcmu_cmd_entry *entry;
        struct iovec *iov;
        int iov_cnt;
        uint32_t cmd_head;
        uint64_t cdb_off;
        bool copy_to_data_area;
        size_t data_length;
        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);

        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
                return -EINVAL;

        /*
         * Must be a certain minimum size for response sense info, but
         * also may be larger if the iov array is large.
         *
         * We prepare way too many iovs for potential uses here, because it's
         * expensive to tell how many regions are freed in the bitmap
         */
        base_command_size = max(offsetof(struct tcmu_cmd_entry,
                                req.iov[se_cmd->t_bidi_data_nents +
                                        se_cmd->t_data_nents]),
                                sizeof(struct tcmu_cmd_entry));
        command_size = base_command_size
                + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

        WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

        spin_lock_irq(&udev->cmdr_lock);

        mb = udev->mb_addr;
        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
        data_length = se_cmd->data_length;
        if (se_cmd->se_cmd_flags & SCF_BIDI) {
                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
                data_length += se_cmd->t_bidi_data_sg->length;
        }
        if ((command_size > (udev->cmdr_size / 2))
            || data_length > udev->data_size)
                pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
                        "cmd/data ring buffers\n", command_size, data_length,
                        udev->cmdr_size, udev->data_size);

        while (!is_ring_space_avail(udev, command_size, data_length)) {
                int ret;
                DEFINE_WAIT(__wait);

                prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

                pr_debug("sleeping for ring space\n");
                spin_unlock_irq(&udev->cmdr_lock);
                ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
                finish_wait(&udev->wait_cmdr, &__wait);
                if (!ret) {
                        pr_warn("tcmu: command timed out\n");
                        return -ETIMEDOUT;
                }

                spin_lock_irq(&udev->cmdr_lock);

                /* We dropped cmdr_lock, cmd_head is stale */
                cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
        }

        /* Insert a PAD if end-of-ring space is too small */
        if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
                size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

                entry = (void *) mb + CMDR_OFF + cmd_head;
                tcmu_flush_dcache_range(entry, sizeof(*entry));
                tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
                tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
                entry->hdr.cmd_id = 0; /* not used for PAD */
                entry->hdr.kflags = 0;
                entry->hdr.uflags = 0;

                UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

                cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
                WARN_ON(cmd_head != 0);
        }

        entry = (void *) mb + CMDR_OFF + cmd_head;
        tcmu_flush_dcache_range(entry, sizeof(*entry));
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
        tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
        entry->hdr.cmd_id = tcmu_cmd->cmd_id;
        entry->hdr.kflags = 0;
        entry->hdr.uflags = 0;

        bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);

        /*
         * Fix up iovecs, and handle if allocation in data ring wrapped.
         */
        iov = &entry->req.iov[0];
        iov_cnt = 0;
        copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
                || se_cmd->se_cmd_flags & SCF_BIDI);
        alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
                se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
        entry->req.iov_cnt = iov_cnt;
        entry->req.iov_dif_cnt = 0;

        /* Handle BIDI commands */
        iov_cnt = 0;
        alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
                se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
        entry->req.iov_bidi_cnt = iov_cnt;

        /* cmd's data_bitmap is what changed in process */
        bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
                        DATA_BLOCK_BITS);

        /* All offsets relative to mb_addr, not start of entry! */
        cdb_off = CMDR_OFF + cmd_head + base_command_size;
        memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
        entry->req.cdb_off = cdb_off;
        tcmu_flush_dcache_range(entry, sizeof(*entry));

        UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
        tcmu_flush_dcache_range(mb, sizeof(*mb));

        spin_unlock_irq(&udev->cmdr_lock);

        /* TODO: only if FLUSH and FUA? */
        uio_event_notify(&udev->uio_info);

        mod_timer(&udev->timeout,
                round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

        return 0;
}

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        int ret;

        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
        if (!tcmu_cmd)
                return -ENOMEM;

        ret = tcmu_queue_cmd_ring(tcmu_cmd);
        if (ret < 0) {
                pr_err("TCMU: Could not queue command\n");
                spin_lock_irq(&udev->commands_lock);
                idr_remove(&udev->commands, tcmu_cmd->cmd_id);
                spin_unlock_irq(&udev->commands_lock);

                kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
        }

        return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;

        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                /*
                 * cmd has been completed already from timeout, just reclaim
                 * data ring space and free cmd
                 */
                free_data_area(udev, cmd);

                kmem_cache_free(tcmu_cmd_cache, cmd);
                return;
        }

        if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
                free_data_area(udev, cmd);
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
        } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
                               se_cmd->scsi_sense_length);
                free_data_area(udev, cmd);
        } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
                DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

                /* Get Data-In buffer before clean up */
                bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
                gather_data_area(udev, bitmap,
                        se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

                bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
                gather_data_area(udev, bitmap,
                        se_cmd->t_data_sg, se_cmd->t_data_nents);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction != DMA_NONE) {
                pr_warn("TCMU: data direction was %d!\n",
                        se_cmd->data_direction);
        }

        target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
        cmd->se_cmd = NULL;

        kmem_cache_free(tcmu_cmd_cache, cmd);
}

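/*
 * Reap completions posted by userspace: walk entries from
 * cmdr_last_cleaned up to the mailbox's cmd_tail, skip PAD entries,
 * and look each cmd_id up in the idr. A cmd_id we don't know about
 * means the ring is corrupt, so the device is marked broken.
 */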
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
        struct tcmu_mailbox *mb;
        unsigned long flags;
        int handled = 0;

        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
                pr_err("ring broken, not handling completions\n");
                return 0;
        }

        spin_lock_irqsave(&udev->cmdr_lock, flags);

        mb = udev->mb_addr;
        tcmu_flush_dcache_range(mb, sizeof(*mb));

        while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

                struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
                struct tcmu_cmd *cmd;

                tcmu_flush_dcache_range(entry, sizeof(*entry));

                if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
                        UPDATE_HEAD(udev->cmdr_last_cleaned,
                                    tcmu_hdr_get_len(entry->hdr.len_op),
                                    udev->cmdr_size);
                        continue;
                }
                WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

                spin_lock(&udev->commands_lock);
                cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
                if (cmd)
                        idr_remove(&udev->commands, cmd->cmd_id);
                spin_unlock(&udev->commands_lock);

                if (!cmd) {
                        pr_err("cmd_id not found, ring is broken\n");
                        set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
                        break;
                }

                tcmu_handle_completion(cmd, entry);

                UPDATE_HEAD(udev->cmdr_last_cleaned,
                            tcmu_hdr_get_len(entry->hdr.len_op),
                            udev->cmdr_size);

                handled++;
        }

        if (mb->cmd_tail == mb->cmd_head)
                del_timer(&udev->timeout); /* no more pending cmds */

        spin_unlock_irqrestore(&udev->cmdr_lock, flags);

        wake_up(&udev->wait_cmdr);

        return handled;
}

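/*
 * Called via idr_for_each() from the device timer. An expired command
 * is completed back to the target core with CHECK_CONDITION, but its
 * ring and data area space can only be reclaimed once userspace
 * eventually posts a completion for it (or the device is freed).
 */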
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
        struct tcmu_cmd *cmd = p;

        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
                return 0;

        if (!time_after(jiffies, cmd->deadline))
                return 0;

        set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
        target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
        cmd->se_cmd = NULL;

        /*
         * Don't free cmd here: it is still referenced from the idr, so
         * freeing now would leave a dangling pointer for
         * tcmu_handle_completion() or tcmu_free_device(). They free it.
         */
        return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
        struct tcmu_dev *udev = (struct tcmu_dev *)data;
        unsigned long flags;
        int handled;

        handled = tcmu_handle_completions(udev);

        pr_warn("%d completions handled from timeout\n", handled);

        spin_lock_irqsave(&udev->commands_lock, flags);
        idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
        spin_unlock_irqrestore(&udev->commands_lock, flags);

        /*
         * We don't need to wakeup threads on wait_cmdr since they have their
         * own timeout.
         */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct tcmu_hba *tcmu_hba;

        tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
        if (!tcmu_hba)
                return -ENOMEM;

        tcmu_hba->host_id = host_id;
        hba->hba_ptr = tcmu_hba;

        return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
        kfree(hba->hba_ptr);
        hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
        struct tcmu_dev *udev;

        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;

        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
                kfree(udev);
                return NULL;
        }

        udev->hba = hba;

        init_waitqueue_head(&udev->wait_cmdr);
        spin_lock_init(&udev->cmdr_lock);

        idr_init(&udev->commands);
        spin_lock_init(&udev->commands_lock);

        setup_timer(&udev->timeout, tcmu_device_timedout,
                (unsigned long)udev);

        return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
        struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

        tcmu_handle_completions(tcmu_dev);

        return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
        struct tcmu_dev *udev = vma->vm_private_data;
        struct uio_info *info = &udev->uio_info;

        if (vma->vm_pgoff < MAX_UIO_MAPS) {
                if (info->mem[vma->vm_pgoff].size == 0)
                        return -1;
                return (int)vma->vm_pgoff;
        }
        return -1;
}

static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct tcmu_dev *udev = vma->vm_private_data;
        struct uio_info *info = &udev->uio_info;
        struct page *page;
        unsigned long offset;
        void *addr;

        int mi = tcmu_find_mem_index(vma);
        if (mi < 0)
                return VM_FAULT_SIGBUS;

        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
         * to use mem[N].
         */
        offset = (vmf->pgoff - mi) << PAGE_SHIFT;

        addr = (void *)(unsigned long)info->mem[mi].addr + offset;
        if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
                page = virt_to_page(addr);
        else
                page = vmalloc_to_page(addr);
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
        .fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &tcmu_vm_ops;

        vma->vm_private_data = udev;

        /* Ensure the mmap is exactly the right size */
        if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
                return -EINVAL;

        return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        /* O_EXCL not supported for char devs, so fake it? */
        if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
                return -EBUSY;

        pr_debug("open\n");

        return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

        pr_debug("close\n");

        return 0;
}

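/*
 * Broadcast a configuration event (device added/removed) to the
 * "config" netlink multicast group so a userspace daemon can set up
 * or tear down its handler for the device.
 */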
static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
        struct sk_buff *skb;
        void *msg_header;
        int ret = -ENOMEM;

        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return ret;

        msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
        if (!msg_header)
                goto free_skb;

        ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
        if (ret < 0)
                goto free_skb;

        ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
        if (ret < 0)
                goto free_skb;

        genlmsg_end(skb, msg_header);

        ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
                                TCMU_MCGRP_CONFIG, GFP_KERNEL);

        /* We don't care if no one is listening */
        if (ret == -ESRCH)
                ret = 0;

        return ret;
free_skb:
        nlmsg_free(skb);
        return ret;
}

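/*
 * Set up the shared region for one device: vzalloc() the ring, place
 * the mailbox at offset 0 and the command ring right after it, put
 * the data area at CMDR_SIZE, and register the whole thing as UIO
 * map 0 so userspace can mmap() it.
 */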
static int tcmu_configure_device(struct se_device *dev)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);
        struct tcmu_hba *hba = udev->hba->hba_ptr;
        struct uio_info *info;
        struct tcmu_mailbox *mb;
        size_t size;
        size_t used;
        int ret = 0;
        char *str;

        info = &udev->uio_info;

        size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
                        udev->dev_config);
        size += 1; /* for \0 */
        str = kmalloc(size, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

        if (udev->dev_config[0])
                snprintf(str + used, size - used, "/%s", udev->dev_config);

        info->name = str;

        udev->mb_addr = vzalloc(TCMU_RING_SIZE);
        if (!udev->mb_addr) {
                ret = -ENOMEM;
                goto err_vzalloc;
        }

        /* mailbox fits in first part of CMDR space */
        udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
        udev->data_off = CMDR_SIZE;
        udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

        mb = udev->mb_addr;
        mb->version = TCMU_MAILBOX_VERSION;
        mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;

        WARN_ON(!PAGE_ALIGNED(udev->data_off));
        WARN_ON(udev->data_size % PAGE_SIZE);
        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

        info->version = __stringify(TCMU_MAILBOX_VERSION);

        info->mem[0].name = "tcm-user command & data buffer";
        info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
        info->mem[0].size = TCMU_RING_SIZE;
        info->mem[0].memtype = UIO_MEM_VIRTUAL;

        info->irqcontrol = tcmu_irqcontrol;
        info->irq = UIO_IRQ_CUSTOM;

        info->mmap = tcmu_mmap;
        info->open = tcmu_open;
        info->release = tcmu_release;

        ret = uio_register_device(tcmu_root_device, info);
        if (ret)
                goto err_register;

        /* User can set hw_block_size before enabling the device */
        if (dev->dev_attrib.hw_block_size == 0)
                dev->dev_attrib.hw_block_size = 512;
        /* Other attributes can be configured in userspace */
        dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;

        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
                                 udev->uio_info.uio_dev->minor);
        if (ret)
                goto err_netlink;

        return 0;

err_netlink:
        uio_unregister_device(&udev->uio_info);
err_register:
        vfree(udev->mb_addr);
err_vzalloc:
        kfree(info->name);

        return ret;
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                kmem_cache_free(tcmu_cmd_cache, cmd);
                return 0;
        }
        return -EINVAL;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct tcmu_dev *udev = TCMU_DEV(dev);

        kfree(udev);
}

static void tcmu_free_device(struct se_device *dev)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);
        struct tcmu_cmd *cmd;
        bool all_expired = true;
        int i;

        del_timer_sync(&udev->timeout);

        vfree(udev->mb_addr);

        /* Upper layer should drain all requests before calling this */
        spin_lock_irq(&udev->commands_lock);
        idr_for_each_entry(&udev->commands, cmd, i) {
                if (tcmu_check_and_free_pending_cmd(cmd) != 0)
                        all_expired = false;
        }
        idr_destroy(&udev->commands);
        spin_unlock_irq(&udev->commands_lock);
        WARN_ON(!all_expired);

        /* Device was configured */
        if (udev->uio_info.uio_dev) {
                tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
                                   udev->uio_info.uio_dev->minor);

                uio_unregister_device(&udev->uio_info);
                kfree(udev->uio_info.name);
                kfree(udev->name);
        }
        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

enum {
        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
};

static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_err, NULL}
};

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_ul;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_dev_config:
                        if (match_strlcpy(udev->dev_config, &args[0],
                                          TCMU_CONFIG_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
                        break;
                case Opt_dev_size:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
                        kfree(arg_p);
                        if (ret < 0)
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
                case Opt_hw_block_size:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_ul);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for hw_block_size=\n");
                                break;
                        }
                        if (!tmp_ul) {
                                pr_err("hw_block_size must be nonzero\n");
                                break;
                        }
                        dev->dev_attrib.hw_block_size = tmp_ul;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);
        ssize_t bl = 0;

        bl = sprintf(b + bl, "Config: %s ",
                     udev->dev_config[0] ? udev->dev_config : "NULL");
        bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

        return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
        struct tcmu_dev *udev = TCMU_DEV(dev);

        return div_u64(udev->dev_size - dev->dev_attrib.block_size,
                       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
        int ret = tcmu_queue_cmd(se_cmd);

        if (ret != 0)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        else
                return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
        return passthrough_parse_cdb(cmd, tcmu_pass_op);
}

static const struct target_backend_ops tcmu_ops = {
        .name                   = "user",
        .owner                  = THIS_MODULE,
        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = tcmu_attach_hba,
        .detach_hba             = tcmu_detach_hba,
        .alloc_device           = tcmu_alloc_device,
        .configure_device       = tcmu_configure_device,
        .free_device            = tcmu_free_device,
        .parse_cdb              = tcmu_parse_cdb,
        .set_configfs_dev_params = tcmu_set_configfs_dev_params,
        .show_configfs_dev_params = tcmu_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = tcmu_get_blocks,
        .tb_dev_attrib_attrs    = passthrough_attrib_attrs,
};

static int __init tcmu_module_init(void)
{
        int ret;

        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

        tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
                                sizeof(struct tcmu_cmd),
                                __alignof__(struct tcmu_cmd),
                                0, NULL);
        if (!tcmu_cmd_cache)
                return -ENOMEM;

        tcmu_root_device = root_device_register("tcm_user");
        if (IS_ERR(tcmu_root_device)) {
                ret = PTR_ERR(tcmu_root_device);
                goto out_free_cache;
        }

        ret = genl_register_family(&tcmu_genl_family);
        if (ret < 0)
                goto out_unreg_device;

        ret = transport_backend_register(&tcmu_ops);
        if (ret)
                goto out_unreg_genl;

        return 0;

out_unreg_genl:
        genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
        root_device_unregister(tcmu_root_device);
out_free_cache:
        kmem_cache_destroy(tcmu_cmd_cache);

        return ret;
}

static void __exit tcmu_module_exit(void)
{
        target_backend_unregister(&tcmu_ops);
        genl_unregister_family(&tcmu_genl_family);
        root_device_unregister(tcmu_root_device);
        kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);