linux/drivers/target/target_core_user.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
   4 * Copyright (C) 2014 Red Hat, Inc.
   5 * Copyright (C) 2015 Arrikto, Inc.
   6 * Copyright (C) 2017 Chinamobile, Inc.
   7 */
   8
   9#include <linux/spinlock.h>
  10#include <linux/module.h>
  11#include <linux/idr.h>
  12#include <linux/kernel.h>
  13#include <linux/timer.h>
  14#include <linux/parser.h>
  15#include <linux/vmalloc.h>
  16#include <linux/uio_driver.h>
  17#include <linux/radix-tree.h>
  18#include <linux/stringify.h>
  19#include <linux/bitops.h>
  20#include <linux/highmem.h>
  21#include <linux/configfs.h>
  22#include <linux/mutex.h>
  23#include <linux/workqueue.h>
  24#include <net/genetlink.h>
  25#include <scsi/scsi_common.h>
  26#include <scsi/scsi_proto.h>
  27#include <target/target_core_base.h>
  28#include <target/target_core_fabric.h>
  29#include <target/target_core_backend.h>
  30
  31#include <linux/target_core_user.h>
  32
  33/**
  34 * DOC: Userspace I/O
  35 * Userspace I/O
  36 * -------------
  37 *
   38 * Define a shared-memory interface for LIO to pass SCSI commands and
   39 * data to userspace for processing. This allows backends that are too
   40 * complex for in-kernel support to be implemented in userspace.
  41 *
  42 * It uses the UIO framework to do a lot of the device-creation and
  43 * introspection work for us.
  44 *
  45 * See the .h file for how the ring is laid out. Note that while the
  46 * command ring is defined, the particulars of the data area are
  47 * not. Offset values in the command entry point to other locations
  48 * internal to the mmap-ed area. There is separate space outside the
  49 * command ring for data buffers. This leaves maximum flexibility for
  50 * moving buffer allocations, or even page flipping or other
  51 * allocation techniques, without altering the command ring layout.
  52 *
  53 * SECURITY:
  54 * The user process must be assumed to be malicious. There's no way to
  55 * prevent it breaking the command ring protocol if it wants, but in
  56 * order to prevent other issues we must only ever read *data* from
  57 * the shared memory area, not offsets or sizes. This applies to
  58 * command ring entries as well as the mailbox. Extra code needed for
  59 * this may have a 'UAM' comment.
  60 */
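
     /*
      * Illustrative sketch (not part of this driver) of how a userspace
      * handler typically drives this interface, assuming standard UIO
      * chardev semantics (read() blocks until uio_event_notify(), write()
      * invokes the driver's ->irqcontrol()). "/dev/uio0" and ring_size are
      * placeholders; real handlers such as tcmu-runner discover them from
      * the uio sysfs attributes.
      *
      *   int fd = open("/dev/uio0", O_RDWR);
      *   void *map = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
      *                    MAP_SHARED, fd, 0);
      *   struct tcmu_mailbox *mb = map;
      *
      *   for (;;) {
      *           int buf;
      *
      *           read(fd, &buf, 4);              // wait for new commands
      *           while (mb->cmd_tail != mb->cmd_head) {
      *                   // process the entry at map + mb->cmdr_off +
      *                   // cmd_tail, fill in its rsp, advance cmd_tail
      *           }
      *           write(fd, &buf, 4);             // kick ->irqcontrol()
      *   }
      */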
  61
  62#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
  63
   64/* For the cmd area, the size is fixed at 8MB */
  65#define CMDR_SIZE (8 * 1024 * 1024)
  66
  67/*
   68 * For the data area, the block size is PAGE_SIZE and the default
   69 * maximum size is 256K * PAGE_SIZE.
  70 */
  71#define DATA_BLOCK_SIZE PAGE_SIZE
  72#define DATA_BLOCK_SHIFT PAGE_SHIFT
  73#define DATA_BLOCK_BITS_DEF (256 * 1024)
  74
  75#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
  76#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
  77
  78/*
   79 * Default limit on the number of global data blocks (512K * PAGE_SIZE
   80 * bytes in total), above which the unmap thread will be started.
  81 */
  82#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
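
     /*
      * Worked example, assuming 4 KiB pages (DATA_BLOCK_SHIFT == 12):
      * TCMU_MBS_TO_BLOCKS(1024) == 1024 << 8 == 256K blocks == 1 GiB.
      * DATA_BLOCK_BITS_DEF therefore corresponds to a 1 GiB per-device
      * data area by default, and TCMU_GLOBAL_MAX_BLOCKS_DEF to a 2 GiB
      * global threshold.
      */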
  83
  84static u8 tcmu_kern_cmd_reply_supported;
  85static u8 tcmu_netlink_blocked;
  86
  87static struct device *tcmu_root_device;
  88
  89struct tcmu_hba {
  90        u32 host_id;
  91};
  92
  93#define TCMU_CONFIG_LEN 256
  94
  95static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
  96static LIST_HEAD(tcmu_nl_cmd_list);
  97
  98struct tcmu_dev;
  99
 100struct tcmu_nl_cmd {
 101        /* wake up thread waiting for reply */
 102        struct completion complete;
 103        struct list_head nl_list;
 104        struct tcmu_dev *udev;
 105        int cmd;
 106        int status;
 107};
 108
 109struct tcmu_dev {
 110        struct list_head node;
 111        struct kref kref;
 112
 113        struct se_device se_dev;
 114
 115        char *name;
 116        struct se_hba *hba;
 117
 118#define TCMU_DEV_BIT_OPEN 0
 119#define TCMU_DEV_BIT_BROKEN 1
 120#define TCMU_DEV_BIT_BLOCKED 2
 121        unsigned long flags;
 122
 123        struct uio_info uio_info;
 124
 125        struct inode *inode;
 126
 127        struct tcmu_mailbox *mb_addr;
 128        uint64_t dev_size;
 129        u32 cmdr_size;
 130        u32 cmdr_last_cleaned;
 131        /* Offset of data area from start of mb */
 132        /* Must add data_off and mb_addr to get the address */
 133        size_t data_off;
 134        size_t data_size;
 135        uint32_t max_blocks;
 136        size_t ring_size;
 137
 138        struct mutex cmdr_lock;
 139        struct list_head qfull_queue;
 140
 141        uint32_t dbi_max;
 142        uint32_t dbi_thresh;
 143        unsigned long *data_bitmap;
 144        struct radix_tree_root data_blocks;
 145
 146        struct idr commands;
 147
 148        struct timer_list cmd_timer;
 149        unsigned int cmd_time_out;
 150        struct list_head inflight_queue;
 151
 152        struct timer_list qfull_timer;
 153        int qfull_time_out;
 154
 155        struct list_head timedout_entry;
 156
 157        struct tcmu_nl_cmd curr_nl_cmd;
 158
 159        char dev_config[TCMU_CONFIG_LEN];
 160
 161        int nl_reply_supported;
 162};
 163
 164#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
 165
 166#define CMDR_OFF sizeof(struct tcmu_mailbox)
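
     /*
      * The command ring starts immediately after the mailbox header in the
      * shared region: entries are addressed as mb_addr + CMDR_OFF + offset,
      * and the data area follows at data_off (see struct tcmu_dev above).
      */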
 167
 168struct tcmu_cmd {
 169        struct se_cmd *se_cmd;
 170        struct tcmu_dev *tcmu_dev;
 171        struct list_head queue_entry;
 172
 173        uint16_t cmd_id;
 174
  175        /* Can't use se_cmd when cleaning up expired cmds, because if
  176         * cmd has been completed then accessing se_cmd is off limits */
 177        uint32_t dbi_cnt;
 178        uint32_t dbi_cur;
 179        uint32_t *dbi;
 180
 181        unsigned long deadline;
 182
 183#define TCMU_CMD_BIT_EXPIRED 0
 184#define TCMU_CMD_BIT_INFLIGHT 1
 185        unsigned long flags;
 186};
 187/*
  188 * To avoid deadlock, the mutex lock order should always be:
 189 *
 190 * mutex_lock(&root_udev_mutex);
 191 * ...
 192 * mutex_lock(&tcmu_dev->cmdr_lock);
 193 * mutex_unlock(&tcmu_dev->cmdr_lock);
 194 * ...
 195 * mutex_unlock(&root_udev_mutex);
 196 */
 197static DEFINE_MUTEX(root_udev_mutex);
 198static LIST_HEAD(root_udev);
 199
 200static DEFINE_SPINLOCK(timed_out_udevs_lock);
 201static LIST_HEAD(timed_out_udevs);
 202
 203static struct kmem_cache *tcmu_cmd_cache;
 204
 205static atomic_t global_db_count = ATOMIC_INIT(0);
 206static struct delayed_work tcmu_unmap_work;
 207static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
 208
 209static int tcmu_set_global_max_data_area(const char *str,
 210                                         const struct kernel_param *kp)
 211{
 212        int ret, max_area_mb;
 213
 214        ret = kstrtoint(str, 10, &max_area_mb);
 215        if (ret)
 216                return -EINVAL;
 217
 218        if (max_area_mb <= 0) {
 219                pr_err("global_max_data_area must be larger than 0.\n");
 220                return -EINVAL;
 221        }
 222
 223        tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
 224        if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
 225                schedule_delayed_work(&tcmu_unmap_work, 0);
 226        else
 227                cancel_delayed_work_sync(&tcmu_unmap_work);
 228
 229        return 0;
 230}
 231
 232static int tcmu_get_global_max_data_area(char *buffer,
 233                                         const struct kernel_param *kp)
 234{
 235        return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
 236}
 237
 238static const struct kernel_param_ops tcmu_global_max_data_area_op = {
 239        .set = tcmu_set_global_max_data_area,
 240        .get = tcmu_get_global_max_data_area,
 241};
 242
 243module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
 244                S_IWUSR | S_IRUGO);
 245MODULE_PARM_DESC(global_max_data_area_mb,
  246                 "Max MBs allowed to be allocated to all the tcmu devices' "
  247                 "data areas.");
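
     /*
      * This parameter is normally exposed as
      * /sys/module/target_core_user/parameters/global_max_data_area_mb.
      * Lowering it below the current usage schedules the unmap work to
      * reclaim data blocks; otherwise any pending reclaim is cancelled.
      */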
 248
 249static int tcmu_get_block_netlink(char *buffer,
 250                                  const struct kernel_param *kp)
 251{
 252        return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
 253                       "blocked" : "unblocked");
 254}
 255
 256static int tcmu_set_block_netlink(const char *str,
 257                                  const struct kernel_param *kp)
 258{
 259        int ret;
 260        u8 val;
 261
 262        ret = kstrtou8(str, 0, &val);
 263        if (ret < 0)
 264                return ret;
 265
 266        if (val > 1) {
 267                pr_err("Invalid block netlink value %u\n", val);
 268                return -EINVAL;
 269        }
 270
 271        tcmu_netlink_blocked = val;
 272        return 0;
 273}
 274
 275static const struct kernel_param_ops tcmu_block_netlink_op = {
 276        .set = tcmu_set_block_netlink,
 277        .get = tcmu_get_block_netlink,
 278};
 279
 280module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
 281MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
 282
 283static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
 284{
 285        struct tcmu_dev *udev = nl_cmd->udev;
 286
 287        if (!tcmu_netlink_blocked) {
 288                pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
 289                return -EBUSY;
 290        }
 291
 292        if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
 293                pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
 294                nl_cmd->status = -EINTR;
 295                list_del(&nl_cmd->nl_list);
 296                complete(&nl_cmd->complete);
 297        }
 298        return 0;
 299}
 300
 301static int tcmu_set_reset_netlink(const char *str,
 302                                  const struct kernel_param *kp)
 303{
 304        struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
 305        int ret;
 306        u8 val;
 307
 308        ret = kstrtou8(str, 0, &val);
 309        if (ret < 0)
 310                return ret;
 311
 312        if (val != 1) {
 313                pr_err("Invalid reset netlink value %u\n", val);
 314                return -EINVAL;
 315        }
 316
 317        mutex_lock(&tcmu_nl_cmd_mutex);
 318        list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
 319                ret = tcmu_fail_netlink_cmd(nl_cmd);
 320                if (ret)
 321                        break;
 322        }
 323        mutex_unlock(&tcmu_nl_cmd_mutex);
 324
 325        return ret;
 326}
 327
 328static const struct kernel_param_ops tcmu_reset_netlink_op = {
 329        .set = tcmu_set_reset_netlink,
 330};
 331
 332module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
 333MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
 334
 335/* multicast group */
 336enum tcmu_multicast_groups {
 337        TCMU_MCGRP_CONFIG,
 338};
 339
 340static const struct genl_multicast_group tcmu_mcgrps[] = {
 341        [TCMU_MCGRP_CONFIG] = { .name = "config", },
 342};
 343
 344static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
 345        [TCMU_ATTR_DEVICE]      = { .type = NLA_STRING },
 346        [TCMU_ATTR_MINOR]       = { .type = NLA_U32 },
 347        [TCMU_ATTR_CMD_STATUS]  = { .type = NLA_S32 },
 348        [TCMU_ATTR_DEVICE_ID]   = { .type = NLA_U32 },
 349        [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
 350};
 351
 352static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
 353{
 354        struct tcmu_dev *udev = NULL;
 355        struct tcmu_nl_cmd *nl_cmd;
 356        int dev_id, rc, ret = 0;
 357
 358        if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
 359            !info->attrs[TCMU_ATTR_DEVICE_ID]) {
 360                printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
 361                return -EINVAL;
 362        }
 363
 364        dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
 365        rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
 366
 367        mutex_lock(&tcmu_nl_cmd_mutex);
 368        list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
 369                if (nl_cmd->udev->se_dev.dev_index == dev_id) {
 370                        udev = nl_cmd->udev;
 371                        break;
 372                }
 373        }
 374
 375        if (!udev) {
 376                pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
 377                       completed_cmd, rc, dev_id);
 378                ret = -ENODEV;
 379                goto unlock;
 380        }
 381        list_del(&nl_cmd->nl_list);
 382
 383        pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
 384                 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
 385                 nl_cmd->status);
 386
 387        if (nl_cmd->cmd != completed_cmd) {
 388                pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
 389                       udev->name, completed_cmd, nl_cmd->cmd);
 390                ret = -EINVAL;
 391                goto unlock;
 392        }
 393
 394        nl_cmd->status = rc;
 395        complete(&nl_cmd->complete);
 396unlock:
 397        mutex_unlock(&tcmu_nl_cmd_mutex);
 398        return ret;
 399}
 400
 401static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
 402{
 403        return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
 404}
 405
 406static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
 407{
 408        return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
 409}
 410
 411static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
 412                                       struct genl_info *info)
 413{
 414        return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
 415}
 416
 417static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
 418{
 419        if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
  420                tcmu_kern_cmd_reply_supported =
 421                        nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
 422                printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
 423                       tcmu_kern_cmd_reply_supported);
 424        }
 425
 426        return 0;
 427}
 428
 429static const struct genl_ops tcmu_genl_ops[] = {
 430        {
 431                .cmd    = TCMU_CMD_SET_FEATURES,
 432                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 433                .flags  = GENL_ADMIN_PERM,
 434                .doit   = tcmu_genl_set_features,
 435        },
 436        {
 437                .cmd    = TCMU_CMD_ADDED_DEVICE_DONE,
 438                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 439                .flags  = GENL_ADMIN_PERM,
 440                .doit   = tcmu_genl_add_dev_done,
 441        },
 442        {
 443                .cmd    = TCMU_CMD_REMOVED_DEVICE_DONE,
 444                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 445                .flags  = GENL_ADMIN_PERM,
 446                .doit   = tcmu_genl_rm_dev_done,
 447        },
 448        {
 449                .cmd    = TCMU_CMD_RECONFIG_DEVICE_DONE,
 450                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 451                .flags  = GENL_ADMIN_PERM,
 452                .doit   = tcmu_genl_reconfig_dev_done,
 453        },
 454};
 455
 456/* Our generic netlink family */
 457static struct genl_family tcmu_genl_family __ro_after_init = {
 458        .module = THIS_MODULE,
 459        .hdrsize = 0,
 460        .name = "TCM-USER",
 461        .version = 2,
 462        .maxattr = TCMU_ATTR_MAX,
 463        .policy = tcmu_attr_policy,
 464        .mcgrps = tcmu_mcgrps,
 465        .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
 466        .netnsok = true,
 467        .ops = tcmu_genl_ops,
 468        .n_ops = ARRAY_SIZE(tcmu_genl_ops),
 469};
 470
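     /*
      * Each command records the data block indices (dbi) it owns in
      * cmd->dbi[]; dbi_cur is a cursor into that array and dbi_cnt its
      * length. A dbi indexes both udev->data_bitmap and the
      * udev->data_blocks radix tree.
      */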
 471#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
 472#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
 473#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
 474#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
 475
 476static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
 477{
 478        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 479        uint32_t i;
 480
 481        for (i = 0; i < len; i++)
 482                clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
 483}
 484
 485static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
 486                                        struct tcmu_cmd *tcmu_cmd)
 487{
 488        struct page *page;
 489        int ret, dbi;
 490
 491        dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
 492        if (dbi == udev->dbi_thresh)
 493                return false;
 494
 495        page = radix_tree_lookup(&udev->data_blocks, dbi);
 496        if (!page) {
 497                if (atomic_add_return(1, &global_db_count) >
 498                                      tcmu_global_max_blocks)
 499                        schedule_delayed_work(&tcmu_unmap_work, 0);
 500
  501                /* try to get a new page from the mm */
 502                page = alloc_page(GFP_KERNEL);
 503                if (!page)
 504                        goto err_alloc;
 505
 506                ret = radix_tree_insert(&udev->data_blocks, dbi, page);
 507                if (ret)
 508                        goto err_insert;
 509        }
 510
 511        if (dbi > udev->dbi_max)
 512                udev->dbi_max = dbi;
 513
 514        set_bit(dbi, udev->data_bitmap);
 515        tcmu_cmd_set_dbi(tcmu_cmd, dbi);
 516
 517        return true;
 518err_insert:
 519        __free_page(page);
 520err_alloc:
 521        atomic_dec(&global_db_count);
 522        return false;
 523}
 524
 525static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
 526                                  struct tcmu_cmd *tcmu_cmd)
 527{
 528        int i;
 529
 530        for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
 531                if (!tcmu_get_empty_block(udev, tcmu_cmd))
 532                        return false;
 533        }
 534        return true;
 535}
 536
 537static inline struct page *
 538tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 539{
 540        return radix_tree_lookup(&udev->data_blocks, dbi);
 541}
 542
 543static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 544{
 545        kfree(tcmu_cmd->dbi);
 546        kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 547}
 548
 549static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
 550{
 551        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
 552        size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
 553
 554        if (se_cmd->se_cmd_flags & SCF_BIDI) {
 555                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
 556                data_length += round_up(se_cmd->t_bidi_data_sg->length,
 557                                DATA_BLOCK_SIZE);
 558        }
 559
 560        return data_length;
 561}
 562
 563static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
 564{
 565        size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 566
 567        return data_length / DATA_BLOCK_SIZE;
 568}
 569
 570static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 571{
 572        struct se_device *se_dev = se_cmd->se_dev;
 573        struct tcmu_dev *udev = TCMU_DEV(se_dev);
 574        struct tcmu_cmd *tcmu_cmd;
 575
 576        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
 577        if (!tcmu_cmd)
 578                return NULL;
 579
 580        INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
 581        tcmu_cmd->se_cmd = se_cmd;
 582        tcmu_cmd->tcmu_dev = udev;
 583
 584        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
 585        tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
 586        tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
 587                                GFP_KERNEL);
 588        if (!tcmu_cmd->dbi) {
 589                kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 590                return NULL;
 591        }
 592
 593        return tcmu_cmd;
 594}
 595
 596static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
 597{
 598        unsigned long offset = offset_in_page(vaddr);
 599        void *start = vaddr - offset;
 600
 601        size = round_up(size+offset, PAGE_SIZE);
 602
 603        while (size) {
 604                flush_dcache_page(virt_to_page(start));
 605                start += PAGE_SIZE;
 606                size -= PAGE_SIZE;
 607        }
 608}
 609
 610/*
 611 * Some ring helper functions. We don't assume size is a power of 2 so
 612 * we can't use circ_buf.h.
 613 */
 614static inline size_t spc_used(size_t head, size_t tail, size_t size)
 615{
 616        int diff = head - tail;
 617
 618        if (diff >= 0)
 619                return diff;
 620        else
 621                return size + diff;
 622}
 623
 624static inline size_t spc_free(size_t head, size_t tail, size_t size)
 625{
 626        /* Keep 1 byte unused or we can't tell full from empty */
 627        return (size - spc_used(head, tail, size) - 1);
 628}
 629
 630static inline size_t head_to_end(size_t head, size_t size)
 631{
 632        return size - head;
 633}
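
     /*
      * Example of the accounting above: with size == 4096, head == 100 and
      * tail == 10, spc_used() == 90 and spc_free() == 4005; after the head
      * wraps (head == 10, tail == 4090), spc_used() == 16 and
      * spc_free() == 4079. One byte is always kept unused so that
      * head == tail unambiguously means "empty".
      */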
 634
 635static inline void new_iov(struct iovec **iov, int *iov_cnt)
 636{
 637        struct iovec *iovec;
 638
 639        if (*iov_cnt != 0)
 640                (*iov)++;
 641        (*iov_cnt)++;
 642
 643        iovec = *iov;
 644        memset(iovec, 0, sizeof(struct iovec));
 645}
 646
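     /*
      * Advance a head/tail index by 'used' bytes, wrapping at 'size'. The
      * smp_store_release() ensures that ring entry contents written before
      * the update are visible to the other side before the new index value
      * is observed.
      */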
 647#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 648
 649/* offset is relative to mb_addr */
 650static inline size_t get_block_offset_user(struct tcmu_dev *dev,
 651                int dbi, int remaining)
 652{
 653        return dev->data_off + dbi * DATA_BLOCK_SIZE +
 654                DATA_BLOCK_SIZE - remaining;
 655}
 656
 657static inline size_t iov_tail(struct iovec *iov)
 658{
 659        return (size_t)iov->iov_base + iov->iov_len;
 660}
 661
 662static void scatter_data_area(struct tcmu_dev *udev,
 663        struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
 664        unsigned int data_nents, struct iovec **iov,
 665        int *iov_cnt, bool copy_data)
 666{
 667        int i, dbi;
 668        int block_remaining = 0;
 669        void *from, *to = NULL;
 670        size_t copy_bytes, to_offset, offset;
 671        struct scatterlist *sg;
 672        struct page *page;
 673
 674        for_each_sg(data_sg, sg, data_nents, i) {
 675                int sg_remaining = sg->length;
 676                from = kmap_atomic(sg_page(sg)) + sg->offset;
 677                while (sg_remaining > 0) {
 678                        if (block_remaining == 0) {
 679                                if (to)
 680                                        kunmap_atomic(to);
 681
 682                                block_remaining = DATA_BLOCK_SIZE;
 683                                dbi = tcmu_cmd_get_dbi(tcmu_cmd);
 684                                page = tcmu_get_block_page(udev, dbi);
 685                                to = kmap_atomic(page);
 686                        }
 687
 688                        /*
  689                         * Convert to the virtual offset of the ring data area.
 690                         */
 691                        to_offset = get_block_offset_user(udev, dbi,
 692                                        block_remaining);
 693
 694                        /*
 695                         * The following code will gather and map the blocks
 696                         * to the same iovec when the blocks are all next to
 697                         * each other.
 698                         */
 699                        copy_bytes = min_t(size_t, sg_remaining,
 700                                        block_remaining);
 701                        if (*iov_cnt != 0 &&
 702                            to_offset == iov_tail(*iov)) {
 703                                /*
 704                                 * Will append to the current iovec, because
 705                                 * the current block page is next to the
 706                                 * previous one.
 707                                 */
 708                                (*iov)->iov_len += copy_bytes;
 709                        } else {
 710                                /*
  711                                 * Will allocate a new iovec because this is
  712                                 * the first time here or the current block
  713                                 * page is not next to the previous one.
 714                                 */
 715                                new_iov(iov, iov_cnt);
 716                                (*iov)->iov_base = (void __user *)to_offset;
 717                                (*iov)->iov_len = copy_bytes;
 718                        }
 719
 720                        if (copy_data) {
 721                                offset = DATA_BLOCK_SIZE - block_remaining;
 722                                memcpy(to + offset,
 723                                       from + sg->length - sg_remaining,
 724                                       copy_bytes);
 725                                tcmu_flush_dcache_range(to, copy_bytes);
 726                        }
 727
 728                        sg_remaining -= copy_bytes;
 729                        block_remaining -= copy_bytes;
 730                }
 731                kunmap_atomic(from - sg->offset);
 732        }
 733
 734        if (to)
 735                kunmap_atomic(to);
 736}
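
     /*
      * For instance (assuming 4 KiB blocks), scatter_data_area() above maps
      * a 10 KiB write that landed in dbis 3, 4 and 5 to the single iovec
      * { .iov_base = data_off + 3 * DATA_BLOCK_SIZE, .iov_len = 10240 },
      * because consecutive dbis are contiguous in the mmap-ed data area.
      * Non-consecutive dbis start a new iovec.
      */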
 737
 738static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 739                             bool bidi, uint32_t read_len)
 740{
 741        struct se_cmd *se_cmd = cmd->se_cmd;
 742        int i, dbi;
 743        int block_remaining = 0;
 744        void *from = NULL, *to;
 745        size_t copy_bytes, offset;
 746        struct scatterlist *sg, *data_sg;
 747        struct page *page;
 748        unsigned int data_nents;
 749        uint32_t count = 0;
 750
 751        if (!bidi) {
 752                data_sg = se_cmd->t_data_sg;
 753                data_nents = se_cmd->t_data_nents;
 754        } else {
 755
 756                /*
  757                 * For the bidi case, the first 'count' blocks hold the
  758                 * Data-Out buffer, so they must be skipped before
  759                 * gathering the Data-In buffer.
 760                 */
 761                count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
 762
 763                data_sg = se_cmd->t_bidi_data_sg;
 764                data_nents = se_cmd->t_bidi_data_nents;
 765        }
 766
 767        tcmu_cmd_set_dbi_cur(cmd, count);
 768
 769        for_each_sg(data_sg, sg, data_nents, i) {
 770                int sg_remaining = sg->length;
 771                to = kmap_atomic(sg_page(sg)) + sg->offset;
 772                while (sg_remaining > 0 && read_len > 0) {
 773                        if (block_remaining == 0) {
 774                                if (from)
 775                                        kunmap_atomic(from);
 776
 777                                block_remaining = DATA_BLOCK_SIZE;
 778                                dbi = tcmu_cmd_get_dbi(cmd);
 779                                page = tcmu_get_block_page(udev, dbi);
 780                                from = kmap_atomic(page);
 781                        }
 782                        copy_bytes = min_t(size_t, sg_remaining,
 783                                        block_remaining);
 784                        if (read_len < copy_bytes)
 785                                copy_bytes = read_len;
 786                        offset = DATA_BLOCK_SIZE - block_remaining;
 787                        tcmu_flush_dcache_range(from, copy_bytes);
 788                        memcpy(to + sg->length - sg_remaining, from + offset,
 789                                        copy_bytes);
 790
 791                        sg_remaining -= copy_bytes;
 792                        block_remaining -= copy_bytes;
 793                        read_len -= copy_bytes;
 794                }
 795                kunmap_atomic(to - sg->offset);
 796                if (read_len == 0)
 797                        break;
 798        }
 799        if (from)
 800                kunmap_atomic(from);
 801}
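
     /*
      * Bidi example (assuming 4 KiB blocks): a command with 6 KiB of
      * Data-Out and 4 KiB of Data-In uses three blocks in total; count is
      * DIV_ROUND_UP(6K, 4K) == 2, so gathering starts at dbi index 2 and
      * only the Data-In block is copied back.
      */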
 802
 803static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
 804{
 805        return thresh - bitmap_weight(bitmap, thresh);
 806}
 807
 808/*
 809 * We can't queue a command until we have space available on the cmd ring *and*
 810 * space available on the data area.
 811 *
 812 * Called with ring lock held.
 813 */
 814static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 815                size_t cmd_size, size_t data_needed)
 816{
 817        struct tcmu_mailbox *mb = udev->mb_addr;
 818        uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
 819                                / DATA_BLOCK_SIZE;
 820        size_t space, cmd_needed;
 821        u32 cmd_head;
 822
 823        tcmu_flush_dcache_range(mb, sizeof(*mb));
 824
 825        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
 826
 827        /*
  828         * If cmd end-of-ring space is too small then we need space for a PAD
  829         * entry plus the original cmd - cmds are internally contiguous.
 830         */
 831        if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
 832                cmd_needed = cmd_size;
 833        else
 834                cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
 835
 836        space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
 837        if (space < cmd_needed) {
 838                pr_debug("no cmd space: %u %u %u\n", cmd_head,
 839                       udev->cmdr_last_cleaned, udev->cmdr_size);
 840                return false;
 841        }
 842
 843        /* try to check and get the data blocks as needed */
 844        space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
 845        if ((space * DATA_BLOCK_SIZE) < data_needed) {
 846                unsigned long blocks_left =
 847                                (udev->max_blocks - udev->dbi_thresh) + space;
 848
 849                if (blocks_left < blocks_needed) {
  850                        pr_debug("no data space: only %lu available, but asked for %zu\n",
 851                                        blocks_left * DATA_BLOCK_SIZE,
 852                                        data_needed);
 853                        return false;
 854                }
 855
 856                udev->dbi_thresh += blocks_needed;
 857                if (udev->dbi_thresh > udev->max_blocks)
 858                        udev->dbi_thresh = udev->max_blocks;
 859        }
 860
 861        return tcmu_get_empty_blocks(udev, cmd);
 862}
 863
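     /*
      * The base size must cover the request header plus its iovec array,
      * but may never be smaller than a full struct tcmu_cmd_entry, so the
      * response (scsi_status and sense data) always fits in the entry.
      */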
 864static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
 865{
 866        return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
 867                        sizeof(struct tcmu_cmd_entry));
 868}
 869
 870static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
 871                                           size_t base_command_size)
 872{
 873        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
 874        size_t command_size;
 875
 876        command_size = base_command_size +
 877                round_up(scsi_command_size(se_cmd->t_task_cdb),
 878                                TCMU_OP_ALIGN_SIZE);
 879
 880        WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
 881
 882        return command_size;
 883}
 884
 885static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
 886                                struct timer_list *timer)
 887{
 888        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 889        int cmd_id;
 890
 891        if (tcmu_cmd->cmd_id)
 892                goto setup_timer;
 893
 894        cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
 895        if (cmd_id < 0) {
 896                pr_err("tcmu: Could not allocate cmd id.\n");
 897                return cmd_id;
 898        }
 899        tcmu_cmd->cmd_id = cmd_id;
 900
 901        pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
 902                 udev->name, tmo / MSEC_PER_SEC);
 903
 904setup_timer:
 905        if (!tmo)
 906                return 0;
 907
 908        tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
 909        if (!timer_pending(timer))
 910                mod_timer(timer, tcmu_cmd->deadline);
 911
 912        return 0;
 913}
 914
 915static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
 916{
 917        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 918        unsigned int tmo;
 919        int ret;
 920
 921        /*
  922         * For backwards compat: if qfull_time_out is not set, use
  923         * cmd_time_out, and if that's not set, use the default timeout.
 924         */
 925        if (!udev->qfull_time_out)
 926                return -ETIMEDOUT;
 927        else if (udev->qfull_time_out > 0)
 928                tmo = udev->qfull_time_out;
 929        else if (udev->cmd_time_out)
 930                tmo = udev->cmd_time_out;
 931        else
 932                tmo = TCMU_TIME_OUT;
 933
 934        ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
 935        if (ret)
 936                return ret;
 937
 938        list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
 939        pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
 940                 tcmu_cmd->cmd_id, udev->name);
 941        return 0;
 942}
 943
 944/**
 945 * queue_cmd_ring - queue cmd to ring or internally
 946 * @tcmu_cmd: cmd to queue
  947 * @scsi_err: TCM error code, set if -1 is returned.
  948 *
  949 * Returns:
  950 * -1 if we cannot queue internally or to the ring.
  951 *  0 on success
  952 *  1 if internally queued to wait for ring memory to free.
 953 */
 954static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 955{
 956        struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 957        struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
 958        size_t base_command_size, command_size;
 959        struct tcmu_mailbox *mb;
 960        struct tcmu_cmd_entry *entry;
 961        struct iovec *iov;
 962        int iov_cnt, ret;
 963        uint32_t cmd_head;
 964        uint64_t cdb_off;
 965        bool copy_to_data_area;
 966        size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 967
 968        *scsi_err = TCM_NO_SENSE;
 969
 970        if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
 971                *scsi_err = TCM_LUN_BUSY;
 972                return -1;
 973        }
 974
 975        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
 976                *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 977                return -1;
 978        }
 979
 980        /*
 981         * Must be a certain minimum size for response sense info, but
 982         * also may be larger if the iov array is large.
 983         *
  984         * We prepare as many iovs as possible for potential use here,
  985         * because it is expensive to tell how many regions are free in
  986         * the bitmap & global data pool; the size calculated here is
  987         * only used for the space checks.
  988         *
  989         * The size will be recalculated later, according to what is
  990         * actually needed, to save cmd area memory.
 991         */
 992        base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
 993        command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
 994
 995        if (!list_empty(&udev->qfull_queue))
 996                goto queue;
 997
 998        mb = udev->mb_addr;
 999        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1000        if ((command_size > (udev->cmdr_size / 2)) ||
1001            data_length > udev->data_size) {
1002                pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
1003                        "cmd ring/data area\n", command_size, data_length,
1004                        udev->cmdr_size, udev->data_size);
1005                *scsi_err = TCM_INVALID_CDB_FIELD;
1006                return -1;
1007        }
1008
1009        if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
1010                /*
1011                 * Don't leave commands partially setup because the unmap
1012                 * thread might need the blocks to make forward progress.
1013                 */
1014                tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1015                tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1016                goto queue;
1017        }
1018
1019        /* Insert a PAD if end-of-ring space is too small */
1020        if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
1021                size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
1022
1023                entry = (void *) mb + CMDR_OFF + cmd_head;
1024                tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
1025                tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
1026                entry->hdr.cmd_id = 0; /* not used for PAD */
1027                entry->hdr.kflags = 0;
1028                entry->hdr.uflags = 0;
1029                tcmu_flush_dcache_range(entry, sizeof(*entry));
1030
1031                UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
1032                tcmu_flush_dcache_range(mb, sizeof(*mb));
1033
1034                cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1035                WARN_ON(cmd_head != 0);
1036        }
1037
1038        entry = (void *) mb + CMDR_OFF + cmd_head;
1039        memset(entry, 0, command_size);
1040        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1041
1042        /* Handle allocating space from the data area */
1043        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1044        iov = &entry->req.iov[0];
1045        iov_cnt = 0;
1046        copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
1047                || se_cmd->se_cmd_flags & SCF_BIDI);
1048        scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
1049                          se_cmd->t_data_nents, &iov, &iov_cnt,
1050                          copy_to_data_area);
1051        entry->req.iov_cnt = iov_cnt;
1052
1053        /* Handle BIDI commands */
1054        iov_cnt = 0;
1055        if (se_cmd->se_cmd_flags & SCF_BIDI) {
1056                iov++;
1057                scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
1058                                  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
1059                                  false);
1060        }
1061        entry->req.iov_bidi_cnt = iov_cnt;
1062
1063        ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
1064                                   &udev->cmd_timer);
1065        if (ret) {
1066                tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1067
1068                *scsi_err = TCM_OUT_OF_RESOURCES;
1069                return -1;
1070        }
1071        entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1072
1073        /*
 1074         * Recalculate the command's base size and total size according
 1075         * to the actual needs.
1076         */
1077        base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
1078                                                       entry->req.iov_bidi_cnt);
1079        command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1080
1081        tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1082
1083        /* All offsets relative to mb_addr, not start of entry! */
1084        cdb_off = CMDR_OFF + cmd_head + base_command_size;
1085        memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1086        entry->req.cdb_off = cdb_off;
1087        tcmu_flush_dcache_range(entry, sizeof(*entry));
1088
1089        UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1090        tcmu_flush_dcache_range(mb, sizeof(*mb));
1091
1092        list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1093        set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1094
1095        /* TODO: only if FLUSH and FUA? */
1096        uio_event_notify(&udev->uio_info);
1097
1098        return 0;
1099
1100queue:
1101        if (add_to_qfull_queue(tcmu_cmd)) {
1102                *scsi_err = TCM_OUT_OF_RESOURCES;
1103                return -1;
1104        }
1105
1106        return 1;
1107}
1108
1109static sense_reason_t
1110tcmu_queue_cmd(struct se_cmd *se_cmd)
1111{
1112        struct se_device *se_dev = se_cmd->se_dev;
1113        struct tcmu_dev *udev = TCMU_DEV(se_dev);
1114        struct tcmu_cmd *tcmu_cmd;
1115        sense_reason_t scsi_ret;
1116        int ret;
1117
1118        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1119        if (!tcmu_cmd)
1120                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1121
1122        mutex_lock(&udev->cmdr_lock);
1123        ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1124        mutex_unlock(&udev->cmdr_lock);
1125        if (ret < 0)
1126                tcmu_free_cmd(tcmu_cmd);
1127        return scsi_ret;
1128}
1129
1130static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1131{
1132        struct se_cmd *se_cmd = cmd->se_cmd;
1133        struct tcmu_dev *udev = cmd->tcmu_dev;
1134        bool read_len_valid = false;
1135        uint32_t read_len;
1136
1137        /*
1138         * cmd has been completed already from timeout, just reclaim
1139         * data area space and free cmd
1140         */
1141        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1142                WARN_ON_ONCE(se_cmd);
1143                goto out;
1144        }
1145
1146        list_del_init(&cmd->queue_entry);
1147
1148        tcmu_cmd_reset_dbi_cur(cmd);
1149
1150        if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1151                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1152                        cmd->se_cmd);
1153                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1154                goto done;
1155        }
1156
1157        read_len = se_cmd->data_length;
1158        if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1159            (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1160                read_len_valid = true;
1161                if (entry->rsp.read_len < read_len)
1162                        read_len = entry->rsp.read_len;
1163        }
1164
1165        if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1166                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
 1167                if (!read_len_valid)
1168                        goto done;
1169                else
1170                        se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1171        }
1172        if (se_cmd->se_cmd_flags & SCF_BIDI) {
1173                /* Get Data-In buffer before clean up */
1174                gather_data_area(udev, cmd, true, read_len);
1175        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1176                gather_data_area(udev, cmd, false, read_len);
1177        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1178                /* TODO: */
1179        } else if (se_cmd->data_direction != DMA_NONE) {
1180                pr_warn("TCMU: data direction was %d!\n",
1181                        se_cmd->data_direction);
1182        }
1183
1184done:
1185        if (read_len_valid) {
1186                pr_debug("read_len = %d\n", read_len);
1187                target_complete_cmd_with_length(cmd->se_cmd,
1188                                        entry->rsp.scsi_status, read_len);
1189        } else
1190                target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1191
1192out:
1193        cmd->se_cmd = NULL;
1194        tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1195        tcmu_free_cmd(cmd);
1196}
1197
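     /*
      * Re-arm the timer for the oldest queued command that has not yet
      * timed out; commands are appended in deadline order, so the first
      * pending entry determines the next expiry. If nothing is pending,
      * stop the timer.
      */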
1198static void tcmu_set_next_deadline(struct list_head *queue,
1199                                   struct timer_list *timer)
1200{
1201        struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1202        unsigned long deadline = 0;
1203
1204        list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1205                if (!time_after(jiffies, tcmu_cmd->deadline)) {
1206                        deadline = tcmu_cmd->deadline;
1207                        break;
1208                }
1209        }
1210
1211        if (deadline)
1212                mod_timer(timer, deadline);
1213        else
1214                del_timer(timer);
1215}
1216
1217static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1218{
1219        struct tcmu_mailbox *mb;
1220        struct tcmu_cmd *cmd;
1221        int handled = 0;
1222
1223        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1224                pr_err("ring broken, not handling completions\n");
1225                return 0;
1226        }
1227
1228        mb = udev->mb_addr;
1229        tcmu_flush_dcache_range(mb, sizeof(*mb));
1230
1231        while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1232
1233                struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1234
1235                tcmu_flush_dcache_range(entry, sizeof(*entry));
1236
1237                if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1238                        UPDATE_HEAD(udev->cmdr_last_cleaned,
1239                                    tcmu_hdr_get_len(entry->hdr.len_op),
1240                                    udev->cmdr_size);
1241                        continue;
1242                }
1243                WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1244
1245                cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1246                if (!cmd) {
1247                        pr_err("cmd_id %u not found, ring is broken\n",
1248                               entry->hdr.cmd_id);
1249                        set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1250                        break;
1251                }
1252
1253                tcmu_handle_completion(cmd, entry);
1254
1255                UPDATE_HEAD(udev->cmdr_last_cleaned,
1256                            tcmu_hdr_get_len(entry->hdr.len_op),
1257                            udev->cmdr_size);
1258
1259                handled++;
1260        }
1261
1262        if (mb->cmd_tail == mb->cmd_head) {
1263                /* no more pending commands */
1264                del_timer(&udev->cmd_timer);
1265
1266                if (list_empty(&udev->qfull_queue)) {
1267                        /*
1268                         * no more pending or waiting commands so try to
1269                         * reclaim blocks if needed.
1270                         */
1271                        if (atomic_read(&global_db_count) >
1272                            tcmu_global_max_blocks)
1273                                schedule_delayed_work(&tcmu_unmap_work, 0);
1274                }
1275        } else if (udev->cmd_time_out) {
1276                tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1277        }
1278
1279        return handled;
1280}
1281
1282static int tcmu_check_expired_cmd(int id, void *p, void *data)
1283{
1284        struct tcmu_cmd *cmd = p;
1285        struct tcmu_dev *udev = cmd->tcmu_dev;
1286        u8 scsi_status;
1287        struct se_cmd *se_cmd;
1288        bool is_running;
1289
1290        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1291                return 0;
1292
1293        if (!time_after(jiffies, cmd->deadline))
1294                return 0;
1295
1296        is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1297        se_cmd = cmd->se_cmd;
1298
1299        if (is_running) {
1300                /*
 1301                 * If cmd_time_out is disabled but qfull is set, the deadline
1302                 * will only reflect the qfull timeout. Ignore it.
1303                 */
1304                if (!udev->cmd_time_out)
1305                        return 0;
1306
1307                set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1308                /*
1309                 * target_complete_cmd will translate this to LUN COMM FAILURE
1310                 */
1311                scsi_status = SAM_STAT_CHECK_CONDITION;
1312                list_del_init(&cmd->queue_entry);
1313                cmd->se_cmd = NULL;
1314        } else {
1315                list_del_init(&cmd->queue_entry);
1316                idr_remove(&udev->commands, id);
1317                tcmu_free_cmd(cmd);
1318                scsi_status = SAM_STAT_TASK_SET_FULL;
1319        }
1320
1321        pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1322                 id, udev->name, is_running ? "inflight" : "queued");
1323
1324        target_complete_cmd(se_cmd, scsi_status);
1325        return 0;
1326}
1327
1328static void tcmu_device_timedout(struct tcmu_dev *udev)
1329{
1330        spin_lock(&timed_out_udevs_lock);
1331        if (list_empty(&udev->timedout_entry))
1332                list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1333        spin_unlock(&timed_out_udevs_lock);
1334
1335        schedule_delayed_work(&tcmu_unmap_work, 0);
1336}
1337
1338static void tcmu_cmd_timedout(struct timer_list *t)
1339{
1340        struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1341
1342        pr_debug("%s cmd timeout has expired\n", udev->name);
1343        tcmu_device_timedout(udev);
1344}
1345
1346static void tcmu_qfull_timedout(struct timer_list *t)
1347{
1348        struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1349
1350        pr_debug("%s qfull timeout has expired\n", udev->name);
1351        tcmu_device_timedout(udev);
1352}
1353
1354static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1355{
1356        struct tcmu_hba *tcmu_hba;
1357
1358        tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1359        if (!tcmu_hba)
1360                return -ENOMEM;
1361
1362        tcmu_hba->host_id = host_id;
1363        hba->hba_ptr = tcmu_hba;
1364
1365        return 0;
1366}
1367
1368static void tcmu_detach_hba(struct se_hba *hba)
1369{
1370        kfree(hba->hba_ptr);
1371        hba->hba_ptr = NULL;
1372}
1373
1374static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1375{
1376        struct tcmu_dev *udev;
1377
1378        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1379        if (!udev)
1380                return NULL;
1381        kref_init(&udev->kref);
1382
1383        udev->name = kstrdup(name, GFP_KERNEL);
1384        if (!udev->name) {
1385                kfree(udev);
1386                return NULL;
1387        }
1388
1389        udev->hba = hba;
1390        udev->cmd_time_out = TCMU_TIME_OUT;
1391        udev->qfull_time_out = -1;
1392
1393        udev->max_blocks = DATA_BLOCK_BITS_DEF;
1394        mutex_init(&udev->cmdr_lock);
1395
1396        INIT_LIST_HEAD(&udev->node);
1397        INIT_LIST_HEAD(&udev->timedout_entry);
1398        INIT_LIST_HEAD(&udev->qfull_queue);
1399        INIT_LIST_HEAD(&udev->inflight_queue);
1400        idr_init(&udev->commands);
1401
1402        timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1403        timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1404
1405        INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1406
1407        return &udev->se_dev;
1408}
1409
1410static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1411{
1412        struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1413        LIST_HEAD(cmds);
1414        bool drained = true;
1415        sense_reason_t scsi_ret;
1416        int ret;
1417
1418        if (list_empty(&udev->qfull_queue))
1419                return true;
1420
1421        pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1422
1423        list_splice_init(&udev->qfull_queue, &cmds);
1424
1425        list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1426                list_del_init(&tcmu_cmd->queue_entry);
1427
1428                pr_debug("removing cmd %u on dev %s from queue\n",
1429                         tcmu_cmd->cmd_id, udev->name);
1430
1431                if (fail) {
1432                        idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1433                        /*
1434                         * We were not able to even start the command, so
1435                         * fail with busy to allow a retry in case runner
1436                         * was only temporarily down. If the device is being
1437                         * removed then LIO core will do the right thing and
1438                         * fail the retry.
1439                         */
1440                        target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1441                        tcmu_free_cmd(tcmu_cmd);
1442                        continue;
1443                }
1444
1445                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1446                if (ret < 0) {
1447                        pr_debug("cmd %u on dev %s failed with %u\n",
1448                                 tcmu_cmd->cmd_id, udev->name, scsi_ret);
1449
1450                        idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1451                        /*
1452                         * Ignore scsi_ret for now. target_complete_cmd
1453                         * drops it.
1454                         */
1455                        target_complete_cmd(tcmu_cmd->se_cmd,
1456                                            SAM_STAT_CHECK_CONDITION);
1457                        tcmu_free_cmd(tcmu_cmd);
1458                } else if (ret > 0) {
1459                        pr_debug("ran out of space during cmdr queue run\n");
1460                        /*
1461                         * cmd was requeued, so just put all cmds back in
1462                         * the queue
1463                         */
1464                        list_splice_tail(&cmds, &udev->qfull_queue);
1465                        drained = false;
1466                        break;
1467                }
1468        }
1469
1470        tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1471        return drained;
1472}
1473
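     /*
      * Called by the UIO core when userspace writes to the uio device fd;
      * this is how the userspace handler signals that it has posted
      * completions to the ring (and that queued commands can be retried).
      */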
1474static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1475{
1476        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1477
1478        mutex_lock(&udev->cmdr_lock);
1479        tcmu_handle_completions(udev);
1480        run_qfull_queue(udev, false);
1481        mutex_unlock(&udev->cmdr_lock);
1482
1483        return 0;
1484}
1485
1486/*
1487 * mmap code from uio.c. Copied here because we want to hook mmap()
1488 * and this stuff must come along.
1489 */
1490static int tcmu_find_mem_index(struct vm_area_struct *vma)
1491{
1492        struct tcmu_dev *udev = vma->vm_private_data;
1493        struct uio_info *info = &udev->uio_info;
1494
1495        if (vma->vm_pgoff < MAX_UIO_MAPS) {
1496                if (info->mem[vma->vm_pgoff].size == 0)
1497                        return -1;
1498                return (int)vma->vm_pgoff;
1499        }
1500        return -1;
1501}
1502
1503static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1504{
1505        struct page *page;
1506
1507        mutex_lock(&udev->cmdr_lock);
1508        page = tcmu_get_block_page(udev, dbi);
1509        if (likely(page)) {
1510                mutex_unlock(&udev->cmdr_lock);
1511                return page;
1512        }
1513
1514        /*
 1515         * Userspace messed up and passed in an address not in the
1516         * data iov passed to it.
1517         */
 1518        pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1519               dbi, udev->name);
1520        page = NULL;
1521        mutex_unlock(&udev->cmdr_lock);
1522
1523        return page;
1524}
1525
1526static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1527{
1528        struct tcmu_dev *udev = vmf->vma->vm_private_data;
1529        struct uio_info *info = &udev->uio_info;
1530        struct page *page;
1531        unsigned long offset;
1532        void *addr;
1533
1534        int mi = tcmu_find_mem_index(vmf->vma);
1535        if (mi < 0)
1536                return VM_FAULT_SIGBUS;
1537
1538        /*
1539         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1540         * to use mem[N].
1541         */
1542        offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1543
1544        if (offset < udev->data_off) {
1545                /* For the vmalloc()ed cmd area pages */
1546                addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1547                page = vmalloc_to_page(addr);
1548        } else {
1549                uint32_t dbi;
1550
1551                /* For the dynamically growing data area pages */
1552                dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1553                page = tcmu_try_get_block_page(udev, dbi);
1554                if (!page)
1555                        return VM_FAULT_SIGBUS;
1556        }
1557
1558        get_page(page);
1559        vmf->page = page;
1560        return 0;
1561}
1562
1563static const struct vm_operations_struct tcmu_vm_ops = {
1564        .fault = tcmu_vma_fault,
1565};
1566
1567static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1568{
1569        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1570
1571        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1572        vma->vm_ops = &tcmu_vm_ops;
1573
1574        vma->vm_private_data = udev;
1575
1576        /* Ensure the mmap is exactly the right size */
1577        if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1578                return -EINVAL;
1579
1580        return 0;
1581}
1582
1583static int tcmu_open(struct uio_info *info, struct inode *inode)
1584{
1585        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1586
1587        /* O_EXCL not supported for char devs, so fake it? */
1588        if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1589                return -EBUSY;
1590
1591        udev->inode = inode;
1592        kref_get(&udev->kref);
1593
1594        pr_debug("open\n");
1595
1596        return 0;
1597}
1598
1599static void tcmu_dev_call_rcu(struct rcu_head *p)
1600{
1601        struct se_device *dev = container_of(p, struct se_device, rcu_head);
1602        struct tcmu_dev *udev = TCMU_DEV(dev);
1603
1604        kfree(udev->uio_info.name);
1605        kfree(udev->name);
1606        kfree(udev);
1607}
1608
1609static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1610{
1611        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1612                kmem_cache_free(tcmu_cmd_cache, cmd);
1613                return 0;
1614        }
1615        return -EINVAL;
1616}
1617
1618static void tcmu_blocks_release(struct radix_tree_root *blocks,
1619                                int start, int end)
1620{
1621        int i;
1622        struct page *page;
1623
1624        for (i = start; i < end; i++) {
1625                page = radix_tree_delete(blocks, i);
1626                if (page) {
1627                        __free_page(page);
1628                        atomic_dec(&global_db_count);
1629                }
1630        }
1631}
1632
1633static void tcmu_dev_kref_release(struct kref *kref)
1634{
1635        struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1636        struct se_device *dev = &udev->se_dev;
1637        struct tcmu_cmd *cmd;
1638        bool all_expired = true;
1639        int i;
1640
1641        vfree(udev->mb_addr);
1642        udev->mb_addr = NULL;
1643
1644        spin_lock_bh(&timed_out_udevs_lock);
1645        if (!list_empty(&udev->timedout_entry))
1646                list_del(&udev->timedout_entry);
1647        spin_unlock_bh(&timed_out_udevs_lock);
1648
1649        /* Upper layer should drain all requests before calling this */
1650        mutex_lock(&udev->cmdr_lock);
1651        idr_for_each_entry(&udev->commands, cmd, i) {
1652                if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1653                        all_expired = false;
1654        }
1655        idr_destroy(&udev->commands);
1656        WARN_ON(!all_expired);
1657
1658        tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1659        bitmap_free(udev->data_bitmap);
1660        mutex_unlock(&udev->cmdr_lock);
1661
1662        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1663}
1664
1665static int tcmu_release(struct uio_info *info, struct inode *inode)
1666{
1667        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1668
1669        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1670
1671        pr_debug("close\n");
1672        /* release ref from open */
1673        kref_put(&udev->kref, tcmu_dev_kref_release);
1674        return 0;
1675}
1676
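    /*
     * Track the outstanding netlink command for this device on the global
     * tcmu_nl_cmd_list so the reply handler can find and complete it. Only
     * one netlink command may be outstanding per device at a time.
     */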
1677static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1678{
1679        struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1680
1681        if (!tcmu_kern_cmd_reply_supported)
1682                return 0;
1683
1684        if (udev->nl_reply_supported <= 0)
1685                return 0;
1686
1687        mutex_lock(&tcmu_nl_cmd_mutex);
1688
1689        if (tcmu_netlink_blocked) {
1690                mutex_unlock(&tcmu_nl_cmd_mutex);
1691                pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1692                        udev->name);
1693                return -EAGAIN;
1694        }
1695
1696        if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1697                mutex_unlock(&tcmu_nl_cmd_mutex);
1698                pr_warn("netlink cmd %d already executing on %s\n",
1699                         nl_cmd->cmd, udev->name);
1700                return -EBUSY;
1701        }
1702
1703        memset(nl_cmd, 0, sizeof(*nl_cmd));
1704        nl_cmd->cmd = cmd;
1705        nl_cmd->udev = udev;
1706        init_completion(&nl_cmd->complete);
1707        INIT_LIST_HEAD(&nl_cmd->nl_list);
1708
1709        list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1710
1711        mutex_unlock(&tcmu_nl_cmd_mutex);
1712        return 0;
1713}
1714
1715static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1716{
1717        struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1718        int ret;
1719
1720        if (!tcmu_kern_cmd_reply_supported)
1721                return 0;
1722
1723        if (udev->nl_reply_supported <= 0)
1724                return 0;
1725
1726        pr_debug("sleeping for nl reply\n");
1727        wait_for_completion(&nl_cmd->complete);
1728
1729        mutex_lock(&tcmu_nl_cmd_mutex);
1730        nl_cmd->cmd = TCMU_CMD_UNSPEC;
1731        ret = nl_cmd->status;
1732        mutex_unlock(&tcmu_nl_cmd_mutex);
1733
1734        return ret;
1735}
1736
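    /*
     * Netlink event helpers: _init allocates the skb and fills in the
     * attributes common to all events (device name, uio minor, device id);
     * _send finalizes the message, multicasts it to the config group and,
     * when reply support is enabled, waits for userspace to answer.
     */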
1737static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1738                                   enum tcmu_genl_cmd cmd,
1739                                   struct sk_buff **buf, void **hdr)
1740{
1741        struct sk_buff *skb;
1742        void *msg_header;
1743        int ret = -ENOMEM;
1744
1745        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1746        if (!skb)
1747                return ret;
1748
1749        msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1750        if (!msg_header)
1751                goto free_skb;
1752
1753        ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1754        if (ret < 0)
1755                goto free_skb;
1756
1757        ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1758        if (ret < 0)
1759                goto free_skb;
1760
1761        ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1762        if (ret < 0)
1763                goto free_skb;
1764
1765        *buf = skb;
1766        *hdr = msg_header;
1767        return ret;
1768
1769free_skb:
1770        nlmsg_free(skb);
1771        return ret;
1772}
1773
1774static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1775                                   enum tcmu_genl_cmd cmd,
1776                                   struct sk_buff *skb, void *msg_header)
1777{
1778        int ret;
1779
1780        genlmsg_end(skb, msg_header);
1781
1782        ret = tcmu_init_genl_cmd_reply(udev, cmd);
1783        if (ret) {
1784                nlmsg_free(skb);
1785                return ret;
1786        }
1787
1788        ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1789                                      TCMU_MCGRP_CONFIG, GFP_KERNEL);
1790
1791        /* Wait during an add as the listener may not be up yet */
1792        if (ret == 0 ||
1793           (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1794                return tcmu_wait_genl_cmd_reply(udev);
1795
1796        return ret;
1797}
1798
1799static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1800{
1801        struct sk_buff *skb = NULL;
1802        void *msg_header = NULL;
1803        int ret = 0;
1804
1805        ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1806                                      &msg_header);
1807        if (ret < 0)
1808                return ret;
1809        return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
1810                                       msg_header);
1811}
1812
1813static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
1814{
1815        struct sk_buff *skb = NULL;
1816        void *msg_header = NULL;
1817        int ret = 0;
1818
1819        ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
1820                                      &skb, &msg_header);
1821        if (ret < 0)
1822                return ret;
1823        return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
1824                                       skb, msg_header);
1825}
1826
1827static int tcmu_update_uio_info(struct tcmu_dev *udev)
1828{
1829        struct tcmu_hba *hba = udev->hba->hba_ptr;
1830        struct uio_info *info;
1831        char *str;
1832
1833        info = &udev->uio_info;
1834
1835        if (udev->dev_config[0])
1836                str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
1837                                udev->name, udev->dev_config);
1838        else
1839                str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
1840                                udev->name);
1841        if (!str)
1842                return -ENOMEM;
1843
1844        /* If the old string exists, free it */
1845        kfree(info->name);
1846        info->name = str;
1847
1848        return 0;
1849}
1850
1851static int tcmu_configure_device(struct se_device *dev)
1852{
1853        struct tcmu_dev *udev = TCMU_DEV(dev);
1854        struct uio_info *info;
1855        struct tcmu_mailbox *mb;
1856        int ret = 0;
1857
1858        ret = tcmu_update_uio_info(udev);
1859        if (ret)
1860                return ret;
1861
1862        info = &udev->uio_info;
1863
1864        mutex_lock(&udev->cmdr_lock);
1865        udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1866        mutex_unlock(&udev->cmdr_lock);
1867        if (!udev->data_bitmap) {
1868                ret = -ENOMEM;
1869                goto err_bitmap_alloc;
1870        }
1871
1872        udev->mb_addr = vzalloc(CMDR_SIZE);
1873        if (!udev->mb_addr) {
1874                ret = -ENOMEM;
1875                goto err_vzalloc;
1876        }
1877
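            /*
             * Layout of the mapping exposed through info->mem[0]:
             *
             *   [0, CMDR_OFF)           mailbox (struct tcmu_mailbox)
             *   [CMDR_OFF, CMDR_SIZE)   command ring
             *   [CMDR_SIZE, ring_size)  data area (max_blocks * DATA_BLOCK_SIZE)
             */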
1878        /* mailbox fits in first part of CMDR space */
1879        udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1880        udev->data_off = CMDR_SIZE;
1881        udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
1882        udev->dbi_thresh = 0; /* Default in Idle state */
1883
1884        /* Initialise the mailbox of the ring buffer */
1885        mb = udev->mb_addr;
1886        mb->version = TCMU_MAILBOX_VERSION;
1887        mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1888        mb->cmdr_off = CMDR_OFF;
1889        mb->cmdr_size = udev->cmdr_size;
1890
1891        WARN_ON(!PAGE_ALIGNED(udev->data_off));
1892        WARN_ON(udev->data_size % PAGE_SIZE);
1893        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1894
1895        info->version = __stringify(TCMU_MAILBOX_VERSION);
1896
1897        info->mem[0].name = "tcm-user command & data buffer";
1898        info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1899        info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1900        info->mem[0].memtype = UIO_MEM_NONE;
1901
1902        info->irqcontrol = tcmu_irqcontrol;
1903        info->irq = UIO_IRQ_CUSTOM;
1904
1905        info->mmap = tcmu_mmap;
1906        info->open = tcmu_open;
1907        info->release = tcmu_release;
1908
1909        ret = uio_register_device(tcmu_root_device, info);
1910        if (ret)
1911                goto err_register;
1912
1913        /* User can set hw_block_size before enabling the device */
1914        if (dev->dev_attrib.hw_block_size == 0)
1915                dev->dev_attrib.hw_block_size = 512;
1916        /* Other attributes can be configured in userspace */
1917        if (!dev->dev_attrib.hw_max_sectors)
1918                dev->dev_attrib.hw_max_sectors = 128;
1919        if (!dev->dev_attrib.emulate_write_cache)
1920                dev->dev_attrib.emulate_write_cache = 0;
1921        dev->dev_attrib.hw_queue_depth = 128;
1922
1923        /* If the user didn't explicitly disable netlink reply support,
1924         * use the module-scope setting.
1925         */
1926        if (udev->nl_reply_supported >= 0)
1927                udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1928
1929        /*
1930         * Get a ref in case userspace does a close on the uio device before
1931         * LIO has initiated tcmu_free_device.
1932         */
1933        kref_get(&udev->kref);
1934
1935        ret = tcmu_send_dev_add_event(udev);
1936        if (ret)
1937                goto err_netlink;
1938
1939        mutex_lock(&root_udev_mutex);
1940        list_add(&udev->node, &root_udev);
1941        mutex_unlock(&root_udev_mutex);
1942
1943        return 0;
1944
1945err_netlink:
1946        kref_put(&udev->kref, tcmu_dev_kref_release);
1947        uio_unregister_device(&udev->uio_info);
1948err_register:
1949        vfree(udev->mb_addr);
1950        udev->mb_addr = NULL;
1951err_vzalloc:
1952        bitmap_free(udev->data_bitmap);
1953        udev->data_bitmap = NULL;
1954err_bitmap_alloc:
1955        kfree(info->name);
1956        info->name = NULL;
1957
1958        return ret;
1959}
1960
1961static void tcmu_free_device(struct se_device *dev)
1962{
1963        struct tcmu_dev *udev = TCMU_DEV(dev);
1964
1965        /* release ref from init */
1966        kref_put(&udev->kref, tcmu_dev_kref_release);
1967}
1968
1969static void tcmu_destroy_device(struct se_device *dev)
1970{
1971        struct tcmu_dev *udev = TCMU_DEV(dev);
1972
1973        del_timer_sync(&udev->cmd_timer);
1974        del_timer_sync(&udev->qfull_timer);
1975
1976        mutex_lock(&root_udev_mutex);
1977        list_del(&udev->node);
1978        mutex_unlock(&root_udev_mutex);
1979
1980        tcmu_send_dev_remove_event(udev);
1981
1982        uio_unregister_device(&udev->uio_info);
1983
1984        /* release ref from configure */
1985        kref_put(&udev->kref, tcmu_dev_kref_release);
1986}
1987
1988static void tcmu_unblock_dev(struct tcmu_dev *udev)
1989{
1990        mutex_lock(&udev->cmdr_lock);
1991        clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
1992        mutex_unlock(&udev->cmdr_lock);
1993}
1994
1995static void tcmu_block_dev(struct tcmu_dev *udev)
1996{
1997        mutex_lock(&udev->cmdr_lock);
1998
1999        if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2000                goto unlock;
2001
2002        /* complete IO that has executed successfully */
2003        tcmu_handle_completions(udev);
2004        /* fail IO waiting to be queued */
2005        run_qfull_queue(udev, true);
2006
2007unlock:
2008        mutex_unlock(&udev->cmdr_lock);
2009}
2010
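    /*
     * Forcibly reclaim the command ring (triggered via the reset_ring
     * action attribute): complete every command still owned by userspace
     * (SAM_STAT_BUSY for err_level 1 so the initiator may retry,
     * CHECK_CONDITION otherwise), free its data blocks, and reset the ring
     * head/tail pointers to a clean state.
     */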
2011static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2012{
2013        struct tcmu_mailbox *mb;
2014        struct tcmu_cmd *cmd;
2015        int i;
2016
2017        mutex_lock(&udev->cmdr_lock);
2018
2019        idr_for_each_entry(&udev->commands, cmd, i) {
2020                if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2021                        continue;
2022
2023                pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2024                          cmd->cmd_id, udev->name,
2025                          test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2026
2027                idr_remove(&udev->commands, i);
2028                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2029                        WARN_ON(!cmd->se_cmd);
2030                        list_del_init(&cmd->queue_entry);
2031                        if (err_level == 1) {
2032                                /*
2033                                 * Userspace was not able to start the
2034                                 * command or it is retryable.
2035                                 */
2036                                target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2037                        } else {
2038                                /* hard failure */
2039                                target_complete_cmd(cmd->se_cmd,
2040                                                    SAM_STAT_CHECK_CONDITION);
2041                        }
2042                }
2043                tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2044                tcmu_free_cmd(cmd);
2045        }
2046
2047        mb = udev->mb_addr;
2048        tcmu_flush_dcache_range(mb, sizeof(*mb));
2049        pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2050                 mb->cmd_tail, mb->cmd_head);
2051
2052        udev->cmdr_last_cleaned = 0;
2053        mb->cmd_tail = 0;
2054        mb->cmd_head = 0;
2055        tcmu_flush_dcache_range(mb, sizeof(*mb));
2056
2057        del_timer(&udev->cmd_timer);
2058
2059        mutex_unlock(&udev->cmdr_lock);
2060}
2061
2062enum {
2063        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2064        Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2065};
2066
2067static match_table_t tokens = {
2068        {Opt_dev_config, "dev_config=%s"},
2069        {Opt_dev_size, "dev_size=%s"},
2070        {Opt_hw_block_size, "hw_block_size=%d"},
2071        {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2072        {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2073        {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2074        {Opt_err, NULL}
2075};
2076
2077static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2078{
2079        int val, ret;
2080
2081        ret = match_int(arg, &val);
2082        if (ret < 0) {
2083                pr_err("match_int() failed for dev attrib. Error %d.\n",
2084                       ret);
2085                return ret;
2086        }
2087
2088        if (val <= 0) {
2089                pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2090                       val);
2091                return -EINVAL;
2092        }
2093        *dev_attrib = val;
2094        return 0;
2095}
2096
2097static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2098{
2099        int val, ret;
2100
2101        ret = match_int(arg, &val);
2102        if (ret < 0) {
2103                pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2104                       ret);
2105                return ret;
2106        }
2107
2108        if (val <= 0) {
2109                pr_err("Invalid max_data_area %d.\n", val);
2110                return -EINVAL;
2111        }
2112
2113        mutex_lock(&udev->cmdr_lock);
2114        if (udev->data_bitmap) {
2115                pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
2116                ret = -EINVAL;
2117                goto unlock;
2118        }
2119
2120        udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2121        if (udev->max_blocks > tcmu_global_max_blocks) {
2122                pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2123                       val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2124                udev->max_blocks = tcmu_global_max_blocks;
2125        }
2126
2127unlock:
2128        mutex_unlock(&udev->cmdr_lock);
2129        return ret;
2130}
2131
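    /*
     * Parse the comma/newline separated key=value options written to the
     * device's configfs "control" file. An illustrative (not authoritative)
     * invocation, assuming a tcmu device named "test" under HBA user_0:
     *
     *   echo "dev_config=foo/test,max_data_area_mb=64" > \
     *       /sys/kernel/config/target/core/user_0/test/control
     */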
2132static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2133                const char *page, ssize_t count)
2134{
2135        struct tcmu_dev *udev = TCMU_DEV(dev);
2136        char *orig, *ptr, *opts;
2137        substring_t args[MAX_OPT_ARGS];
2138        int ret = 0, token;
2139
2140        opts = kstrdup(page, GFP_KERNEL);
2141        if (!opts)
2142                return -ENOMEM;
2143
2144        orig = opts;
2145
2146        while ((ptr = strsep(&opts, ",\n")) != NULL) {
2147                if (!*ptr)
2148                        continue;
2149
2150                token = match_token(ptr, tokens, args);
2151                switch (token) {
2152                case Opt_dev_config:
2153                        if (match_strlcpy(udev->dev_config, &args[0],
2154                                          TCMU_CONFIG_LEN) == 0) {
2155                                ret = -EINVAL;
2156                                break;
2157                        }
2158                        pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2159                        break;
2160                case Opt_dev_size:
2161                        ret = match_u64(&args[0], &udev->dev_size);
2162                        if (ret < 0)
2163                                pr_err("match_u64() failed for dev_size=. Error %d.\n",
2164                                       ret);
2165                        break;
2166                case Opt_hw_block_size:
2167                        ret = tcmu_set_dev_attrib(&args[0],
2168                                        &(dev->dev_attrib.hw_block_size));
2169                        break;
2170                case Opt_hw_max_sectors:
2171                        ret = tcmu_set_dev_attrib(&args[0],
2172                                        &(dev->dev_attrib.hw_max_sectors));
2173                        break;
2174                case Opt_nl_reply_supported:
2175                        ret = match_int(&args[0], &udev->nl_reply_supported);
2176                        if (ret < 0)
2177                                pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2178                                       ret);
2179                        break;
2180                case Opt_max_data_area_mb:
2181                        ret = tcmu_set_max_blocks_param(udev, &args[0]);
2182                        break;
2183                default:
2184                        break;
2185                }
2186
2187                if (ret)
2188                        break;
2189        }
2190
2191        kfree(orig);
2192        return (!ret) ? count : ret;
2193}
2194
2195static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2196{
2197        struct tcmu_dev *udev = TCMU_DEV(dev);
2198        ssize_t bl = 0;
2199
2200        bl = sprintf(b + bl, "Config: %s ",
2201                     udev->dev_config[0] ? udev->dev_config : "NULL");
2202        bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2203        bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2204                      TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2205
2206        return bl;
2207}
2208
2209static sector_t tcmu_get_blocks(struct se_device *dev)
2210{
2211        struct tcmu_dev *udev = TCMU_DEV(dev);
2212
2213        return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2214                       dev->dev_attrib.block_size);
2215}
2216
2217static sense_reason_t
2218tcmu_parse_cdb(struct se_cmd *cmd)
2219{
2220        return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2221}
2222
2223static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2224{
2225        struct se_dev_attrib *da = container_of(to_config_group(item),
2226                                        struct se_dev_attrib, da_group);
2227        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2228
2229        return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2230}
2231
2232static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2233                                       size_t count)
2234{
2235        struct se_dev_attrib *da = container_of(to_config_group(item),
2236                                        struct se_dev_attrib, da_group);
2237        struct tcmu_dev *udev = container_of(da->da_dev,
2238                                        struct tcmu_dev, se_dev);
2239        u32 val;
2240        int ret;
2241
2242        if (da->da_dev->export_count) {
2243                pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2244                return -EINVAL;
2245        }
2246
2247        ret = kstrtou32(page, 0, &val);
2248        if (ret < 0)
2249                return ret;
2250
2251        udev->cmd_time_out = val * MSEC_PER_SEC;
2252        return count;
2253}
2254CONFIGFS_ATTR(tcmu_, cmd_time_out);
2255
2256static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2257{
2258        struct se_dev_attrib *da = container_of(to_config_group(item),
2259                                                struct se_dev_attrib, da_group);
2260        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2261
2262        return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2263                        udev->qfull_time_out :
2264                        udev->qfull_time_out / MSEC_PER_SEC);
2265}
2266
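    /*
     * Values written here are taken in seconds and stored in milliseconds;
     * -1 is stored as-is and reported verbatim by the _show handler above,
     * which scales non-negative values back to seconds.
     */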
2267static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2268                                         const char *page, size_t count)
2269{
2270        struct se_dev_attrib *da = container_of(to_config_group(item),
2271                                        struct se_dev_attrib, da_group);
2272        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2273        s32 val;
2274        int ret;
2275
2276        ret = kstrtos32(page, 0, &val);
2277        if (ret < 0)
2278                return ret;
2279
2280        if (val >= 0) {
2281                udev->qfull_time_out = val * MSEC_PER_SEC;
2282        } else if (val == -1) {
2283                udev->qfull_time_out = val;
2284        } else {
2285                pr_err("Invalid qfull timeout value %d\n", val);
2286                return -EINVAL;
2287        }
2288        return count;
2289}
2290CONFIGFS_ATTR(tcmu_, qfull_time_out);
2291
2292static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2293{
2294        struct se_dev_attrib *da = container_of(to_config_group(item),
2295                                                struct se_dev_attrib, da_group);
2296        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2297
2298        return snprintf(page, PAGE_SIZE, "%u\n",
2299                        TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2300}
2301CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2302
2303static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2304{
2305        struct se_dev_attrib *da = container_of(to_config_group(item),
2306                                                struct se_dev_attrib, da_group);
2307        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2308
2309        return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2310}
2311
2312static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2313                                      const char *reconfig_data)
2314{
2315        struct sk_buff *skb = NULL;
2316        void *msg_header = NULL;
2317        int ret = 0;
2318
2319        ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2320                                      &skb, &msg_header);
2321        if (ret < 0)
2322                return ret;
2323        ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2324        if (ret < 0) {
2325                nlmsg_free(skb);
2326                return ret;
2327        }
2328        return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2329                                       skb, msg_header);
2330}
2331
2333static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2334                                     size_t count)
2335{
2336        struct se_dev_attrib *da = container_of(to_config_group(item),
2337                                                struct se_dev_attrib, da_group);
2338        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2339        int ret, len;
2340
2341        len = strlen(page);
2342        if (!len || len > TCMU_CONFIG_LEN - 1)
2343                return -EINVAL;
2344
2345        /* Check if device has been configured before */
2346        if (target_dev_configured(&udev->se_dev)) {
2347                ret = tcmu_send_dev_config_event(udev, page);
2348                if (ret) {
2349                        pr_err("Unable to reconfigure device\n");
2350                        return ret;
2351                }
2352                strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2353
2354                ret = tcmu_update_uio_info(udev);
2355                if (ret)
2356                        return ret;
2357                return count;
2358        }
2359        strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2360
2361        return count;
2362}
2363CONFIGFS_ATTR(tcmu_, dev_config);
2364
2365static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2366{
2367        struct se_dev_attrib *da = container_of(to_config_group(item),
2368                                                struct se_dev_attrib, da_group);
2369        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2370
2371        return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2372}
2373
2374static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2375{
2376        struct sk_buff *skb = NULL;
2377        void *msg_header = NULL;
2378        int ret = 0;
2379
2380        ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2381                                      &skb, &msg_header);
2382        if (ret < 0)
2383                return ret;
2384        ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2385                                size, TCMU_ATTR_PAD);
2386        if (ret < 0) {
2387                nlmsg_free(skb);
2388                return ret;
2389        }
2390        return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2391                                       skb, msg_header);
2392}
2393
2394static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2395                                   size_t count)
2396{
2397        struct se_dev_attrib *da = container_of(to_config_group(item),
2398                                                struct se_dev_attrib, da_group);
2399        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2400        u64 val;
2401        int ret;
2402
2403        ret = kstrtou64(page, 0, &val);
2404        if (ret < 0)
2405                return ret;
2406
2407        /* Check if device has been configured before */
2408        if (target_dev_configured(&udev->se_dev)) {
2409                ret = tcmu_send_dev_size_event(udev, val);
2410                if (ret) {
2411                        pr_err("Unable to reconfigure device\n");
2412                        return ret;
2413                }
2414        }
2415        udev->dev_size = val;
2416        return count;
2417}
2418CONFIGFS_ATTR(tcmu_, dev_size);
2419
2420static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2421                char *page)
2422{
2423        struct se_dev_attrib *da = container_of(to_config_group(item),
2424                                                struct se_dev_attrib, da_group);
2425        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2426
2427        return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2428}
2429
2430static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2431                const char *page, size_t count)
2432{
2433        struct se_dev_attrib *da = container_of(to_config_group(item),
2434                                                struct se_dev_attrib, da_group);
2435        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2436        s8 val;
2437        int ret;
2438
2439        ret = kstrtos8(page, 0, &val);
2440        if (ret < 0)
2441                return ret;
2442
2443        udev->nl_reply_supported = val;
2444        return count;
2445}
2446CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2447
2448static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2449                                             char *page)
2450{
2451        struct se_dev_attrib *da = container_of(to_config_group(item),
2452                                        struct se_dev_attrib, da_group);
2453
2454        return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2455}
2456
2457static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2458{
2459        struct sk_buff *skb = NULL;
2460        void *msg_header = NULL;
2461        int ret = 0;
2462
2463        ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2464                                      &skb, &msg_header);
2465        if (ret < 0)
2466                return ret;
2467        ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2468        if (ret < 0) {
2469                nlmsg_free(skb);
2470                return ret;
2471        }
2472        return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2473                                       skb, msg_header);
2474}
2475
2476static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2477                                              const char *page, size_t count)
2478{
2479        struct se_dev_attrib *da = container_of(to_config_group(item),
2480                                        struct se_dev_attrib, da_group);
2481        struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2482        u8 val;
2483        int ret;
2484
2485        ret = kstrtou8(page, 0, &val);
2486        if (ret < 0)
2487                return ret;
2488
2489        /* Check if device has been configured before */
2490        if (target_dev_configured(&udev->se_dev)) {
2491                ret = tcmu_send_emulate_write_cache(udev, val);
2492                if (ret) {
2493                        pr_err("Unable to reconfigure device\n");
2494                        return ret;
2495                }
2496        }
2497
2498        da->emulate_write_cache = val;
2499        return count;
2500}
2501CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2502
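    /*
     * block_dev and reset_ring below hang off the device's dev_action_group
     * (the per-device "action" directory in configfs, e.g.
     * .../core/user_0/test/action/block_dev; path shown for illustration).
     */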
2503static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2504{
2505        struct se_device *se_dev = container_of(to_config_group(item),
2506                                                struct se_device,
2507                                                dev_action_group);
2508        struct tcmu_dev *udev = TCMU_DEV(se_dev);
2509
2510        if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2511                return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2512        else
2513                return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2514}
2515
2516static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2517                                    size_t count)
2518{
2519        struct se_device *se_dev = container_of(to_config_group(item),
2520                                                struct se_device,
2521                                                dev_action_group);
2522        struct tcmu_dev *udev = TCMU_DEV(se_dev);
2523        u8 val;
2524        int ret;
2525
2526        if (!target_dev_configured(&udev->se_dev)) {
2527                pr_err("Device is not configured.\n");
2528                return -EINVAL;
2529        }
2530
2531        ret = kstrtou8(page, 0, &val);
2532        if (ret < 0)
2533                return ret;
2534
2535        if (val > 1) {
2536                pr_err("Invalid block value %d\n", val);
2537                return -EINVAL;
2538        }
2539
2540        if (!val)
2541                tcmu_unblock_dev(udev);
2542        else
2543                tcmu_block_dev(udev);
2544        return count;
2545}
2546CONFIGFS_ATTR(tcmu_, block_dev);
2547
2548static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2549                                     size_t count)
2550{
2551        struct se_device *se_dev = container_of(to_config_group(item),
2552                                                struct se_device,
2553                                                dev_action_group);
2554        struct tcmu_dev *udev = TCMU_DEV(se_dev);
2555        u8 val;
2556        int ret;
2557
2558        if (!target_dev_configured(&udev->se_dev)) {
2559                pr_err("Device is not configured.\n");
2560                return -EINVAL;
2561        }
2562
2563        ret = kstrtou8(page, 0, &val);
2564        if (ret < 0)
2565                return ret;
2566
2567        if (val != 1 && val != 2) {
2568                pr_err("Invalid reset ring value %d\n", val);
2569                return -EINVAL;
2570        }
2571
2572        tcmu_reset_ring(udev, val);
2573        return count;
2574}
2575CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2576
2577static struct configfs_attribute *tcmu_attrib_attrs[] = {
2578        &tcmu_attr_cmd_time_out,
2579        &tcmu_attr_qfull_time_out,
2580        &tcmu_attr_max_data_area_mb,
2581        &tcmu_attr_dev_config,
2582        &tcmu_attr_dev_size,
2583        &tcmu_attr_emulate_write_cache,
2584        &tcmu_attr_nl_reply_supported,
2585        NULL,
2586};
2587
2588static struct configfs_attribute **tcmu_attrs;
2589
2590static struct configfs_attribute *tcmu_action_attrs[] = {
2591        &tcmu_attr_block_dev,
2592        &tcmu_attr_reset_ring,
2593        NULL,
2594};
2595
2596static struct target_backend_ops tcmu_ops = {
2597        .name                   = "user",
2598        .owner                  = THIS_MODULE,
2599        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
2600        .attach_hba             = tcmu_attach_hba,
2601        .detach_hba             = tcmu_detach_hba,
2602        .alloc_device           = tcmu_alloc_device,
2603        .configure_device       = tcmu_configure_device,
2604        .destroy_device         = tcmu_destroy_device,
2605        .free_device            = tcmu_free_device,
2606        .parse_cdb              = tcmu_parse_cdb,
2607        .set_configfs_dev_params = tcmu_set_configfs_dev_params,
2608        .show_configfs_dev_params = tcmu_show_configfs_dev_params,
2609        .get_device_type        = sbc_get_device_type,
2610        .get_blocks             = tcmu_get_blocks,
2611        .tb_dev_action_attrs    = tcmu_action_attrs,
2612};
2613
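    /*
     * Global data-block reclaim, run from the unmap work once the total
     * block count exceeds tcmu_global_max_blocks: shrink each configured
     * device's data area down to its highest in-use block, unmap the freed
     * range from userspace, release the pages, and reschedule if we are
     * still over the limit.
     */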
2614static void find_free_blocks(void)
2615{
2616        struct tcmu_dev *udev;
2617        loff_t off;
2618        u32 start, end, block, total_freed = 0;
2619
2620        if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2621                return;
2622
2623        mutex_lock(&root_udev_mutex);
2624        list_for_each_entry(udev, &root_udev, node) {
2625                mutex_lock(&udev->cmdr_lock);
2626
2627                if (!target_dev_configured(&udev->se_dev)) {
2628                        mutex_unlock(&udev->cmdr_lock);
2629                        continue;
2630                }
2631
2632                /* Try to complete the finished commands first */
2633                tcmu_handle_completions(udev);
2634
2635                /* Skip udevs that are idle */
2636                if (!udev->dbi_thresh) {
2637                        mutex_unlock(&udev->cmdr_lock);
2638                        continue;
2639                }
2640
2641                end = udev->dbi_max + 1;
2642                block = find_last_bit(udev->data_bitmap, end);
2643                if (block == udev->dbi_max) {
2644                        /*
2645                         * The last bit is dbi_max, so it is not possible
2646                         * to reclaim any blocks.
2647                         */
2648                        mutex_unlock(&udev->cmdr_lock);
2649                        continue;
2650                } else if (block == end) {
2651                        /* The current udev will go to the idle state */
2652                        udev->dbi_thresh = start = 0;
2653                        udev->dbi_max = 0;
2654                } else {
2655                        udev->dbi_thresh = start = block + 1;
2656                        udev->dbi_max = block;
2657                }
2658
2659                /* Unmap the reclaimed part of the data area, starting at off */
2660                off = udev->data_off + start * DATA_BLOCK_SIZE;
2661                unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2662
2663                /* Release the block pages */
2664                tcmu_blocks_release(&udev->data_blocks, start, end);
2665                mutex_unlock(&udev->cmdr_lock);
2666
2667                total_freed += end - start;
2668                pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2669                         total_freed, udev->name);
2670        }
2671        mutex_unlock(&root_udev_mutex);
2672
2673        if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2674                schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2675}
2676
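    /*
     * Handle devices whose command or qfull timer fired: expire overdue
     * commands and re-arm the per-device inflight and qfull timers.
     */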
2677static void check_timedout_devices(void)
2678{
2679        struct tcmu_dev *udev, *tmp_dev;
2680        LIST_HEAD(devs);
2681
2682        spin_lock_bh(&timed_out_udevs_lock);
2683        list_splice_init(&timed_out_udevs, &devs);
2684
2685        list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2686                list_del_init(&udev->timedout_entry);
2687                spin_unlock_bh(&timed_out_udevs_lock);
2688
2689                mutex_lock(&udev->cmdr_lock);
2690                idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2691
2692                tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2693                tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2694
2695                mutex_unlock(&udev->cmdr_lock);
2696
2697                spin_lock_bh(&timed_out_udevs_lock);
2698        }
2699
2700        spin_unlock_bh(&timed_out_udevs_lock);
2701}
2702
2703static void tcmu_unmap_work_fn(struct work_struct *work)
2704{
2705        check_timedout_devices();
2706        find_free_blocks();
2707}
2708
2709static int __init tcmu_module_init(void)
2710{
2711        int ret, i, k, len = 0;
2712
2713        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2714
2715        INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2716
2717        tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2718                                sizeof(struct tcmu_cmd),
2719                                __alignof__(struct tcmu_cmd),
2720                                0, NULL);
2721        if (!tcmu_cmd_cache)
2722                return -ENOMEM;
2723
2724        tcmu_root_device = root_device_register("tcm_user");
2725        if (IS_ERR(tcmu_root_device)) {
2726                ret = PTR_ERR(tcmu_root_device);
2727                goto out_free_cache;
2728        }
2729
2730        ret = genl_register_family(&tcmu_genl_family);
2731        if (ret < 0) {
2732                goto out_unreg_device;
2733        }
2734
2735        for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2736                len += sizeof(struct configfs_attribute *);
2737        }
2738        for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
2739                len += sizeof(struct configfs_attribute *);
2740        }
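            /* One extra slot for the NULL terminator of the merged array */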
2741        len += sizeof(struct configfs_attribute *);
2742
2743        tcmu_attrs = kzalloc(len, GFP_KERNEL);
2744        if (!tcmu_attrs) {
2745                ret = -ENOMEM;
2746                goto out_unreg_genl;
2747        }
2748
2749        for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
2750                tcmu_attrs[i] = passthrough_attrib_attrs[i];
2751        }
2752        for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
2753                tcmu_attrs[i] = tcmu_attrib_attrs[k];
2754                i++;
2755        }
2756        tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
2757
2758        ret = transport_backend_register(&tcmu_ops);
2759        if (ret)
2760                goto out_attrs;
2761
2762        return 0;
2763
2764out_attrs:
2765        kfree(tcmu_attrs);
2766out_unreg_genl:
2767        genl_unregister_family(&tcmu_genl_family);
2768out_unreg_device:
2769        root_device_unregister(tcmu_root_device);
2770out_free_cache:
2771        kmem_cache_destroy(tcmu_cmd_cache);
2772
2773        return ret;
2774}
2775
2776static void __exit tcmu_module_exit(void)
2777{
2778        cancel_delayed_work_sync(&tcmu_unmap_work);
2779        target_backend_unregister(&tcmu_ops);
2780        kfree(tcmu_attrs);
2781        genl_unregister_family(&tcmu_genl_family);
2782        root_device_unregister(tcmu_root_device);
2783        kmem_cache_destroy(tcmu_cmd_cache);
2784}
2785
2786MODULE_DESCRIPTION("TCM USER subsystem plugin");
2787MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
2788MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
2789MODULE_LICENSE("GPL");
2790
2791module_init(tcmu_module_init);
2792module_exit(tcmu_module_exit);
2793