linux/drivers/vhost/scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi response incoming iovecs */
        int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
        struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct vhost_scsi_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[VHOST_SCSI_NAMELEN];
        /* Returned by vhost_scsi_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track the number of TPG Port/LUN links for explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* Used for enabling T10-PI with legacy devices */
        int tv_fabric_prot_type;
        /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct vhost_scsi_nexus *tpg_nexus;
        /* Pointer back to vhost_scsi_tport */
        struct vhost_scsi_tport *tport;
        /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[VHOST_SCSI_NAMELEN];
        /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/* Note: VIRTIO_F_VERSION_1 implies ANY_LAYOUT; both are enabled below. */
enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI) |
                                               (1ULL << VIRTIO_F_ANY_LAYOUT) |
                                               (1ULL << VIRTIO_F_VERSION_1)
};
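
/*
 * Illustrative note: negotiated bits are tested per-request with
 * vhost_has_feature(), e.g. vhost_scsi_handle_vq() checks
 * vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) to pick the request
 * header layout that carries T10-PI protection bytes.
 */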

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one of the two counters tracks newly submitted commands,
         * while the flush path waits for the other to drop to zero.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};
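
/*
 * How the two inflights[] slots are used (a sketch of the flow, see
 * vhost_scsi_flush()): every new command takes a kref on the slot
 * currently selected by inflight_idx via vhost_scsi_get_inflight();
 * a flush flips inflight_idx so new commands land in the other slot,
 * drops the old slot's initial reference, and waits on its completion
 * until every outstanding command has called vhost_scsi_put_inflight().
 */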

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

static struct target_core_fabric_ops vhost_scsi_ops;
static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static int iov_num_pages(void __user *iov_base, size_t iov_len)
{
        return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
               ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
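
/*
 * Worked example, assuming 4 KiB pages: iov_base = 0x10ff0 and
 * iov_len = 0x20 give PAGE_ALIGN(0x11010) = 0x12000 and a page-aligned
 * base of 0x10000, so the 32-byte range straddles a page boundary and
 * iov_num_pages() returns 2.
 */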

static void vhost_scsi_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* setup new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
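
/*
 * Reference pairing: vhost_scsi_get_tag() takes the per-command
 * reference via vhost_scsi_get_inflight(), and vhost_scsi_release_cmd()
 * drops it via vhost_scsi_put_inflight(), so the flush completion in
 * vhost_scsi_done_inflight() fires only after the last command is gone.
 */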

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *vhost_scsi_get_fabric_name(void)
{
        return "vhost";
}

static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32
vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32
vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *
vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);

        return tpg->tv_fabric_prot_type;
}

static struct se_node_acl *
vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_nacl *nacl;

        nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct vhost_scsi_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void
vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
{
        struct vhost_scsi_nacl *nacl = container_of(se_nacl,
                        struct vhost_scsi_nacl, se_node_acl);
        kfree(nacl);
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        vhost_scsi_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
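
/*
 * The put_page() calls in vhost_scsi_release_cmd() drop the page
 * references taken by get_user_pages_fast() in vhost_scsi_map_to_sgl(),
 * one per mapped scatterlist entry.
 */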

static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void vhost_scsi_close_session(struct se_session *se_sess)
{
        return;
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return;
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = cpu_to_vhost32(vq, event);
        evt->event.reason = cpu_to_vhost32(vq, reason);
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct vhost_scsi_evt, list);
                llnode = llist_next(llnode);
                vhost_scsi_do_evt_work(vs, evt);
                vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        struct iov_iter iov_iter;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct vhost_scsi_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);

                iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
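
/*
 * The signal bitmap batches guest notifications: all pending completions
 * are drained first, then vhost_signal() is raised once per virtqueue
 * that completed at least one command, instead of once per command.
 */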

static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct vhost_scsi_cmd *cmd;
        struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct vhost_scsi_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = vhost_scsi_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

        return cmd;
}
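
/*
 * No allocation happens on this path: descriptors live in the session's
 * preallocated sess_cmd_map, indexed by a percpu_ida tag, and the
 * memset() above deliberately preserves the preallocated sgl, prot_sgl
 * and upages backing arrays by saving and restoring their pointers.
 */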

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
                      void __user *ptr,
                      size_t len,
                      struct scatterlist *sgl,
                      bool write)
{
        unsigned int npages = 0, offset, nbytes;
        unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
        struct page **pages = cmd->tvc_upages;
        int ret, i;

        if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
                        pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than requested */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}
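
/*
 * Worked example, assuming 4 KiB pages: an 8192-byte buffer starting at
 * page offset 0x200 pins three pages and produces three sg entries of
 * 3584, 4096 and 512 bytes, with the pinned page count (3) returned.
 */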

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
        int sgl_count = 0;

        if (!iter || !iter->iov) {
                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
                       " present\n", __func__, bytes);
                return -EINVAL;
        }

        sgl_count = iov_iter_npages(iter, 0xffff);
        if (sgl_count > max_sgls) {
                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
                return -EINVAL;
        }
        return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
                      struct iov_iter *iter,
                      struct scatterlist *sg, int sg_count)
{
        size_t off = iter->iov_offset;
        int i, ret;

        for (i = 0; i < iter->nr_segs; i++) {
                void __user *base = iter->iov[i].iov_base + off;
                size_t len = iter->iov[i].iov_len - off;

                ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
                        for (i = 0; i < sg_count; i++) {
                                struct page *page = sg_page(&sg[i]);
                                if (page)
                                        put_page(page);
                        }
                        return ret;
                }
                sg += ret;
                off = 0;
        }
        return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
                 size_t prot_bytes, struct iov_iter *prot_iter,
                 size_t data_bytes, struct iov_iter *data_iter)
{
        int sgl_count, ret;
        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

        if (prot_bytes) {
                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
                                                 VHOST_SCSI_PREALLOC_PROT_SGLS);
                if (sgl_count < 0)
                        return sgl_count;

                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
                cmd->tvc_prot_sgl_count = sgl_count;
                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
                                            cmd->tvc_prot_sgl,
                                            cmd->tvc_prot_sgl_count);
                if (ret < 0) {
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
        }
        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
                                         VHOST_SCSI_PREALLOC_SGLS);
        if (sgl_count < 0)
                return sgl_count;

        sg_init_table(cmd->tvc_sgl, sgl_count);
        cmd->tvc_sgl_count = sgl_count;
        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
                  cmd->tvc_sgl, cmd->tvc_sgl_count);

        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
        if (ret < 0) {
                cmd->tvc_sgl_count = 0;
                return ret;
        }
        return 0;
}
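
/*
 * Note on the write flag above: write == true means the host writes into
 * guest memory (DMA_FROM_DEVICE, i.e. a READ-class command), which is
 * why it is handed to get_user_pages_fast() as the pinning write flag.
 */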

static int vhost_scsi_to_tcm_attr(int attr)
{
        switch (attr) {
        case VIRTIO_SCSI_S_SIMPLE:
                return TCM_SIMPLE_TAG;
        case VIRTIO_SCSI_S_ORDERED:
                return TCM_ORDERED_TAG;
        case VIRTIO_SCSI_S_HEAD:
                return TCM_HEAD_TAG;
        case VIRTIO_SCSI_S_ACA:
                return TCM_ACA_TAG;
        default:
                break;
        }
        return TCM_SIMPLE_TAG;
}

static void vhost_scsi_submission_work(struct work_struct *work)
{
        struct vhost_scsi_cmd *cmd =
                container_of(work, struct vhost_scsi_cmd, work);
        struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
                        sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
                        cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct vhost_scsi_cmd *cmd;
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
        unsigned out, in;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov), &out, &in,
                                         NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                         head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }
                /*
                 * Check for a sane response buffer so we can report early
                 * errors back to the guest.
                 */
                if (unlikely(vq->iov[out].iov_len < rsp_size)) {
                        vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
                                " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
                /*
                 * Setup pointers and values based upon different virtio-scsi
                 * request header if T10_PI is enabled in KVM guest.
                 */
                if (t10_pi) {
                        req = &v_req_pi;
                        req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                } else {
                        req = &v_req;
                        req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                }
                /*
                 * FIXME: Not correct for BIDI operation
                 */
                out_size = iov_length(vq->iov, out);
                in_size = iov_length(&vq->iov[out], in);

                /*
                 * Copy over the virtio-scsi request header, which for a
                 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
                 * single iovec may contain both the header + outgoing
                 * WRITE payloads.
                 *
                 * copy_from_iter() will advance out_iter, so that it will
                 * point at the start of the outgoing WRITE payload, if
                 * DMA_TO_DEVICE is set.
                 */
                iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

                ret = copy_from_iter(req, req_size, &out_iter);
                if (unlikely(ret != req_size)) {
                        vq_err(vq, "Faulted on copy_from_iter\n");
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);
                if (unlikely(!tpg)) {
                        /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /*
                 * Determine data_direction by calculating the total outgoing
                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
                 * response headers respectively.
                 *
                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
                 * to the right place.
                 *
                 * For DMA_FROM_DEVICE, the iovec will be just past the end
                 * of the virtio-scsi response header in either the same
                 * or immediately following iovec.
                 *
                 * Any associated T10_PI bytes for the outgoing / incoming
                 * payloads are included in calculation of exp_data_len here.
                 */
                prot_bytes = 0;

                if (out_size > req_size) {
                        data_direction = DMA_TO_DEVICE;
                        exp_data_len = out_size - req_size;
                        data_iter = out_iter;
                } else if (in_size > rsp_size) {
                        data_direction = DMA_FROM_DEVICE;
                        exp_data_len = in_size - rsp_size;

                        iov_iter_init(&in_iter, READ, &vq->iov[out], in,
                                      rsp_size + exp_data_len);
                        iov_iter_advance(&in_iter, rsp_size);
                        data_iter = in_iter;
                } else {
                        data_direction = DMA_NONE;
                        exp_data_len = 0;
                }
                /*
                 * If T10_PI header + payload is present, setup prot_iter values
                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
                 * host scatterlists via get_user_pages_fast().
                 */
                if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesout,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesin,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
                         * Set prot_iter to data_iter, and advance past any
                         * preceding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
                         * actual data payload length.
                         */
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
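                /*
                 * LUN decode example: with single-level addressing, bytes
                 * 2-3 of the virtio-scsi lun field {0x40, 0x05} yield
                 * ((0x40 << 8) | 0x05) & 0x3FFF == 5; the 0x3FFF mask
                 * strips the 0x40 flat-addressing marker from byte 2.
                 */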
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi, then get a pre-allocated
                 * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                               PTR_ERR(cmd));
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp_iov = &vq->iov[out];
                cmd->tvc_in_iovs = in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                         cmd->tvc_cdb[0], cmd->tvc_lun);
                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_mapal(cmd,
                                               prot_bytes, &prot_iter,
                                               exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
                                vhost_scsi_send_bad_target(vs, vq, head, out);
                                continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch cmd descriptor for cmwq execution in process
                 * context provided by vhost_scsi_workqueue.  This also ensures
                 * cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
        }
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
                   struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct vhost_scsi_evt *evt;

        evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
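
/*
 * This encoding mirrors the decode in vhost_scsi_handle_vq(): LUNs below
 * 256 are reported directly in lun[3], while larger ones set the 0x40
 * flat-addressing marker in lun[2] alongside the high bits, matching the
 * 0x3FFF mask applied on receive.
 */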

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        vhost_scsi_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}
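
/*
 * Flush sequencing, step by step: (1) vhost_scsi_init_inflight() swaps in
 * fresh counters so new commands are tracked separately, (2) the initial
 * reference of each old counter is dropped, (3) poll and work queues are
 * drained so no handler still holds an old counter, and (4) the wait
 * completes once every pre-flush command has been released.
 */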

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 *  The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct se_portal_group *se_tpg;
        struct vhost_scsi_tport *tv_tport;
        struct vhost_scsi_tpg *tpg;
        struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
1371                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1372                        ret = -EFAULT;
1373                        goto out;
1374                }
1375        }
1376
1377        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1378        vs_tpg = kzalloc(len, GFP_KERNEL);
1379        if (!vs_tpg) {
1380                ret = -ENOMEM;
1381                goto out;
1382        }
1383        if (vs->vs_tpg)
1384                memcpy(vs_tpg, vs->vs_tpg, len);
1385
1386        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1387                mutex_lock(&tpg->tv_tpg_mutex);
1388                if (!tpg->tpg_nexus) {
1389                        mutex_unlock(&tpg->tv_tpg_mutex);
1390                        continue;
1391                }
1392                if (tpg->tv_tpg_vhost_count != 0) {
1393                        mutex_unlock(&tpg->tv_tpg_mutex);
1394                        continue;
1395                }
1396                tv_tport = tpg->tport;
1397
1398                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1399                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1400                                kfree(vs_tpg);
1401                                mutex_unlock(&tpg->tv_tpg_mutex);
1402                                ret = -EEXIST;
1403                                goto out;
1404                        }
1405                        /*
1406                         * In order to ensure individual vhost-scsi configfs
1407                         * groups cannot be removed while in use by vhost ioctl,
1408                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
1409                         * dependency now.
1410                         */
1411                        se_tpg = &tpg->se_tpg;
1412                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1413                        if (ret) {
1414                                pr_warn("target_depend_item() failed: %d\n", ret);
1415                                kfree(vs_tpg);
1416                                mutex_unlock(&tpg->tv_tpg_mutex);
1417                                goto out;
1418                        }
1419                        tpg->tv_tpg_vhost_count++;
1420                        tpg->vhost_scsi = vs;
1421                        vs_tpg[tpg->tport_tpgt] = tpg;
1422                        smp_mb__after_atomic();
1423                        match = true;
1424                }
1425                mutex_unlock(&tpg->tv_tpg_mutex);
1426        }
1427
1428        if (match) {
1429                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1430                       sizeof(vs->vs_vhost_wwpn));
1431                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1432                        vq = &vs->vqs[i].vq;
1433                        mutex_lock(&vq->mutex);
1434                        vq->private_data = vs_tpg;
1435                        vhost_init_used(vq);
1436                        mutex_unlock(&vq->mutex);
1437                }
1438                ret = 0;
1439        } else {
1440                ret = -EEXIST;
1441        }
1442
1443        /*
1444         * Acts like synchronize_rcu(): wait until every reader of the
1445         * old vs->vs_tpg array has finished before it is freed below.
1446         */
1447        vhost_scsi_flush(vs);
1448        kfree(vs->vs_tpg);
1449        vs->vs_tpg = vs_tpg;
1450
1451out:
1452        mutex_unlock(&vs->dev.mutex);
1453        mutex_unlock(&vhost_scsi_mutex);
1454        return ret;
1455}
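/*
 * vs->vs_tpg updates above follow a copy/publish/flush scheme, a
 * hand-rolled stand-in for RCU.  Roughly:
 *
 *	new = kzalloc(len);		and copy the old array into it
 *	vq->private_data = new;		publish, under each vq->mutex
 *	vhost_scsi_flush(vs);		wait out readers of the old copy
 *	kfree(old);			now safe to free
 *
 * Workers only dereference the array through vq->private_data while
 * holding vq->mutex, which is what makes the flush sufficient.
 */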
1456
1457static int
1458vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1459                          struct vhost_scsi_target *t)
1460{
1461        struct se_portal_group *se_tpg;
1462        struct vhost_scsi_tport *tv_tport;
1463        struct vhost_scsi_tpg *tpg;
1464        struct vhost_virtqueue *vq;
1465        bool match = false;
1466        int index, ret, i;
1467        u8 target;
1468
1469        mutex_lock(&vhost_scsi_mutex);
1470        mutex_lock(&vs->dev.mutex);
1471        /* Verify that ring has been setup correctly. */
1472        for (index = 0; index < vs->dev.nvqs; ++index) {
1473                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1474                        ret = -EFAULT;
1475                        goto err_dev;
1476                }
1477        }
1478
1479        if (!vs->vs_tpg) {
1480                ret = 0;
1481                goto err_dev;
1482        }
1483
1484        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1485                target = i;
1486                tpg = vs->vs_tpg[target];
1487                if (!tpg)
1488                        continue;
1489
1490                mutex_lock(&tpg->tv_tpg_mutex);
1491                tv_tport = tpg->tport;
1492                if (!tv_tport) {
1493                        ret = -ENODEV;
1494                        goto err_tpg;
1495                }
1496
1497                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1498                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1499                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1500                                tv_tport->tport_name, tpg->tport_tpgt,
1501                                t->vhost_wwpn, t->vhost_tpgt);
1502                        ret = -EINVAL;
1503                        goto err_tpg;
1504                }
1505                tpg->tv_tpg_vhost_count--;
1506                tpg->vhost_scsi = NULL;
1507                vs->vs_tpg[target] = NULL;
1508                match = true;
1509                mutex_unlock(&tpg->tv_tpg_mutex);
1510                /*
1511                 * Release se_tpg->tpg_group.cg_item configfs dependency now
1512                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1513                 */
1514                se_tpg = &tpg->se_tpg;
1515                target_undepend_item(&se_tpg->tpg_group.cg_item);
1516        }
1517        if (match) {
1518                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1519                        vq = &vs->vqs[i].vq;
1520                        mutex_lock(&vq->mutex);
1521                        vq->private_data = NULL;
1522                        mutex_unlock(&vq->mutex);
1523                }
1524        }
1525        /*
1526         * Acts like synchronize_rcu(): wait until every reader of the
1527         * old vs->vs_tpg array has finished before it is freed below.
1528         */
1529        vhost_scsi_flush(vs);
1530        kfree(vs->vs_tpg);
1531        vs->vs_tpg = NULL;
1532        WARN_ON(vs->vs_events_nr);
1533        mutex_unlock(&vs->dev.mutex);
1534        mutex_unlock(&vhost_scsi_mutex);
1535        return 0;
1536
1537err_tpg:
1538        mutex_unlock(&tpg->tv_tpg_mutex);
1539err_dev:
1540        mutex_unlock(&vs->dev.mutex);
1541        mutex_unlock(&vhost_scsi_mutex);
1542        return ret;
1543}
1544
1545static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1546{
1547        struct vhost_virtqueue *vq;
1548        int i;
1549
1550        if (features & ~VHOST_SCSI_FEATURES)
1551                return -EOPNOTSUPP;
1552
1553        mutex_lock(&vs->dev.mutex);
1554        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1555            !vhost_log_access_ok(&vs->dev)) {
1556                mutex_unlock(&vs->dev.mutex);
1557                return -EFAULT;
1558        }
1559
1560        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1561                vq = &vs->vqs[i].vq;
1562                mutex_lock(&vq->mutex);
1563                vq->acked_features = features;
1564                mutex_unlock(&vq->mutex);
1565        }
1566        mutex_unlock(&vs->dev.mutex);
1567        return 0;
1568}
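/*
 * Feature bits arrive via the generic vhost handshake; a minimal
 * userspace sketch (illustrative only, error handling omitted):
 *
 *	__u64 features;
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= supported_by_vmm;	keep only what the VMM handles
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */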
1569
1570static int vhost_scsi_open(struct inode *inode, struct file *f)
1571{
1572        struct vhost_scsi *vs;
1573        struct vhost_virtqueue **vqs;
1574        int r = -ENOMEM, i;
1575
1576        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1577        if (!vs) {
1578                vs = vzalloc(sizeof(*vs));
1579                if (!vs)
1580                        goto err_vs;
1581        }
1582
1583        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1584        if (!vqs)
1585                goto err_vqs;
1586
1587        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1588        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1589
1590        vs->vs_events_nr = 0;
1591        vs->vs_events_missed = false;
1592
1593        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1594        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1595        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1596        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1597        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1598                vqs[i] = &vs->vqs[i].vq;
1599                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1600        }
1601        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1602
1603        vhost_scsi_init_inflight(vs, NULL);
1604
1605        f->private_data = vs;
1606        return 0;
1607
1608err_vqs:
1609        kvfree(vs);
1610err_vs:
1611        return r;
1612}
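/*
 * The vs allocation above prefers kzalloc() (physically contiguous,
 * cheaper to access) and quietly falls back to vzalloc() when a large
 * contiguous allocation is unavailable; __GFP_NOWARN suppresses the
 * failure warning since the fallback handles it.  kvfree() in the
 * error path and in vhost_scsi_release() frees either kind.
 */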
1613
1614static int vhost_scsi_release(struct inode *inode, struct file *f)
1615{
1616        struct vhost_scsi *vs = f->private_data;
1617        struct vhost_scsi_target t;
1618
1619        mutex_lock(&vs->dev.mutex);
1620        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1621        mutex_unlock(&vs->dev.mutex);
1622        vhost_scsi_clear_endpoint(vs, &t);
1623        vhost_dev_stop(&vs->dev);
1624        vhost_dev_cleanup(&vs->dev, false);
1625        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1626        vhost_scsi_flush(vs);
1627        kfree(vs->dev.vqs);
1628        kvfree(vs);
1629        return 0;
1630}
1631
1632static long
1633vhost_scsi_ioctl(struct file *f,
1634                 unsigned int ioctl,
1635                 unsigned long arg)
1636{
1637        struct vhost_scsi *vs = f->private_data;
1638        struct vhost_scsi_target backend;
1639        void __user *argp = (void __user *)arg;
1640        u64 __user *featurep = argp;
1641        u32 __user *eventsp = argp;
1642        u32 events_missed;
1643        u64 features;
1644        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1645        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1646
1647        switch (ioctl) {
1648        case VHOST_SCSI_SET_ENDPOINT:
1649                if (copy_from_user(&backend, argp, sizeof backend))
1650                        return -EFAULT;
1651                if (backend.reserved != 0)
1652                        return -EOPNOTSUPP;
1653
1654                return vhost_scsi_set_endpoint(vs, &backend);
1655        case VHOST_SCSI_CLEAR_ENDPOINT:
1656                if (copy_from_user(&backend, argp, sizeof backend))
1657                        return -EFAULT;
1658                if (backend.reserved != 0)
1659                        return -EOPNOTSUPP;
1660
1661                return vhost_scsi_clear_endpoint(vs, &backend);
1662        case VHOST_SCSI_GET_ABI_VERSION:
1663                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1664                        return -EFAULT;
1665                return 0;
1666        case VHOST_SCSI_SET_EVENTS_MISSED:
1667                if (get_user(events_missed, eventsp))
1668                        return -EFAULT;
1669                mutex_lock(&vq->mutex);
1670                vs->vs_events_missed = events_missed;
1671                mutex_unlock(&vq->mutex);
1672                return 0;
1673        case VHOST_SCSI_GET_EVENTS_MISSED:
1674                mutex_lock(&vq->mutex);
1675                events_missed = vs->vs_events_missed;
1676                mutex_unlock(&vq->mutex);
1677                if (put_user(events_missed, eventsp))
1678                        return -EFAULT;
1679                return 0;
1680        case VHOST_GET_FEATURES:
1681                features = VHOST_SCSI_FEATURES;
1682                if (copy_to_user(featurep, &features, sizeof features))
1683                        return -EFAULT;
1684                return 0;
1685        case VHOST_SET_FEATURES:
1686                if (copy_from_user(&features, featurep, sizeof features))
1687                        return -EFAULT;
1688                return vhost_scsi_set_features(vs, features);
1689        default:
1690                mutex_lock(&vs->dev.mutex);
1691                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1692                /* TODO: flush backend after dev ioctl. */
1693                if (r == -ENOIOCTLCMD)
1694                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1695                mutex_unlock(&vs->dev.mutex);
1696                return r;
1697        }
1698}
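/*
 * A typical VMM setup sequence against this ioctl interface, sketched
 * with illustrative values (error handling omitted, WWPN made up):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER);
 *	... VHOST_SET_VRING_NUM/ADDR/KICK/CALL for each virtqueue ...
 *	struct vhost_scsi_target t = { 0 };
 *	strncpy(t.vhost_wwpn, "naa.600140554cf3a18e", sizeof(t.vhost_wwpn));
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */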
1699
1700#ifdef CONFIG_COMPAT
1701static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1702                                unsigned long arg)
1703{
1704        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1705}
1706#endif
1707
1708static const struct file_operations vhost_scsi_fops = {
1709        .owner          = THIS_MODULE,
1710        .release        = vhost_scsi_release,
1711        .unlocked_ioctl = vhost_scsi_ioctl,
1712#ifdef CONFIG_COMPAT
1713        .compat_ioctl   = vhost_scsi_compat_ioctl,
1714#endif
1715        .open           = vhost_scsi_open,
1716        .llseek         = noop_llseek,
1717};
1718
1719static struct miscdevice vhost_scsi_misc = {
1720        MISC_DYNAMIC_MINOR,
1721        "vhost-scsi",
1722        &vhost_scsi_fops,
1723};
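/*
 * The positional initializers above map onto struct miscdevice's first
 * three members; the designated-initializer equivalent reads:
 *
 *	static struct miscdevice vhost_scsi_misc = {
 *		.minor = MISC_DYNAMIC_MINOR,
 *		.name  = "vhost-scsi",
 *		.fops  = &vhost_scsi_fops,
 *	};
 */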
1724
1725static int __init vhost_scsi_register(void)
1726{
1727        return misc_register(&vhost_scsi_misc);
1728}
1729
1730static int vhost_scsi_deregister(void)
1731{
1732        return misc_deregister(&vhost_scsi_misc);
1733}
1734
1735static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1736{
1737        switch (tport->tport_proto_id) {
1738        case SCSI_PROTOCOL_SAS:
1739                return "SAS";
1740        case SCSI_PROTOCOL_FCP:
1741                return "FCP";
1742        case SCSI_PROTOCOL_ISCSI:
1743                return "iSCSI";
1744        default:
1745                break;
1746        }
1747
1748        return "Unknown";
1749}
1750
1751static void
1752vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1753                  struct se_lun *lun, bool plug)
1754{
1756        struct vhost_scsi *vs = tpg->vhost_scsi;
1757        struct vhost_virtqueue *vq;
1758        u32 reason;
1759
1760        if (!vs)
1761                return;
1762
1763        mutex_lock(&vs->dev.mutex);
1764
1765        if (plug)
1766                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1767        else
1768                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1769
1770        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1771        mutex_lock(&vq->mutex);
1772        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1773                vhost_scsi_send_evt(vs, tpg, lun,
1774                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1775        mutex_unlock(&vq->mutex);
1776        mutex_unlock(&vs->dev.mutex);
1777}
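/*
 * The event only reaches the guest when it negotiated
 * VIRTIO_SCSI_F_HOTPLUG; otherwise nothing is queued here and the
 * guest has to rescan the bus on its own to notice the LUN change.
 */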
1778
1779static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1780{
1781        vhost_scsi_do_plug(tpg, lun, true);
1782}
1783
1784static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1785{
1786        vhost_scsi_do_plug(tpg, lun, false);
1787}
1788
1789static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1790                               struct se_lun *lun)
1791{
1792        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1793                                struct vhost_scsi_tpg, se_tpg);
1794
1795        mutex_lock(&vhost_scsi_mutex);
1796
1797        mutex_lock(&tpg->tv_tpg_mutex);
1798        tpg->tv_tpg_port_count++;
1799        mutex_unlock(&tpg->tv_tpg_mutex);
1800
1801        vhost_scsi_hotplug(tpg, lun);
1802
1803        mutex_unlock(&vhost_scsi_mutex);
1804
1805        return 0;
1806}
1807
1808static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1809                                  struct se_lun *lun)
1810{
1811        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1812                                struct vhost_scsi_tpg, se_tpg);
1813
1814        mutex_lock(&vhost_scsi_mutex);
1815
1816        mutex_lock(&tpg->tv_tpg_mutex);
1817        tpg->tv_tpg_port_count--;
1818        mutex_unlock(&tpg->tv_tpg_mutex);
1819
1820        vhost_scsi_hotunplug(tpg, lun);
1821
1822        mutex_unlock(&vhost_scsi_mutex);
1823}
1824
1825static struct se_node_acl *
1826vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
1827                       struct config_group *group,
1828                       const char *name)
1829{
1830        struct se_node_acl *se_nacl, *se_nacl_new;
1831        struct vhost_scsi_nacl *nacl;
1832        u64 wwpn = 0;
1833        u32 nexus_depth;
1834
1835        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1836                return ERR_PTR(-EINVAL); */
1837        se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
1838        if (!se_nacl_new)
1839                return ERR_PTR(-ENOMEM);
1840
1841        nexus_depth = 1;
1842        /*
1843         * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1844         * when converting a NodeACL from demo mode -> explict
1845         * when converting a NodeACL from demo mode -> explicit
1846        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1847                                name, nexus_depth);
1848        if (IS_ERR(se_nacl)) {
1849                vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
1850                return se_nacl;
1851        }
1852        /*
1853         * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
1854         */
1855        nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
1856        nacl->iport_wwpn = wwpn;
1857
1858        return se_nacl;
1859}
1860
1861static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
1862{
1863        struct vhost_scsi_nacl *nacl = container_of(se_acl,
1864                                struct vhost_scsi_nacl, se_node_acl);
1865        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1866        kfree(nacl);
1867}
1868
1869static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1870                                       struct se_session *se_sess)
1871{
1872        struct vhost_scsi_cmd *tv_cmd;
1873        unsigned int i;
1874
1875        if (!se_sess->sess_cmd_map)
1876                return;
1877
1878        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1879                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1880
1881                kfree(tv_cmd->tvc_sgl);
1882                kfree(tv_cmd->tvc_prot_sgl);
1883                kfree(tv_cmd->tvc_upages);
1884        }
1885}
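/*
 * This only frees the per-command scatterlist and page arrays that
 * vhost_scsi_make_nexus() preallocated into each tag; the sess_cmd_map
 * backing store itself is released by the target core when the session
 * goes away via transport_free_session()/transport_deregister_session().
 */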
1886
1887static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
1888        struct se_portal_group *se_tpg,
1889        const char *page,
1890        size_t count)
1891{
1892        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1893                                struct vhost_scsi_tpg, se_tpg);
1894        unsigned long val;
1895        int ret = kstrtoul(page, 0, &val);
1896
1897        if (ret) {
1898                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1899                return ret;
1900        }
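        /*
         * Valid values mirror enum target_prot_type: 0 disables fabric
         * protection, while 1 and 3 select DIF TYPE1/TYPE3; TYPE2
         * (val == 2) is deliberately rejected as unsupported here.
         */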
1901        if (val != 0 && val != 1 && val != 3) {
1902                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1903                return -EINVAL;
1904        }
1905        tpg->tv_fabric_prot_type = val;
1906
1907        return count;
1908}
1909
1910static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type(
1911        struct se_portal_group *se_tpg,
1912        char *page)
1913{
1914        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1915                                struct vhost_scsi_tpg, se_tpg);
1916
1917        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1918}
1919TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);
1920
1921static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1922        &vhost_scsi_tpg_attrib_fabric_prot_type.attr,
1923        NULL,
1924};
1925
1926static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1927                                const char *name)
1928{
1929        struct se_portal_group *se_tpg;
1930        struct se_session *se_sess;
1931        struct vhost_scsi_nexus *tv_nexus;
1932        struct vhost_scsi_cmd *tv_cmd;
1933        unsigned int i;
1934
1935        mutex_lock(&tpg->tv_tpg_mutex);
1936        if (tpg->tpg_nexus) {
1937                mutex_unlock(&tpg->tv_tpg_mutex);
1938                pr_debug("tpg->tpg_nexus already exists\n");
1939                return -EEXIST;
1940        }
1941        se_tpg = &tpg->se_tpg;
1942
1943        tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1944        if (!tv_nexus) {
1945                mutex_unlock(&tpg->tv_tpg_mutex);
1946                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1947                return -ENOMEM;
1948        }
1949        /*
1950         *  Initialize the struct se_session pointer and setup tagpool
1951         *  for struct vhost_scsi_cmd descriptors
1952         */
1953        tv_nexus->tvn_se_sess = transport_init_session_tags(
1954                                        VHOST_SCSI_DEFAULT_TAGS,
1955                                        sizeof(struct vhost_scsi_cmd),
1956                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1957        if (IS_ERR(tv_nexus->tvn_se_sess)) {
1958                mutex_unlock(&tpg->tv_tpg_mutex);
1959                kfree(tv_nexus);
1960                return -ENOMEM;
1961        }
1962        se_sess = tv_nexus->tvn_se_sess;
1963        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1964                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1965
1966                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1967                                        VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1968                if (!tv_cmd->tvc_sgl) {
1969                        mutex_unlock(&tpg->tv_tpg_mutex);
1970                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1971                        goto out;
1972                }
1973
1974                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1975                                        VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1976                if (!tv_cmd->tvc_upages) {
1977                        mutex_unlock(&tpg->tv_tpg_mutex);
1978                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1979                        goto out;
1980                }
1981
1982                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1983                                        VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1984                if (!tv_cmd->tvc_prot_sgl) {
1985                        mutex_unlock(&tpg->tv_tpg_mutex);
1986                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1987                        goto out;
1988                }
1989        }
1990        /*
1991         * Since we are running in 'demo mode' this call will generate a
1992         * struct se_node_acl for the vhost_scsi struct se_portal_group with
1993         * the SCSI Initiator port name of the passed configfs group 'name'.
1994         */
1995        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1996                                se_tpg, (unsigned char *)name);
1997        if (!tv_nexus->tvn_se_sess->se_node_acl) {
1998                mutex_unlock(&tpg->tv_tpg_mutex);
1999                pr_debug("core_tpg_check_initiator_node_acl() failed"
2000                                " for %s\n", name);
2001                goto out;
2002        }
2003        /*
2004         * Now register the TCM vhost virtual I_T Nexus as active.
2005         */
2006        transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
2007                        tv_nexus->tvn_se_sess, tv_nexus);
2008        tpg->tpg_nexus = tv_nexus;
2009
2010        mutex_unlock(&tpg->tv_tpg_mutex);
2011        return 0;
2012
2013out:
2014        vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
2015        transport_free_session(se_sess);
2016        kfree(tv_nexus);
2017        return -ENOMEM;
2018}
2019
2020static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2021{
2022        struct se_session *se_sess;
2023        struct vhost_scsi_nexus *tv_nexus;
2024
2025        mutex_lock(&tpg->tv_tpg_mutex);
2026        tv_nexus = tpg->tpg_nexus;
2027        if (!tv_nexus) {
2028                mutex_unlock(&tpg->tv_tpg_mutex);
2029                return -ENODEV;
2030        }
2031
2032        se_sess = tv_nexus->tvn_se_sess;
2033        if (!se_sess) {
2034                mutex_unlock(&tpg->tv_tpg_mutex);
2035                return -ENODEV;
2036        }
2037
2038        if (tpg->tv_tpg_port_count != 0) {
2039                mutex_unlock(&tpg->tv_tpg_mutex);
2040                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2041                        " active TPG port count: %d\n",
2042                        tpg->tv_tpg_port_count);
2043                return -EBUSY;
2044        }
2045
2046        if (tpg->tv_tpg_vhost_count != 0) {
2047                mutex_unlock(&tpg->tv_tpg_mutex);
2048                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2049                        " active TPG vhost count: %d\n",
2050                        tpg->tv_tpg_vhost_count);
2051                return -EBUSY;
2052        }
2053
2054        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2055                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2056                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2057
2058        vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
2059        /*
2060         * Release the SCSI I_T Nexus to the emulated vhost Target Port
2061         */
2062        transport_deregister_session(tv_nexus->tvn_se_sess);
2063        tpg->tpg_nexus = NULL;
2064        mutex_unlock(&tpg->tv_tpg_mutex);
2065
2066        kfree(tv_nexus);
2067        return 0;
2068}
2069
2070static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
2071                                        char *page)
2072{
2073        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2074                                struct vhost_scsi_tpg, se_tpg);
2075        struct vhost_scsi_nexus *tv_nexus;
2076        ssize_t ret;
2077
2078        mutex_lock(&tpg->tv_tpg_mutex);
2079        tv_nexus = tpg->tpg_nexus;
2080        if (!tv_nexus) {
2081                mutex_unlock(&tpg->tv_tpg_mutex);
2082                return -ENODEV;
2083        }
2084        ret = snprintf(page, PAGE_SIZE, "%s\n",
2085                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2086        mutex_unlock(&tpg->tv_tpg_mutex);
2087
2088        return ret;
2089}
2090
2091static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
2092                                         const char *page,
2093                                         size_t count)
2094{
2095        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2096                                struct vhost_scsi_tpg, se_tpg);
2097        struct vhost_scsi_tport *tport_wwn = tpg->tport;
2098        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2099        int ret;
2100        /*
2101         * Shut down the active I_T nexus if 'NULL' is passed.
2102         */
2103        if (!strncmp(page, "NULL", 4)) {
2104                ret = vhost_scsi_drop_nexus(tpg);
2105                return (!ret) ? count : ret;
2106        }
2107        /*
2108         * Otherwise make sure the passed virtual Initiator port WWN matches
2109         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2110         * vhost_scsi_make_nexus().
2111         */
2112        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2113                pr_err("Emulated NAA SAS Address: %s, exceeds"
2114                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
2115                return -EINVAL;
2116        }
2117        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2118
2119        ptr = strstr(i_port, "naa.");
2120        if (ptr) {
2121                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2122                        pr_err("Passed SAS Initiator Port %s does not"
2123                                " match target port protoid: %s\n", i_port,
2124                                vhost_scsi_dump_proto_id(tport_wwn));
2125                        return -EINVAL;
2126                }
2127                port_ptr = &i_port[0];
2128                goto check_newline;
2129        }
2130        ptr = strstr(i_port, "fc.");
2131        if (ptr) {
2132                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2133                        pr_err("Passed FCP Initiator Port %s does not"
2134                                " match target port protoid: %s\n", i_port,
2135                                vhost_scsi_dump_proto_id(tport_wwn));
2136                        return -EINVAL;
2137                }
2138                port_ptr = &i_port[3]; /* Skip over "fc." */
2139                goto check_newline;
2140        }
2141        ptr = strstr(i_port, "iqn.");
2142        if (ptr) {
2143                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2144                        pr_err("Passed iSCSI Initiator Port %s does not"
2145                                " match target port protoid: %s\n", i_port,
2146                                vhost_scsi_dump_proto_id(tport_wwn));
2147                        return -EINVAL;
2148                }
2149                port_ptr = &i_port[0];
2150                goto check_newline;
2151        }
2152        pr_err("Unable to locate prefix for emulated Initiator Port:"
2153                        " %s\n", i_port);
2154        return -EINVAL;
2155        /*
2156         * Clear any trailing newline for the NAA WWN
2157         */
2158check_newline:
2159        if (i_port[strlen(i_port)-1] == '\n')
2160                i_port[strlen(i_port)-1] = '\0';
2161
2162        ret = vhost_scsi_make_nexus(tpg, port_ptr);
2163        if (ret < 0)
2164                return ret;
2165
2166        return count;
2167}
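/*
 * Accepted initiator-port strings, checked against the tport's
 * protocol id set in vhost_scsi_make_tport():
 *
 *	"naa.<hex digits>"	SAS
 *	"fc.<wwpn>"		FCP ("fc." prefix is stripped)
 *	"iqn.<iscsi name>"	iSCSI
 *
 * Writing the literal string "NULL" tears the active nexus down.
 */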
2168
2169TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
2170
2171static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2172        &vhost_scsi_tpg_nexus.attr,
2173        NULL,
2174};
2175
2176static struct se_portal_group *
2177vhost_scsi_make_tpg(struct se_wwn *wwn,
2178                   struct config_group *group,
2179                   const char *name)
2180{
2181        struct vhost_scsi_tport *tport = container_of(wwn,
2182                        struct vhost_scsi_tport, tport_wwn);
2183
2184        struct vhost_scsi_tpg *tpg;
2185        u16 tpgt;
2186        int ret;
2187
2188        if (strstr(name, "tpgt_") != name)
2189                return ERR_PTR(-EINVAL);
2190        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2191                return ERR_PTR(-EINVAL);
2192
2193        tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2194        if (!tpg) {
2195                pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2196                return ERR_PTR(-ENOMEM);
2197        }
2198        mutex_init(&tpg->tv_tpg_mutex);
2199        INIT_LIST_HEAD(&tpg->tv_tpg_list);
2200        tpg->tport = tport;
2201        tpg->tport_tpgt = tpgt;
2202
2203        ret = core_tpg_register(&vhost_scsi_ops, wwn,
2204                                &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2205        if (ret < 0) {
2206                kfree(tpg);
2207                return NULL;
2208        }
2209        mutex_lock(&vhost_scsi_mutex);
2210        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2211        mutex_unlock(&vhost_scsi_mutex);
2212
2213        return &tpg->se_tpg;
2214}
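/*
 * The configfs directory name must be "tpgt_<n>", with n decimal and
 * below VHOST_SCSI_MAX_TARGET; e.g. mkdir .../vhost/<wwpn>/tpgt_1
 * lands here with name == "tpgt_1" and registers TPG tag 1.
 */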
2215
2216static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2217{
2218        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2219                                struct vhost_scsi_tpg, se_tpg);
2220
2221        mutex_lock(&vhost_scsi_mutex);
2222        list_del(&tpg->tv_tpg_list);
2223        mutex_unlock(&vhost_scsi_mutex);
2224        /*
2225         * Release the virtual I_T Nexus for this vhost TPG
2226         */
2227        vhost_scsi_drop_nexus(tpg);
2228        /*
2229         * Deregister the se_tpg from TCM.
2230         */
2231        core_tpg_deregister(se_tpg);
2232        kfree(tpg);
2233}
2234
2235static struct se_wwn *
2236vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2237                     struct config_group *group,
2238                     const char *name)
2239{
2240        struct vhost_scsi_tport *tport;
2241        char *ptr;
2242        u64 wwpn = 0;
2243        int off = 0;
2244
2245        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2246                return ERR_PTR(-EINVAL); */
2247
2248        tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2249        if (!tport) {
2250                pr_err("Unable to allocate struct vhost_scsi_tport\n");
2251                return ERR_PTR(-ENOMEM);
2252        }
2253        tport->tport_wwpn = wwpn;
2254        /*
2255         * Determine the emulated Protocol Identifier and Target Port Name
2256         * based on the incoming configfs directory name.
2257         */
2258        ptr = strstr(name, "naa.");
2259        if (ptr) {
2260                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2261                goto check_len;
2262        }
2263        ptr = strstr(name, "fc.");
2264        if (ptr) {
2265                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2266                off = 3; /* Skip over "fc." */
2267                goto check_len;
2268        }
2269        ptr = strstr(name, "iqn.");
2270        if (ptr) {
2271                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2272                goto check_len;
2273        }
2274
2275        pr_err("Unable to locate prefix for emulated Target Port:"
2276                        " %s\n", name);
2277        kfree(tport);
2278        return ERR_PTR(-EINVAL);
2279
2280check_len:
2281        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2282                pr_err("Emulated %s Address: %s, exceeds"
2283                        " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2284                        VHOST_SCSI_NAMELEN);
2285                kfree(tport);
2286                return ERR_PTR(-EINVAL);
2287        }
2288        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2289
2290        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2291                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2292
2293        return &tport->tport_wwn;
2294}
2295
2296static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2297{
2298        struct vhost_scsi_tport *tport = container_of(wwn,
2299                                struct vhost_scsi_tport, tport_wwn);
2300
2301        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2302                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2303                tport->tport_name);
2304
2305        kfree(tport);
2306}
2307
2308static ssize_t
2309vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
2310                                char *page)
2311{
2312        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2313                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2314                utsname()->machine);
2315}
2316
2317TF_WWN_ATTR_RO(vhost_scsi, version);
2318
2319static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2320        &vhost_scsi_wwn_version.attr,
2321        NULL,
2322};
2323
2324static struct target_core_fabric_ops vhost_scsi_ops = {
2325        .module                         = THIS_MODULE,
2326        .name                           = "vhost",
2327        .get_fabric_name                = vhost_scsi_get_fabric_name,
2328        .get_fabric_proto_ident         = vhost_scsi_get_fabric_proto_ident,
2329        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2330        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2331        .tpg_get_default_depth          = vhost_scsi_get_default_depth,
2332        .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
2333        .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
2334        .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
2335        .tpg_check_demo_mode            = vhost_scsi_check_true,
2336        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2337        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2338        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2339        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2340        .tpg_alloc_fabric_acl           = vhost_scsi_alloc_fabric_acl,
2341        .tpg_release_fabric_acl         = vhost_scsi_release_fabric_acl,
2342        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2343        .release_cmd                    = vhost_scsi_release_cmd,
2344        .check_stop_free                = vhost_scsi_check_stop_free,
2345        .shutdown_session               = vhost_scsi_shutdown_session,
2346        .close_session                  = vhost_scsi_close_session,
2347        .sess_get_index                 = vhost_scsi_sess_get_index,
2348        .sess_get_initiator_sid         = NULL,
2349        .write_pending                  = vhost_scsi_write_pending,
2350        .write_pending_status           = vhost_scsi_write_pending_status,
2351        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2352        .get_task_tag                   = vhost_scsi_get_task_tag,
2353        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2354        .queue_data_in                  = vhost_scsi_queue_data_in,
2355        .queue_status                   = vhost_scsi_queue_status,
2356        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2357        .aborted_task                   = vhost_scsi_aborted_task,
2358        /*
2359         * Set up callbacks for generic logic in target_core_fabric_configfs.c
2360         */
2361        .fabric_make_wwn                = vhost_scsi_make_tport,
2362        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2363        .fabric_make_tpg                = vhost_scsi_make_tpg,
2364        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2365        .fabric_post_link               = vhost_scsi_port_link,
2366        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2367        .fabric_make_np                 = NULL,
2368        .fabric_drop_np                 = NULL,
2369        .fabric_make_nodeacl            = vhost_scsi_make_nodeacl,
2370        .fabric_drop_nodeacl            = vhost_scsi_drop_nodeacl,
2371
2372        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2373        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2374        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2375};
2376
2377static int __init vhost_scsi_init(void)
2378{
2379        int ret = -ENOMEM;
2380
2381        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2382                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2383                utsname()->machine);
2384
2385        /*
2386         * Use our own dedicated workqueue for submitting I/O into
2387         * target core to avoid contention within system_wq.
2388         */
2389        vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2390        if (!vhost_scsi_workqueue)
2391                goto out;
2392
2393        ret = vhost_scsi_register();
2394        if (ret < 0)
2395                goto out_destroy_workqueue;
2396
2397        ret = target_register_template(&vhost_scsi_ops);
2398        if (ret < 0)
2399                goto out_vhost_scsi_deregister;
2400
2401        return 0;
2402
2403out_vhost_scsi_deregister:
2404        vhost_scsi_deregister();
2405out_destroy_workqueue:
2406        destroy_workqueue(vhost_scsi_workqueue);
2407out:
2408        return ret;
2409}
2410
2411static void vhost_scsi_exit(void)
2412{
2413        target_unregister_template(&vhost_scsi_ops);
2414        vhost_scsi_deregister();
2415        destroy_workqueue(vhost_scsi_workqueue);
2416}
2417
2418MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2419MODULE_ALIAS("tcm_vhost");
2420MODULE_LICENSE("GPL");
2421module_init(vhost_scsi_init);
2422module_exit(vhost_scsi_exit);
2423