linux/drivers/vhost/scsi.c
   1/*******************************************************************************
   2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
   3 *
   4 * (C) Copyright 2010-2013 Datera, Inc.
   5 * (C) Copyright 2010-2012 IBM Corp.
   6 *
   7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   8 *
   9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
  10 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 ****************************************************************************/
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <generated/utsrelease.h>
  27#include <linux/utsname.h>
  28#include <linux/init.h>
  29#include <linux/slab.h>
  30#include <linux/kthread.h>
  31#include <linux/types.h>
  32#include <linux/string.h>
  33#include <linux/configfs.h>
  34#include <linux/ctype.h>
  35#include <linux/compat.h>
  36#include <linux/eventfd.h>
  37#include <linux/fs.h>
  38#include <linux/vmalloc.h>
  39#include <linux/miscdevice.h>
  40#include <asm/unaligned.h>
  41#include <scsi/scsi_common.h>
  42#include <scsi/scsi_proto.h>
  43#include <target/target_core_base.h>
  44#include <target/target_core_fabric.h>
  45#include <linux/vhost.h>
  46#include <linux/virtio_scsi.h>
  47#include <linux/llist.h>
  48#include <linux/bitmap.h>
  49#include <linux/percpu_ida.h>
  50
  51#include "vhost.h"
  52
  53#define VHOST_SCSI_VERSION  "v0.1"
  54#define VHOST_SCSI_NAMELEN 256
  55#define VHOST_SCSI_MAX_CDB_SIZE 32
  56#define VHOST_SCSI_DEFAULT_TAGS 256
  57#define VHOST_SCSI_PREALLOC_SGLS 2048
  58#define VHOST_SCSI_PREALLOC_UPAGES 2048
  59#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
  60
  61struct vhost_scsi_inflight {
  62        /* Wait for the flush operation to finish */
  63        struct completion comp;
  64        /* Refcount for the inflight reqs */
  65        struct kref kref;
  66};
  67
  68struct vhost_scsi_cmd {
  69        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
  70        int tvc_vq_desc;
  71        /* virtio-scsi initiator task attribute */
  72        int tvc_task_attr;
  73        /* virtio-scsi response incoming iovecs */
  74        int tvc_in_iovs;
  75        /* virtio-scsi initiator data direction */
  76        enum dma_data_direction tvc_data_direction;
  77        /* Expected data transfer length from virtio-scsi header */
  78        u32 tvc_exp_data_len;
  79        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
  80        u64 tvc_tag;
  81        /* The number of scatterlists associated with this cmd */
  82        u32 tvc_sgl_count;
  83        u32 tvc_prot_sgl_count;
  84        /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
  85        u32 tvc_lun;
  86        /* Pointer to the SGL formatted memory from virtio-scsi */
  87        struct scatterlist *tvc_sgl;
  88        struct scatterlist *tvc_prot_sgl;
  89        struct page **tvc_upages;
   90        /* Copy of the response header iovec from the virtqueue */
  91        struct iovec tvc_resp_iov;
  92        /* Pointer to vhost_scsi for our device */
  93        struct vhost_scsi *tvc_vhost;
  94        /* Pointer to vhost_virtqueue for the cmd */
  95        struct vhost_virtqueue *tvc_vq;
  96        /* Pointer to vhost nexus memory */
  97        struct vhost_scsi_nexus *tvc_nexus;
  98        /* The TCM I/O descriptor that is accessed via container_of() */
  99        struct se_cmd tvc_se_cmd;
 100        /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 101        struct work_struct work;
 102        /* Copy of the incoming SCSI command descriptor block (CDB) */
 103        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 104        /* Sense buffer that will be mapped into outgoing status */
 105        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 106        /* Completed commands list, serviced from vhost worker thread */
 107        struct llist_node tvc_completion_list;
 108        /* Used to track inflight cmd */
 109        struct vhost_scsi_inflight *inflight;
 110};
 111
 112struct vhost_scsi_nexus {
 113        /* Pointer to TCM session for I_T Nexus */
 114        struct se_session *tvn_se_sess;
 115};
 116
 117struct vhost_scsi_tpg {
 118        /* Vhost port target portal group tag for TCM */
 119        u16 tport_tpgt;
  120        /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
 121        int tv_tpg_port_count;
 122        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 123        int tv_tpg_vhost_count;
 124        /* Used for enabling T10-PI with legacy devices */
 125        int tv_fabric_prot_type;
 126        /* list for vhost_scsi_list */
 127        struct list_head tv_tpg_list;
 128        /* Used to protect access for tpg_nexus */
 129        struct mutex tv_tpg_mutex;
 130        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
 131        struct vhost_scsi_nexus *tpg_nexus;
 132        /* Pointer back to vhost_scsi_tport */
 133        struct vhost_scsi_tport *tport;
 134        /* Returned by vhost_scsi_make_tpg() */
 135        struct se_portal_group se_tpg;
 136        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 137        struct vhost_scsi *vhost_scsi;
 138};
 139
 140struct vhost_scsi_tport {
 141        /* SCSI protocol the tport is providing */
 142        u8 tport_proto_id;
 143        /* Binary World Wide unique Port Name for Vhost Target port */
 144        u64 tport_wwpn;
 145        /* ASCII formatted WWPN for Vhost Target port */
 146        char tport_name[VHOST_SCSI_NAMELEN];
 147        /* Returned by vhost_scsi_make_tport() */
 148        struct se_wwn tport_wwn;
 149};
 150
 151struct vhost_scsi_evt {
 152        /* event to be sent to guest */
 153        struct virtio_scsi_event event;
 154        /* event list, serviced from vhost worker thread */
 155        struct llist_node list;
 156};
 157
 158enum {
 159        VHOST_SCSI_VQ_CTL = 0,
 160        VHOST_SCSI_VQ_EVT = 1,
 161        VHOST_SCSI_VQ_IO = 2,
 162};
 163
 164/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 165enum {
 166        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
 167                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
 168};
 169
 170#define VHOST_SCSI_MAX_TARGET   256
 171#define VHOST_SCSI_MAX_VQ       128
 172#define VHOST_SCSI_MAX_EVENT    128
 173
 174struct vhost_scsi_virtqueue {
 175        struct vhost_virtqueue vq;
 176        /*
  177         * Reference counting for inflight reqs, used for the flush operation.
  178         * At any given time one counter tracks newly submitted commands, while
  179         * we wait for the other one to drop to zero.
  180         */
 181        struct vhost_scsi_inflight inflights[2];
 182        /*
 183         * Indicate current inflight in use, protected by vq->mutex.
 184         * Writers must also take dev mutex and flush under it.
 185         */
 186        int inflight_idx;
 187};
 188
 189struct vhost_scsi {
 190        /* Protected by vhost_scsi->dev.mutex */
 191        struct vhost_scsi_tpg **vs_tpg;
 192        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 193
 194        struct vhost_dev dev;
 195        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 196
 197        struct vhost_work vs_completion_work; /* cmd completion work item */
 198        struct llist_head vs_completion_list; /* cmd completion queue */
 199
 200        struct vhost_work vs_event_work; /* evt injection work item */
 201        struct llist_head vs_event_list; /* evt injection queue */
 202
 203        bool vs_events_missed; /* any missed events, protected by vq->mutex */
 204        int vs_events_nr; /* num of pending events, protected by vq->mutex */
 205};
 206
 207static struct workqueue_struct *vhost_scsi_workqueue;
 208
  209/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
 210static DEFINE_MUTEX(vhost_scsi_mutex);
 211static LIST_HEAD(vhost_scsi_list);
 212
 213static void vhost_scsi_done_inflight(struct kref *kref)
 214{
 215        struct vhost_scsi_inflight *inflight;
 216
 217        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
 218        complete(&inflight->comp);
 219}
 220
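     /*
      * Swap the active inflight counter on every virtqueue.  When old_inflight
      * is non-NULL the previous counters are returned so that
      * vhost_scsi_flush() can wait for them to drain to zero.
      */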
 221static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 222                                    struct vhost_scsi_inflight *old_inflight[])
 223{
 224        struct vhost_scsi_inflight *new_inflight;
 225        struct vhost_virtqueue *vq;
 226        int idx, i;
 227
 228        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 229                vq = &vs->vqs[i].vq;
 230
 231                mutex_lock(&vq->mutex);
 232
  233                /* store old inflight */
 234                idx = vs->vqs[i].inflight_idx;
 235                if (old_inflight)
 236                        old_inflight[i] = &vs->vqs[i].inflights[idx];
 237
  238                /* set up new inflight */
 239                vs->vqs[i].inflight_idx = idx ^ 1;
 240                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
 241                kref_init(&new_inflight->kref);
 242                init_completion(&new_inflight->comp);
 243
 244                mutex_unlock(&vq->mutex);
 245        }
 246}
 247
 248static struct vhost_scsi_inflight *
 249vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 250{
 251        struct vhost_scsi_inflight *inflight;
 252        struct vhost_scsi_virtqueue *svq;
 253
 254        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
 255        inflight = &svq->inflights[svq->inflight_idx];
 256        kref_get(&inflight->kref);
 257
 258        return inflight;
 259}
 260
 261static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 262{
 263        kref_put(&inflight->kref, vhost_scsi_done_inflight);
 264}
 265
 266static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 267{
 268        return 1;
 269}
 270
 271static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 272{
 273        return 0;
 274}
 275
 276static char *vhost_scsi_get_fabric_name(void)
 277{
 278        return "vhost";
 279}
 280
 281static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 282{
 283        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 284                                struct vhost_scsi_tpg, se_tpg);
 285        struct vhost_scsi_tport *tport = tpg->tport;
 286
 287        return &tport->tport_name[0];
 288}
 289
 290static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 291{
 292        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 293                                struct vhost_scsi_tpg, se_tpg);
 294        return tpg->tport_tpgt;
 295}
 296
 297static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 298{
 299        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 300                                struct vhost_scsi_tpg, se_tpg);
 301
 302        return tpg->tv_fabric_prot_type;
 303}
 304
 305static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 306{
 307        return 1;
 308}
 309
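     /*
      * Release the pages pinned for the data and protection scatterlists,
      * drop the inflight reference and return the command's tag to the
      * per-session tag pool.
      */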
 310static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 311{
 312        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 313                                struct vhost_scsi_cmd, tvc_se_cmd);
 314        struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 315        int i;
 316
 317        if (tv_cmd->tvc_sgl_count) {
 318                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 319                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 320        }
 321        if (tv_cmd->tvc_prot_sgl_count) {
 322                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
 323                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 324        }
 325
 326        vhost_scsi_put_inflight(tv_cmd->inflight);
 327        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 328}
 329
 330static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 331{
 332        return 0;
 333}
 334
 335static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 336{
 337        /* Go ahead and process the write immediately */
 338        target_execute_cmd(se_cmd);
 339        return 0;
 340}
 341
 342static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 343{
 344        return 0;
 345}
 346
 347static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 348{
 349        return;
 350}
 351
 352static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 353{
 354        return 0;
 355}
 356
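     /*
      * Queue a finished command on the completion list and kick the vhost
      * worker, which services it in vhost_scsi_complete_cmd_work().
      */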
 357static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 358{
 359        struct vhost_scsi *vs = cmd->tvc_vhost;
 360
 361        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 362
 363        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 364}
 365
 366static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 367{
 368        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 369                                struct vhost_scsi_cmd, tvc_se_cmd);
 370        vhost_scsi_complete_cmd(cmd);
 371        return 0;
 372}
 373
 374static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 375{
 376        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 377                                struct vhost_scsi_cmd, tvc_se_cmd);
 378        vhost_scsi_complete_cmd(cmd);
 379        return 0;
 380}
 381
 382static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 383{
 384        return;
 385}
 386
 387static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 388{
 389        return;
 390}
 391
 392static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 393{
 394        vs->vs_events_nr--;
 395        kfree(evt);
 396}
 397
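     /*
      * Allocate an event to be queued for the guest's event virtqueue.
      * vs_events_nr and vs_events_missed are protected by vq->mutex, so the
      * caller is expected to hold it; on allocation failure or when the
      * VHOST_SCSI_MAX_EVENT limit is exceeded the event is recorded as missed.
      */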
 398static struct vhost_scsi_evt *
 399vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 400                       u32 event, u32 reason)
 401{
 402        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 403        struct vhost_scsi_evt *evt;
 404
 405        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 406                vs->vs_events_missed = true;
 407                return NULL;
 408        }
 409
 410        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 411        if (!evt) {
 412                vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 413                vs->vs_events_missed = true;
 414                return NULL;
 415        }
 416
 417        evt->event.event = cpu_to_vhost32(vq, event);
 418        evt->event.reason = cpu_to_vhost32(vq, reason);
 419        vs->vs_events_nr++;
 420
 421        return evt;
 422}
 423
 424static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 425{
 426        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 427
 428        /* TODO locking against target/backend threads? */
 429        transport_generic_free_cmd(se_cmd, 0);
 430
 431}
 432
 433static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 434{
 435        return target_put_sess_cmd(se_cmd);
 436}
 437
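     /*
      * Copy a single event into the guest's event virtqueue.  Called from the
      * vhost worker with vq->mutex held; if no usable descriptor is available
      * the event is recorded as missed and VIRTIO_SCSI_T_EVENTS_MISSED is set
      * on the next one that goes out.
      */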
 438static void
 439vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 440{
 441        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 442        struct virtio_scsi_event *event = &evt->event;
 443        struct virtio_scsi_event __user *eventp;
 444        unsigned out, in;
 445        int head, ret;
 446
 447        if (!vq->private_data) {
 448                vs->vs_events_missed = true;
 449                return;
 450        }
 451
 452again:
 453        vhost_disable_notify(&vs->dev, vq);
 454        head = vhost_get_vq_desc(vq, vq->iov,
 455                        ARRAY_SIZE(vq->iov), &out, &in,
 456                        NULL, NULL);
 457        if (head < 0) {
 458                vs->vs_events_missed = true;
 459                return;
 460        }
 461        if (head == vq->num) {
 462                if (vhost_enable_notify(&vs->dev, vq))
 463                        goto again;
 464                vs->vs_events_missed = true;
 465                return;
 466        }
 467
  468        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
 469                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
 470                                vq->iov[out].iov_len);
 471                vs->vs_events_missed = true;
 472                return;
 473        }
 474
 475        if (vs->vs_events_missed) {
 476                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
 477                vs->vs_events_missed = false;
 478        }
 479
 480        eventp = vq->iov[out].iov_base;
 481        ret = __copy_to_user(eventp, event, sizeof(*event));
 482        if (!ret)
 483                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 484        else
 485                vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 486}
 487
 488static void vhost_scsi_evt_work(struct vhost_work *work)
 489{
 490        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 491                                        vs_event_work);
 492        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 493        struct vhost_scsi_evt *evt, *t;
 494        struct llist_node *llnode;
 495
 496        mutex_lock(&vq->mutex);
 497        llnode = llist_del_all(&vs->vs_event_list);
 498        llist_for_each_entry_safe(evt, t, llnode, list) {
 499                vhost_scsi_do_evt_work(vs, evt);
 500                vhost_scsi_free_evt(vs, evt);
 501        }
 502        mutex_unlock(&vq->mutex);
 503}
 504
 505/* Fill in status and signal that we are done processing this command
 506 *
 507 * This is scheduled in the vhost work queue so we are called with the owner
 508 * process mm and can access the vring.
 509 */
 510static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 511{
 512        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 513                                        vs_completion_work);
 514        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 515        struct virtio_scsi_cmd_resp v_rsp;
 516        struct vhost_scsi_cmd *cmd, *t;
 517        struct llist_node *llnode;
 518        struct se_cmd *se_cmd;
 519        struct iov_iter iov_iter;
 520        int ret, vq;
 521
 522        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 523        llnode = llist_del_all(&vs->vs_completion_list);
 524        llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
 525                se_cmd = &cmd->tvc_se_cmd;
 526
 527                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 528                        cmd, se_cmd->residual_count, se_cmd->scsi_status);
 529
 530                memset(&v_rsp, 0, sizeof(v_rsp));
 531                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
 532                /* TODO is status_qualifier field needed? */
 533                v_rsp.status = se_cmd->scsi_status;
 534                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
 535                                                 se_cmd->scsi_sense_length);
 536                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 537                       se_cmd->scsi_sense_length);
 538
 539                iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
 540                              cmd->tvc_in_iovs, sizeof(v_rsp));
 541                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 542                if (likely(ret == sizeof(v_rsp))) {
 543                        struct vhost_scsi_virtqueue *q;
 544                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 545                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 546                        vq = q - vs->vqs;
 547                        __set_bit(vq, signal);
 548                } else
 549                        pr_err("Faulted on virtio_scsi_cmd_resp\n");
 550
 551                vhost_scsi_free_cmd(cmd);
 552        }
 553
 554        vq = -1;
 555        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 556                < VHOST_SCSI_MAX_VQ)
 557                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 558}
 559
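     /*
      * Grab a pre-allocated command descriptor from the session tag pool and
      * initialize it.  The pointers to the pre-allocated scatterlists and
      * page array are preserved across the memset and reattached afterwards.
      */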
 560static struct vhost_scsi_cmd *
 561vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 562                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 563                   u32 exp_data_len, int data_direction)
 564{
 565        struct vhost_scsi_cmd *cmd;
 566        struct vhost_scsi_nexus *tv_nexus;
 567        struct se_session *se_sess;
 568        struct scatterlist *sg, *prot_sg;
 569        struct page **pages;
 570        int tag;
 571
 572        tv_nexus = tpg->tpg_nexus;
 573        if (!tv_nexus) {
 574                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 575                return ERR_PTR(-EIO);
 576        }
 577        se_sess = tv_nexus->tvn_se_sess;
 578
 579        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 580        if (tag < 0) {
 581                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 582                return ERR_PTR(-ENOMEM);
 583        }
 584
 585        cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
 586        sg = cmd->tvc_sgl;
 587        prot_sg = cmd->tvc_prot_sgl;
 588        pages = cmd->tvc_upages;
 589        memset(cmd, 0, sizeof(*cmd));
 590        cmd->tvc_sgl = sg;
 591        cmd->tvc_prot_sgl = prot_sg;
 592        cmd->tvc_upages = pages;
 593        cmd->tvc_se_cmd.map_tag = tag;
 594        cmd->tvc_tag = scsi_tag;
 595        cmd->tvc_lun = lun;
 596        cmd->tvc_task_attr = task_attr;
 597        cmd->tvc_exp_data_len = exp_data_len;
 598        cmd->tvc_data_direction = data_direction;
 599        cmd->tvc_nexus = tv_nexus;
 600        cmd->inflight = vhost_scsi_get_inflight(vq);
 601
 602        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 603
 604        return cmd;
 605}
 606
 607/*
 608 * Map a user memory range into a scatterlist
 609 *
 610 * Returns the number of scatterlist entries used or -errno on error.
 611 */
 612static int
 613vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 614                      struct iov_iter *iter,
 615                      struct scatterlist *sgl,
 616                      bool write)
 617{
 618        struct page **pages = cmd->tvc_upages;
 619        struct scatterlist *sg = sgl;
 620        ssize_t bytes;
 621        size_t offset;
 622        unsigned int npages = 0;
 623
 624        bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
 625                                VHOST_SCSI_PREALLOC_UPAGES, &offset);
 626        /* No pages were pinned */
 627        if (bytes <= 0)
 628                return bytes < 0 ? bytes : -EFAULT;
 629
 630        iov_iter_advance(iter, bytes);
 631
 632        while (bytes) {
 633                unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
 634                sg_set_page(sg++, pages[npages++], n, offset);
 635                bytes -= n;
 636                offset = 0;
 637        }
 638        return npages;
 639}
 640
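     /*
      * Count how many scatterlist entries an iovec range will need and verify
      * that it fits within the pre-allocated maximum.
      */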
 641static int
 642vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 643{
 644        int sgl_count = 0;
 645
 646        if (!iter || !iter->iov) {
 647                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
 648                       " present\n", __func__, bytes);
 649                return -EINVAL;
 650        }
 651
 652        sgl_count = iov_iter_npages(iter, 0xffff);
 653        if (sgl_count > max_sgls) {
 654                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
 655                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
 656                return -EINVAL;
 657        }
 658        return sgl_count;
 659}
 660
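     /*
      * Fill a scatterlist from the iterator, one pinned range at a time.  On
      * failure any pages already mapped into the scatterlist are released.
      */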
 661static int
 662vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 663                      struct iov_iter *iter,
 664                      struct scatterlist *sg, int sg_count)
 665{
 666        struct scatterlist *p = sg;
 667        int ret;
 668
 669        while (iov_iter_count(iter)) {
 670                ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
 671                if (ret < 0) {
 672                        while (p < sg) {
 673                                struct page *page = sg_page(p++);
 674                                if (page)
 675                                        put_page(page);
 676                        }
 677                        return ret;
 678                }
 679                sg += ret;
 680        }
 681        return 0;
 682}
 683
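     /*
      * Map the protection and data payloads described by the two iterators
      * into the command's pre-allocated scatterlists.
      */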
 684static int
 685vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 686                 size_t prot_bytes, struct iov_iter *prot_iter,
 687                 size_t data_bytes, struct iov_iter *data_iter)
 688{
 689        int sgl_count, ret;
 690        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
 691
 692        if (prot_bytes) {
 693                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
 694                                                 VHOST_SCSI_PREALLOC_PROT_SGLS);
 695                if (sgl_count < 0)
 696                        return sgl_count;
 697
 698                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
 699                cmd->tvc_prot_sgl_count = sgl_count;
 700                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
 701                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
 702
 703                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
 704                                            cmd->tvc_prot_sgl,
 705                                            cmd->tvc_prot_sgl_count);
 706                if (ret < 0) {
 707                        cmd->tvc_prot_sgl_count = 0;
 708                        return ret;
 709                }
 710        }
 711        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
 712                                         VHOST_SCSI_PREALLOC_SGLS);
 713        if (sgl_count < 0)
 714                return sgl_count;
 715
 716        sg_init_table(cmd->tvc_sgl, sgl_count);
 717        cmd->tvc_sgl_count = sgl_count;
 718        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
 719                  cmd->tvc_sgl, cmd->tvc_sgl_count);
 720
 721        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
 722                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
 723        if (ret < 0) {
 724                cmd->tvc_sgl_count = 0;
 725                return ret;
 726        }
 727        return 0;
 728}
 729
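     /*
      * Translate a virtio-scsi task attribute into the corresponding TCM tag
      * type; unknown values fall back to a simple tag.
      */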
 730static int vhost_scsi_to_tcm_attr(int attr)
 731{
 732        switch (attr) {
 733        case VIRTIO_SCSI_S_SIMPLE:
 734                return TCM_SIMPLE_TAG;
 735        case VIRTIO_SCSI_S_ORDERED:
 736                return TCM_ORDERED_TAG;
 737        case VIRTIO_SCSI_S_HEAD:
 738                return TCM_HEAD_TAG;
 739        case VIRTIO_SCSI_S_ACA:
 740                return TCM_ACA_TAG;
 741        default:
 742                break;
 743        }
 744        return TCM_SIMPLE_TAG;
 745}
 746
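     /*
      * cmwq worker: submit the command to the target core from process
      * context.  Completion comes back through vhost_scsi_queue_data_in()
      * or vhost_scsi_queue_status().
      */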
 747static void vhost_scsi_submission_work(struct work_struct *work)
 748{
 749        struct vhost_scsi_cmd *cmd =
 750                container_of(work, struct vhost_scsi_cmd, work);
 751        struct vhost_scsi_nexus *tv_nexus;
 752        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 753        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 754        int rc;
 755
 756        /* FIXME: BIDI operation */
 757        if (cmd->tvc_sgl_count) {
 758                sg_ptr = cmd->tvc_sgl;
 759
 760                if (cmd->tvc_prot_sgl_count)
 761                        sg_prot_ptr = cmd->tvc_prot_sgl;
 762                else
 763                        se_cmd->prot_pto = true;
 764        } else {
 765                sg_ptr = NULL;
 766        }
 767        tv_nexus = cmd->tvc_nexus;
 768
 769        se_cmd->tag = 0;
 770        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
 771                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
 772                        cmd->tvc_lun, cmd->tvc_exp_data_len,
 773                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
 774                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
 775                        sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
 776                        cmd->tvc_prot_sgl_count);
 777        if (rc < 0) {
 778                transport_send_check_condition_and_sense(se_cmd,
 779                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 780                transport_generic_free_cmd(se_cmd, 0);
 781        }
 782}
 783
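     /* Complete a request with VIRTIO_SCSI_S_BAD_TARGET in its response. */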
 784static void
 785vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 786                           struct vhost_virtqueue *vq,
 787                           int head, unsigned out)
 788{
 789        struct virtio_scsi_cmd_resp __user *resp;
 790        struct virtio_scsi_cmd_resp rsp;
 791        int ret;
 792
 793        memset(&rsp, 0, sizeof(rsp));
 794        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
 795        resp = vq->iov[out].iov_base;
 796        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 797        if (!ret)
 798                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 799        else
 800                pr_err("Faulted on virtio_scsi_cmd_resp\n");
 801}
 802
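     /*
      * Main request-queue handler: pull descriptors off the vring, parse the
      * virtio-scsi command header, map any data and T10-PI payload into
      * scatterlists and dispatch the command to vhost_scsi_workqueue.
      */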
 803static void
 804vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 805{
 806        struct vhost_scsi_tpg **vs_tpg, *tpg;
 807        struct virtio_scsi_cmd_req v_req;
 808        struct virtio_scsi_cmd_req_pi v_req_pi;
 809        struct vhost_scsi_cmd *cmd;
 810        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
 811        u64 tag;
 812        u32 exp_data_len, data_direction;
 813        unsigned int out = 0, in = 0;
 814        int head, ret, prot_bytes;
 815        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 816        size_t out_size, in_size;
 817        u16 lun;
 818        u8 *target, *lunp, task_attr;
 819        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 820        void *req, *cdb;
 821
 822        mutex_lock(&vq->mutex);
 823        /*
  824         * We can handle the vq only after the endpoint is set up by calling the
 825         * VHOST_SCSI_SET_ENDPOINT ioctl.
 826         */
 827        vs_tpg = vq->private_data;
 828        if (!vs_tpg)
 829                goto out;
 830
 831        vhost_disable_notify(&vs->dev, vq);
 832
 833        for (;;) {
 834                head = vhost_get_vq_desc(vq, vq->iov,
 835                                         ARRAY_SIZE(vq->iov), &out, &in,
 836                                         NULL, NULL);
 837                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
 838                         head, out, in);
 839                /* On error, stop handling until the next kick. */
 840                if (unlikely(head < 0))
 841                        break;
 842                /* Nothing new?  Wait for eventfd to tell us they refilled. */
 843                if (head == vq->num) {
 844                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
 845                                vhost_disable_notify(&vs->dev, vq);
 846                                continue;
 847                        }
 848                        break;
 849                }
 850                /*
 851                 * Check for a sane response buffer so we can report early
 852                 * errors back to the guest.
 853                 */
 854                if (unlikely(vq->iov[out].iov_len < rsp_size)) {
 855                        vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
 856                                " size, got %zu bytes\n", vq->iov[out].iov_len);
 857                        break;
 858                }
 859                /*
  860                 * Set up pointers and values based upon the different virtio-scsi
  861                 * request header if T10_PI is enabled in the KVM guest.
 862                 */
 863                if (t10_pi) {
 864                        req = &v_req_pi;
 865                        req_size = sizeof(v_req_pi);
 866                        lunp = &v_req_pi.lun[0];
 867                        target = &v_req_pi.lun[1];
 868                } else {
 869                        req = &v_req;
 870                        req_size = sizeof(v_req);
 871                        lunp = &v_req.lun[0];
 872                        target = &v_req.lun[1];
 873                }
 874                /*
 875                 * FIXME: Not correct for BIDI operation
 876                 */
 877                out_size = iov_length(vq->iov, out);
 878                in_size = iov_length(&vq->iov[out], in);
 879
 880                /*
  881                 * Copy over the virtio-scsi request header, which for an
 882                 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
 883                 * single iovec may contain both the header + outgoing
 884                 * WRITE payloads.
 885                 *
 886                 * copy_from_iter() will advance out_iter, so that it will
 887                 * point at the start of the outgoing WRITE payload, if
 888                 * DMA_TO_DEVICE is set.
 889                 */
 890                iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 891
 892                if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
 893                        vq_err(vq, "Faulted on copy_from_iter\n");
 894                        vhost_scsi_send_bad_target(vs, vq, head, out);
 895                        continue;
 896                }
 897                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
 898                if (unlikely(*lunp != 1)) {
 899                        vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
 900                        vhost_scsi_send_bad_target(vs, vq, head, out);
 901                        continue;
 902                }
 903
 904                tpg = READ_ONCE(vs_tpg[*target]);
 905                if (unlikely(!tpg)) {
 906                        /* Target does not exist, fail the request */
 907                        vhost_scsi_send_bad_target(vs, vq, head, out);
 908                        continue;
 909                }
 910                /*
 911                 * Determine data_direction by calculating the total outgoing
 912                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
 913                 * response headers respectively.
 914                 *
 915                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
 916                 * to the right place.
 917                 *
 918                 * For DMA_FROM_DEVICE, the iovec will be just past the end
 919                 * of the virtio-scsi response header in either the same
 920                 * or immediately following iovec.
 921                 *
 922                 * Any associated T10_PI bytes for the outgoing / incoming
 923                 * payloads are included in calculation of exp_data_len here.
 924                 */
 925                prot_bytes = 0;
 926
 927                if (out_size > req_size) {
 928                        data_direction = DMA_TO_DEVICE;
 929                        exp_data_len = out_size - req_size;
 930                        data_iter = out_iter;
 931                } else if (in_size > rsp_size) {
 932                        data_direction = DMA_FROM_DEVICE;
 933                        exp_data_len = in_size - rsp_size;
 934
 935                        iov_iter_init(&in_iter, READ, &vq->iov[out], in,
 936                                      rsp_size + exp_data_len);
 937                        iov_iter_advance(&in_iter, rsp_size);
 938                        data_iter = in_iter;
 939                } else {
 940                        data_direction = DMA_NONE;
 941                        exp_data_len = 0;
 942                }
 943                /*
 944                 * If T10_PI header + payload is present, setup prot_iter values
 945                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
 946                 * host scatterlists via get_user_pages_fast().
 947                 */
 948                if (t10_pi) {
 949                        if (v_req_pi.pi_bytesout) {
 950                                if (data_direction != DMA_TO_DEVICE) {
 951                                        vq_err(vq, "Received non zero pi_bytesout,"
 952                                                " but wrong data_direction\n");
 953                                        vhost_scsi_send_bad_target(vs, vq, head, out);
 954                                        continue;
 955                                }
 956                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 957                        } else if (v_req_pi.pi_bytesin) {
 958                                if (data_direction != DMA_FROM_DEVICE) {
 959                                        vq_err(vq, "Received non zero pi_bytesin,"
 960                                                " but wrong data_direction\n");
 961                                        vhost_scsi_send_bad_target(vs, vq, head, out);
 962                                        continue;
 963                                }
 964                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 965                        }
 966                        /*
 967                         * Set prot_iter to data_iter, and advance past any
  968                         * preceding prot_bytes that may be present.
 969                         *
 970                         * Also fix up the exp_data_len to reflect only the
 971                         * actual data payload length.
 972                         */
 973                        if (prot_bytes) {
 974                                exp_data_len -= prot_bytes;
 975                                prot_iter = data_iter;
 976                                iov_iter_advance(&data_iter, prot_bytes);
 977                        }
 978                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
 979                        task_attr = v_req_pi.task_attr;
 980                        cdb = &v_req_pi.cdb[0];
 981                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
 982                } else {
 983                        tag = vhost64_to_cpu(vq, v_req.tag);
 984                        task_attr = v_req.task_attr;
 985                        cdb = &v_req.cdb[0];
 986                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 987                }
 988                /*
  989                 * Check that the received CDB size does not exceed our
 990                 * hardcoded max for vhost-scsi, then get a pre-allocated
 991                 * cmd descriptor for the new virtio-scsi tag.
 992                 *
 993                 * TODO what if cdb was too small for varlen cdb header?
 994                 */
 995                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
 996                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
  997                                " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
 998                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
 999                        vhost_scsi_send_bad_target(vs, vq, head, out);
1000                        continue;
1001                }
1002                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1003                                         exp_data_len + prot_bytes,
1004                                         data_direction);
1005                if (IS_ERR(cmd)) {
1006                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1007                               PTR_ERR(cmd));
1008                        vhost_scsi_send_bad_target(vs, vq, head, out);
1009                        continue;
1010                }
1011                cmd->tvc_vhost = vs;
1012                cmd->tvc_vq = vq;
1013                cmd->tvc_resp_iov = vq->iov[out];
1014                cmd->tvc_in_iovs = in;
1015
1016                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1017                         cmd->tvc_cdb[0], cmd->tvc_lun);
1018                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1019                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1020
1021                if (data_direction != DMA_NONE) {
1022                        ret = vhost_scsi_mapal(cmd,
1023                                               prot_bytes, &prot_iter,
1024                                               exp_data_len, &data_iter);
1025                        if (unlikely(ret)) {
1026                                vq_err(vq, "Failed to map iov to sgl\n");
1027                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1028                                vhost_scsi_send_bad_target(vs, vq, head, out);
1029                                continue;
1030                        }
1031                }
1032                /*
1033                 * Save the descriptor from vhost_get_vq_desc() to be used to
1034                 * complete the virtio-scsi request in TCM callback context via
1035                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1036                 */
1037                cmd->tvc_vq_desc = head;
1038                /*
1039                 * Dispatch cmd descriptor for cmwq execution in process
1040                 * context provided by vhost_scsi_workqueue.  This also ensures
1041                 * cmd is executed on the same kworker CPU as this vhost
1042                 * thread to gain positive L2 cache locality effects.
1043                 */
1044                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1045                queue_work(vhost_scsi_workqueue, &cmd->work);
1046        }
1047out:
1048        mutex_unlock(&vq->mutex);
1049}
1050
1051static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1052{
1053        pr_debug("%s: The handling func for control queue.\n", __func__);
1054}
1055
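     /*
      * Queue a hotplug/status event for the guest and kick the vhost worker;
      * the actual vring update happens later in vhost_scsi_evt_work().
      */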
1056static void
1057vhost_scsi_send_evt(struct vhost_scsi *vs,
1058                   struct vhost_scsi_tpg *tpg,
1059                   struct se_lun *lun,
1060                   u32 event,
1061                   u32 reason)
1062{
1063        struct vhost_scsi_evt *evt;
1064
1065        evt = vhost_scsi_allocate_evt(vs, event, reason);
1066        if (!evt)
1067                return;
1068
1069        if (tpg && lun) {
1070                /* TODO: share lun setup code with virtio-scsi.ko */
1071                /*
1072                 * Note: evt->event is zeroed when we allocate it and
1073                 * lun[4-7] need to be zero according to virtio-scsi spec.
1074                 */
1075                evt->event.lun[0] = 0x01;
1076                evt->event.lun[1] = tpg->tport_tpgt;
1077                if (lun->unpacked_lun >= 256)
 1078                        evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
1079                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1080        }
1081
1082        llist_add(&evt->list, &vs->vs_event_list);
1083        vhost_work_queue(&vs->dev, &vs->vs_event_work);
1084}
1085
1086static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1087{
1088        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1089                                                poll.work);
1090        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1091
1092        mutex_lock(&vq->mutex);
1093        if (!vq->private_data)
1094                goto out;
1095
1096        if (vs->vs_events_missed)
1097                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1098out:
1099        mutex_unlock(&vq->mutex);
1100}
1101
1102static void vhost_scsi_handle_kick(struct vhost_work *work)
1103{
1104        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1105                                                poll.work);
1106        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1107
1108        vhost_scsi_handle_vq(vs, vq);
1109}
1110
1111static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1112{
1113        vhost_poll_flush(&vs->vqs[index].vq.poll);
1114}
1115
1116/* Callers must hold dev mutex */
1117static void vhost_scsi_flush(struct vhost_scsi *vs)
1118{
1119        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1120        int i;
1121
1122        /* Init new inflight and remember the old inflight */
1123        vhost_scsi_init_inflight(vs, old_inflight);
1124
1125        /*
1126         * The inflight->kref was initialized to 1. We decrement it here to
1127         * indicate the start of the flush operation so that it will reach 0
1128         * when all the reqs are finished.
1129         */
1130        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1131                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1132
1133        /* Flush both the vhost poll and vhost work */
1134        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1135                vhost_scsi_flush_vq(vs, i);
1136        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1137        vhost_work_flush(&vs->dev, &vs->vs_event_work);
1138
1139        /* Wait for all reqs issued before the flush to be finished */
1140        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1141                wait_for_completion(&old_inflight[i]->comp);
1142}
1143
1144/*
1145 * Called from vhost_scsi_ioctl() context to walk the list of available
1146 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1147 *
1148 *  The lock nesting rule is:
1149 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1150 */
1151static int
1152vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1153                        struct vhost_scsi_target *t)
1154{
1155        struct se_portal_group *se_tpg;
1156        struct vhost_scsi_tport *tv_tport;
1157        struct vhost_scsi_tpg *tpg;
1158        struct vhost_scsi_tpg **vs_tpg;
1159        struct vhost_virtqueue *vq;
1160        int index, ret, i, len;
1161        bool match = false;
1162
1163        mutex_lock(&vhost_scsi_mutex);
1164        mutex_lock(&vs->dev.mutex);
1165
1166        /* Verify that ring has been setup correctly. */
1167        for (index = 0; index < vs->dev.nvqs; ++index) {
1168                /* Verify that ring has been setup correctly. */
1169                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1170                        ret = -EFAULT;
1171                        goto out;
1172                }
1173        }
1174
1175        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1176        vs_tpg = kzalloc(len, GFP_KERNEL);
1177        if (!vs_tpg) {
1178                ret = -ENOMEM;
1179                goto out;
1180        }
1181        if (vs->vs_tpg)
1182                memcpy(vs_tpg, vs->vs_tpg, len);
1183
1184        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1185                mutex_lock(&tpg->tv_tpg_mutex);
1186                if (!tpg->tpg_nexus) {
1187                        mutex_unlock(&tpg->tv_tpg_mutex);
1188                        continue;
1189                }
1190                if (tpg->tv_tpg_vhost_count != 0) {
1191                        mutex_unlock(&tpg->tv_tpg_mutex);
1192                        continue;
1193                }
1194                tv_tport = tpg->tport;
1195
1196                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1197                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1198                                kfree(vs_tpg);
1199                                mutex_unlock(&tpg->tv_tpg_mutex);
1200                                ret = -EEXIST;
1201                                goto out;
1202                        }
1203                        /*
1204                         * In order to ensure individual vhost-scsi configfs
1205                         * groups cannot be removed while in use by vhost ioctl,
1206                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
1207                         * dependency now.
1208                         */
1209                        se_tpg = &tpg->se_tpg;
1210                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1211                        if (ret) {
1212                                pr_warn("configfs_depend_item() failed: %d\n", ret);
1213                                kfree(vs_tpg);
1214                                mutex_unlock(&tpg->tv_tpg_mutex);
1215                                goto out;
1216                        }
1217                        tpg->tv_tpg_vhost_count++;
1218                        tpg->vhost_scsi = vs;
1219                        vs_tpg[tpg->tport_tpgt] = tpg;
1220                        smp_mb__after_atomic();
1221                        match = true;
1222                }
1223                mutex_unlock(&tpg->tv_tpg_mutex);
1224        }
1225
1226        if (match) {
1227                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1228                       sizeof(vs->vs_vhost_wwpn));
1229                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1230                        vq = &vs->vqs[i].vq;
1231                        mutex_lock(&vq->mutex);
1232                        vq->private_data = vs_tpg;
1233                        vhost_vq_init_access(vq);
1234                        mutex_unlock(&vq->mutex);
1235                }
1236                ret = 0;
1237        } else {
1238                ret = -EEXIST;
1239        }
1240
1241        /*
1242         * Act as synchronize_rcu to make sure access to
1243         * old vs->vs_tpg is finished.
1244         */
1245        vhost_scsi_flush(vs);
1246        kfree(vs->vs_tpg);
1247        vs->vs_tpg = vs_tpg;
1248
1249out:
1250        mutex_unlock(&vs->dev.mutex);
1251        mutex_unlock(&vhost_scsi_mutex);
1252        return ret;
1253}
1254
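     /*
      * Reverse of vhost_scsi_set_endpoint(): drop each matching TPG's vhost
      * reference and configfs dependency, clear vq->private_data and flush
      * any requests still in flight.
      */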
1255static int
1256vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1257                          struct vhost_scsi_target *t)
1258{
1259        struct se_portal_group *se_tpg;
1260        struct vhost_scsi_tport *tv_tport;
1261        struct vhost_scsi_tpg *tpg;
1262        struct vhost_virtqueue *vq;
1263        bool match = false;
1264        int index, ret, i;
1265        u8 target;
1266
1267        mutex_lock(&vhost_scsi_mutex);
1268        mutex_lock(&vs->dev.mutex);
1269        /* Verify that ring has been setup correctly. */
1270        for (index = 0; index < vs->dev.nvqs; ++index) {
1271                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1272                        ret = -EFAULT;
1273                        goto err_dev;
1274                }
1275        }
1276
1277        if (!vs->vs_tpg) {
1278                ret = 0;
1279                goto err_dev;
1280        }
1281
1282        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1283                target = i;
1284                tpg = vs->vs_tpg[target];
1285                if (!tpg)
1286                        continue;
1287
1288                mutex_lock(&tpg->tv_tpg_mutex);
1289                tv_tport = tpg->tport;
1290                if (!tv_tport) {
1291                        ret = -ENODEV;
1292                        goto err_tpg;
1293                }
1294
1295                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1296                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1297                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1298                                tv_tport->tport_name, tpg->tport_tpgt,
1299                                t->vhost_wwpn, t->vhost_tpgt);
1300                        ret = -EINVAL;
1301                        goto err_tpg;
1302                }
1303                tpg->tv_tpg_vhost_count--;
1304                tpg->vhost_scsi = NULL;
1305                vs->vs_tpg[target] = NULL;
1306                match = true;
1307                mutex_unlock(&tpg->tv_tpg_mutex);
1308                /*
1309                 * Release se_tpg->tpg_group.cg_item configfs dependency now
1310                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1311                 */
1312                se_tpg = &tpg->se_tpg;
1313                target_undepend_item(&se_tpg->tpg_group.cg_item);
1314        }
1315        if (match) {
1316                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1317                        vq = &vs->vqs[i].vq;
1318                        mutex_lock(&vq->mutex);
1319                        vq->private_data = NULL;
1320                        mutex_unlock(&vq->mutex);
1321                }
1322        }
1323        /*
1324         * Act as synchronize_rcu to make sure access to
1325         * old vs->vs_tpg is finished.
1326         */
1327        vhost_scsi_flush(vs);
1328        kfree(vs->vs_tpg);
1329        vs->vs_tpg = NULL;
1330        WARN_ON(vs->vs_events_nr);
1331        mutex_unlock(&vs->dev.mutex);
1332        mutex_unlock(&vhost_scsi_mutex);
1333        return 0;
1334
1335err_tpg:
1336        mutex_unlock(&tpg->tv_tpg_mutex);
1337err_dev:
1338        mutex_unlock(&vs->dev.mutex);
1339        mutex_unlock(&vhost_scsi_mutex);
1340        return ret;
1341}
1342
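     /*
      * Accept the feature bits negotiated by userspace; anything outside
      * VHOST_SCSI_FEATURES is rejected with -EOPNOTSUPP.
      */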
1343static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1344{
1345        struct vhost_virtqueue *vq;
1346        int i;
1347
1348        if (features & ~VHOST_SCSI_FEATURES)
1349                return -EOPNOTSUPP;
1350
1351        mutex_lock(&vs->dev.mutex);
1352        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1353            !vhost_log_access_ok(&vs->dev)) {
1354                mutex_unlock(&vs->dev.mutex);
1355                return -EFAULT;
1356        }
1357
1358        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1359                vq = &vs->vqs[i].vq;
1360                mutex_lock(&vq->mutex);
1361                vq->acked_features = features;
1362                mutex_unlock(&vq->mutex);
1363        }
1364        mutex_unlock(&vs->dev.mutex);
1365        return 0;
1366}
1367
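     /*
      * open() for the vhost-scsi character device: allocate the vhost_scsi
      * instance, wire up the control, event and I/O virtqueue kick handlers
      * and initialize the inflight counters.
      */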
1368static int vhost_scsi_open(struct inode *inode, struct file *f)
1369{
1370        struct vhost_scsi *vs;
1371        struct vhost_virtqueue **vqs;
1372        int r = -ENOMEM, i;
1373
1374        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1375        if (!vs) {
1376                vs = vzalloc(sizeof(*vs));
1377                if (!vs)
1378                        goto err_vs;
1379        }
1380
1381        vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1382        if (!vqs)
1383                goto err_vqs;
1384
1385        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1386        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1387
1388        vs->vs_events_nr = 0;
1389        vs->vs_events_missed = false;
1390
1391        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1392        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1393        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1394        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1395        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1396                vqs[i] = &vs->vqs[i].vq;
1397                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1398        }
1399        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1400
1401        vhost_scsi_init_inflight(vs, NULL);
1402
1403        f->private_data = vs;
1404        return 0;
1405
1406err_vqs:
1407        kvfree(vs);
1408err_vs:
1409        return r;
1410}
1411
1412static int vhost_scsi_release(struct inode *inode, struct file *f)
1413{
1414        struct vhost_scsi *vs = f->private_data;
1415        struct vhost_scsi_target t;
1416
1417        mutex_lock(&vs->dev.mutex);
1418        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1419        mutex_unlock(&vs->dev.mutex);
1420        vhost_scsi_clear_endpoint(vs, &t);
1421        vhost_dev_stop(&vs->dev);
1422        vhost_dev_cleanup(&vs->dev);
1423        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1424        vhost_scsi_flush(vs);
1425        kfree(vs->dev.vqs);
1426        kvfree(vs);
1427        return 0;
1428}
1429
1430static long
1431vhost_scsi_ioctl(struct file *f,
1432                 unsigned int ioctl,
1433                 unsigned long arg)
1434{
1435        struct vhost_scsi *vs = f->private_data;
1436        struct vhost_scsi_target backend;
1437        void __user *argp = (void __user *)arg;
1438        u64 __user *featurep = argp;
1439        u32 __user *eventsp = argp;
1440        u32 events_missed;
1441        u64 features;
1442        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1443        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1444
1445        switch (ioctl) {
1446        case VHOST_SCSI_SET_ENDPOINT:
1447                if (copy_from_user(&backend, argp, sizeof backend))
1448                        return -EFAULT;
1449                if (backend.reserved != 0)
1450                        return -EOPNOTSUPP;
1451
1452                return vhost_scsi_set_endpoint(vs, &backend);
1453        case VHOST_SCSI_CLEAR_ENDPOINT:
1454                if (copy_from_user(&backend, argp, sizeof backend))
1455                        return -EFAULT;
1456                if (backend.reserved != 0)
1457                        return -EOPNOTSUPP;
1458
1459                return vhost_scsi_clear_endpoint(vs, &backend);
1460        case VHOST_SCSI_GET_ABI_VERSION:
1461                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1462                        return -EFAULT;
1463                return 0;
1464        case VHOST_SCSI_SET_EVENTS_MISSED:
1465                if (get_user(events_missed, eventsp))
1466                        return -EFAULT;
1467                mutex_lock(&vq->mutex);
1468                vs->vs_events_missed = events_missed;
1469                mutex_unlock(&vq->mutex);
1470                return 0;
1471        case VHOST_SCSI_GET_EVENTS_MISSED:
1472                mutex_lock(&vq->mutex);
1473                events_missed = vs->vs_events_missed;
1474                mutex_unlock(&vq->mutex);
1475                if (put_user(events_missed, eventsp))
1476                        return -EFAULT;
1477                return 0;
1478        case VHOST_GET_FEATURES:
1479                features = VHOST_SCSI_FEATURES;
1480                if (copy_to_user(featurep, &features, sizeof features))
1481                        return -EFAULT;
1482                return 0;
1483        case VHOST_SET_FEATURES:
1484                if (copy_from_user(&features, featurep, sizeof features))
1485                        return -EFAULT;
1486                return vhost_scsi_set_features(vs, features);
1487        default:
1488                mutex_lock(&vs->dev.mutex);
1489                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1490                /* TODO: flush backend after dev ioctl. */
1491                if (r == -ENOIOCTLCMD)
1492                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1493                mutex_unlock(&vs->dev.mutex);
1494                return r;
1495        }
1496}
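
/*
 * Illustrative sketch (not part of this driver): the expected userspace
 * sequence against /dev/vhost-scsi, assuming the target WWPN below was
 * already created through configfs (the WWPN value is hypothetical):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	int abi;
 *	struct vhost_scsi_target t = {
 *		.vhost_wwpn = "naa.6001405abcdef000",
 *		.vhost_tpgt = 1,
 *	};
 *
 *	ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi);
 *	ioctl(fd, VHOST_SET_OWNER);
 *	... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup elided ...
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *
 * VHOST_SCSI_CLEAR_ENDPOINT with the same argument detaches the target.
 */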
1497
1498#ifdef CONFIG_COMPAT
1499static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1500                                unsigned long arg)
1501{
1502        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1503}
1504#endif
1505
1506static const struct file_operations vhost_scsi_fops = {
1507        .owner          = THIS_MODULE,
1508        .release        = vhost_scsi_release,
1509        .unlocked_ioctl = vhost_scsi_ioctl,
1510#ifdef CONFIG_COMPAT
1511        .compat_ioctl   = vhost_scsi_compat_ioctl,
1512#endif
1513        .open           = vhost_scsi_open,
1514        .llseek         = noop_llseek,
1515};
1516
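/* Exposed to userspace as the /dev/vhost-scsi character device (dynamic minor). */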
1517static struct miscdevice vhost_scsi_misc = {
1518        .minor  = MISC_DYNAMIC_MINOR,
1519        .name   = "vhost-scsi",
1520        .fops   = &vhost_scsi_fops,
1521};
1522
1523static int __init vhost_scsi_register(void)
1524{
1525        return misc_register(&vhost_scsi_misc);
1526}
1527
1528static void vhost_scsi_deregister(void)
1529{
1530        misc_deregister(&vhost_scsi_misc);
1531}
1532
1533static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1534{
1535        switch (tport->tport_proto_id) {
1536        case SCSI_PROTOCOL_SAS:
1537                return "SAS";
1538        case SCSI_PROTOCOL_FCP:
1539                return "FCP";
1540        case SCSI_PROTOCOL_ISCSI:
1541                return "iSCSI";
1542        default:
1543                break;
1544        }
1545
1546        return "Unknown";
1547}
1548
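/*
 * Send a VIRTIO_SCSI_T_TRANSPORT_RESET event when a LUN is plugged or
 * unplugged, so the guest rescans (RESCAN) or removes (REMOVED) the device.
 * The event is only queued if the guest negotiated VIRTIO_SCSI_F_HOTPLUG.
 */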
1549static void
1550vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1551                  struct se_lun *lun, bool plug)
1552{
1553
1554        struct vhost_scsi *vs = tpg->vhost_scsi;
1555        struct vhost_virtqueue *vq;
1556        u32 reason;
1557
1558        if (!vs)
1559                return;
1560
1561        mutex_lock(&vs->dev.mutex);
1562
1563        if (plug)
1564                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1565        else
1566                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1567
1568        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1569        mutex_lock(&vq->mutex);
1570        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1571                vhost_scsi_send_evt(vs, tpg, lun,
1572                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1573        mutex_unlock(&vq->mutex);
1574        mutex_unlock(&vs->dev.mutex);
1575}
1576
1577static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1578{
1579        vhost_scsi_do_plug(tpg, lun, true);
1580}
1581
1582static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1583{
1584        vhost_scsi_do_plug(tpg, lun, false);
1585}
1586
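/*
 * Called by the target core when a LUN is linked to / unlinked from this
 * TPG through configfs; adjust the port count and notify the guest via the
 * hotplug event path above.
 */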
1587static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1588                               struct se_lun *lun)
1589{
1590        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1591                                struct vhost_scsi_tpg, se_tpg);
1592
1593        mutex_lock(&vhost_scsi_mutex);
1594
1595        mutex_lock(&tpg->tv_tpg_mutex);
1596        tpg->tv_tpg_port_count++;
1597        mutex_unlock(&tpg->tv_tpg_mutex);
1598
1599        vhost_scsi_hotplug(tpg, lun);
1600
1601        mutex_unlock(&vhost_scsi_mutex);
1602
1603        return 0;
1604}
1605
1606static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1607                                  struct se_lun *lun)
1608{
1609        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1610                                struct vhost_scsi_tpg, se_tpg);
1611
1612        mutex_lock(&vhost_scsi_mutex);
1613
1614        mutex_lock(&tpg->tv_tpg_mutex);
1615        tpg->tv_tpg_port_count--;
1616        mutex_unlock(&tpg->tv_tpg_mutex);
1617
1618        vhost_scsi_hotunplug(tpg, lun);
1619
1620        mutex_unlock(&vhost_scsi_mutex);
1621}
1622
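/*
 * Free the per-command scatterlist, protection scatterlist and user-page
 * arrays that vhost_scsi_nexus_cb() preallocates for every tag in the
 * session command map.
 */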
1623static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1624{
1625        struct vhost_scsi_cmd *tv_cmd;
1626        unsigned int i;
1627
1628        if (!se_sess->sess_cmd_map)
1629                return;
1630
1631        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1632                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1633
1634                kfree(tv_cmd->tvc_sgl);
1635                kfree(tv_cmd->tvc_prot_sgl);
1636                kfree(tv_cmd->tvc_upages);
1637        }
1638}
1639
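/*
 * fabric_prot_type selects fabric-level T10-PI protection for this TPG.
 * Only 0 (disabled), 1 and 3 are accepted, which are assumed to correspond
 * to the target core's DIF TYPE1 and TYPE3 modes; TYPE2 is not supported.
 */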
1640static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1641                struct config_item *item, const char *page, size_t count)
1642{
1643        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1644        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1645                                struct vhost_scsi_tpg, se_tpg);
1646        unsigned long val;
1647        int ret = kstrtoul(page, 0, &val);
1648
1649        if (ret) {
1650                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1651                return ret;
1652        }
1653        if (val != 0 && val != 1 && val != 3) {
1654                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1655                return -EINVAL;
1656        }
1657        tpg->tv_fabric_prot_type = val;
1658
1659        return count;
1660}
1661
1662static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1663                struct config_item *item, char *page)
1664{
1665        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1666        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1667                                struct vhost_scsi_tpg, se_tpg);
1668
1669        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1670}
1671
1672CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1673
1674static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1675        &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1676        NULL,
1677};
1678
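/*
 * Callback from target_alloc_session(): preallocate the data, protection
 * and user-page descriptor arrays for each of the VHOST_SCSI_DEFAULT_TAGS
 * commands in the session command map, so the fast path avoids allocations.
 */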
1679static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1680                               struct se_session *se_sess, void *p)
1681{
1682        struct vhost_scsi_cmd *tv_cmd;
1683        unsigned int i;
1684
1685        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1686                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1687
1688                tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1689                                          sizeof(struct scatterlist),
1690                                          GFP_KERNEL);
1691                if (!tv_cmd->tvc_sgl) {
1692                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1693                        goto out;
1694                }
1695
1696                tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1697                                             sizeof(struct page *),
1698                                             GFP_KERNEL);
1699                if (!tv_cmd->tvc_upages) {
1700                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1701                        goto out;
1702                }
1703
1704                tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1705                                               sizeof(struct scatterlist),
1706                                               GFP_KERNEL);
1707                if (!tv_cmd->tvc_prot_sgl) {
1708                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1709                        goto out;
1710                }
1711        }
1712        return 0;
1713out:
1714        vhost_scsi_free_cmd_map_res(se_sess);
1715        return -ENOMEM;
1716}
1717
1718static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1719                                const char *name)
1720{
1721        struct vhost_scsi_nexus *tv_nexus;
1722
1723        mutex_lock(&tpg->tv_tpg_mutex);
1724        if (tpg->tpg_nexus) {
1725                mutex_unlock(&tpg->tv_tpg_mutex);
1726                pr_debug("tpg->tpg_nexus already exists\n");
1727                return -EEXIST;
1728        }
1729
1730        tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1731        if (!tv_nexus) {
1732                mutex_unlock(&tpg->tv_tpg_mutex);
1733                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1734                return -ENOMEM;
1735        }
1736        /*
1737         * Since we are running in 'demo mode' this call will generate a
1738         * struct se_node_acl for the vhost_scsi struct se_portal_group with
1739         * the SCSI Initiator port name of the passed configfs group 'name'.
1740         */
1741        tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1742                                        VHOST_SCSI_DEFAULT_TAGS,
1743                                        sizeof(struct vhost_scsi_cmd),
1744                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1745                                        (unsigned char *)name, tv_nexus,
1746                                        vhost_scsi_nexus_cb);
1747        if (IS_ERR(tv_nexus->tvn_se_sess)) {
1748                mutex_unlock(&tpg->tv_tpg_mutex);
1749                kfree(tv_nexus);
1750                return -ENOMEM;
1751        }
1752        tpg->tpg_nexus = tv_nexus;
1753
1754        mutex_unlock(&tpg->tv_tpg_mutex);
1755        return 0;
1756}
1757
1758static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1759{
1760        struct se_session *se_sess;
1761        struct vhost_scsi_nexus *tv_nexus;
1762
1763        mutex_lock(&tpg->tv_tpg_mutex);
1764        tv_nexus = tpg->tpg_nexus;
1765        if (!tv_nexus) {
1766                mutex_unlock(&tpg->tv_tpg_mutex);
1767                return -ENODEV;
1768        }
1769
1770        se_sess = tv_nexus->tvn_se_sess;
1771        if (!se_sess) {
1772                mutex_unlock(&tpg->tv_tpg_mutex);
1773                return -ENODEV;
1774        }
1775
1776        if (tpg->tv_tpg_port_count != 0) {
1777                mutex_unlock(&tpg->tv_tpg_mutex);
1778                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1779                        " active TPG port count: %d\n",
1780                        tpg->tv_tpg_port_count);
1781                return -EBUSY;
1782        }
1783
1784        if (tpg->tv_tpg_vhost_count != 0) {
1785                mutex_unlock(&tpg->tv_tpg_mutex);
1786                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1787                        " active TPG vhost count: %d\n",
1788                        tpg->tv_tpg_vhost_count);
1789                return -EBUSY;
1790        }
1791
1792        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1793                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1794                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1795
1796        vhost_scsi_free_cmd_map_res(se_sess);
1797        /*
1798         * Release the SCSI I_T Nexus to the emulated vhost Target Port
1799         */
1800        transport_deregister_session(tv_nexus->tvn_se_sess);
1801        tpg->tpg_nexus = NULL;
1802        mutex_unlock(&tpg->tv_tpg_mutex);
1803
1804        kfree(tv_nexus);
1805        return 0;
1806}
1807
1808static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1809{
1810        struct se_portal_group *se_tpg = to_tpg(item);
1811        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1812                                struct vhost_scsi_tpg, se_tpg);
1813        struct vhost_scsi_nexus *tv_nexus;
1814        ssize_t ret;
1815
1816        mutex_lock(&tpg->tv_tpg_mutex);
1817        tv_nexus = tpg->tpg_nexus;
1818        if (!tv_nexus) {
1819                mutex_unlock(&tpg->tv_tpg_mutex);
1820                return -ENODEV;
1821        }
1822        ret = snprintf(page, PAGE_SIZE, "%s\n",
1823                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1824        mutex_unlock(&tpg->tv_tpg_mutex);
1825
1826        return ret;
1827}
1828
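/*
 * Writing an initiator WWN such as "naa.<id>", "fc.<id>" or "iqn.<name>" to
 * the tpg's nexus attribute (assumed to live under
 * /sys/kernel/config/target/vhost/<target wwpn>/tpgt_<N>/nexus) creates the
 * I_T nexus; writing the literal string "NULL" tears it down again.
 */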
1829static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1830                const char *page, size_t count)
1831{
1832        struct se_portal_group *se_tpg = to_tpg(item);
1833        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1834                                struct vhost_scsi_tpg, se_tpg);
1835        struct vhost_scsi_tport *tport_wwn = tpg->tport;
1836        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1837        int ret;
1838        /*
1839         * Shutdown the active I_T nexus if 'NULL' is passed..
1840         * Shut down the active I_T nexus if 'NULL' is passed.
1841        if (!strncmp(page, "NULL", 4)) {
1842                ret = vhost_scsi_drop_nexus(tpg);
1843                return (!ret) ? count : ret;
1844        }
1845        /*
1846         * Otherwise make sure the passed virtual Initiator port WWN matches
1847         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1848         * vhost_scsi_make_nexus().
1849         */
1850        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1851                pr_err("Emulated NAA SAS Address: %s, exceeds"
1852                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
1853                return -EINVAL;
1854        }
1855        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1856
1857        ptr = strstr(i_port, "naa.");
1858        if (ptr) {
1859                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1860                        pr_err("Passed SAS Initiator Port %s does not"
1861                                " match target port protoid: %s\n", i_port,
1862                                vhost_scsi_dump_proto_id(tport_wwn));
1863                        return -EINVAL;
1864                }
1865                port_ptr = &i_port[0];
1866                goto check_newline;
1867        }
1868        ptr = strstr(i_port, "fc.");
1869        if (ptr) {
1870                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1871                        pr_err("Passed FCP Initiator Port %s does not"
1872                                " match target port protoid: %s\n", i_port,
1873                                vhost_scsi_dump_proto_id(tport_wwn));
1874                        return -EINVAL;
1875                }
1876                port_ptr = &i_port[3]; /* Skip over "fc." */
1877                goto check_newline;
1878        }
1879        ptr = strstr(i_port, "iqn.");
1880        if (ptr) {
1881                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1882                        pr_err("Passed iSCSI Initiator Port %s does not"
1883                                " match target port protoid: %s\n", i_port,
1884                                vhost_scsi_dump_proto_id(tport_wwn));
1885                        return -EINVAL;
1886                }
1887                port_ptr = &i_port[0];
1888                goto check_newline;
1889        }
1890        pr_err("Unable to locate prefix for emulated Initiator Port:"
1891                        " %s\n", i_port);
1892        return -EINVAL;
1893        /*
1894         * Clear any trailing newline for the NAA WWN
1895         */
1896check_newline:
1897        if (i_port[strlen(i_port)-1] == '\n')
1898                i_port[strlen(i_port)-1] = '\0';
1899
1900        ret = vhost_scsi_make_nexus(tpg, port_ptr);
1901        if (ret < 0)
1902                return ret;
1903
1904        return count;
1905}
1906
1907CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1908
1909static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1910        &vhost_scsi_tpg_attr_nexus,
1911        NULL,
1912};
1913
1914static struct se_portal_group *
1915vhost_scsi_make_tpg(struct se_wwn *wwn,
1916                   struct config_group *group,
1917                   const char *name)
1918{
1919        struct vhost_scsi_tport *tport = container_of(wwn,
1920                        struct vhost_scsi_tport, tport_wwn);
1921
1922        struct vhost_scsi_tpg *tpg;
1923        u16 tpgt;
1924        int ret;
1925
1926        if (strstr(name, "tpgt_") != name)
1927                return ERR_PTR(-EINVAL);
1928        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1929                return ERR_PTR(-EINVAL);
1930
1931        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
1932        if (!tpg) {
1933                pr_err("Unable to allocate struct vhost_scsi_tpg\n");
1934                return ERR_PTR(-ENOMEM);
1935        }
1936        mutex_init(&tpg->tv_tpg_mutex);
1937        INIT_LIST_HEAD(&tpg->tv_tpg_list);
1938        tpg->tport = tport;
1939        tpg->tport_tpgt = tpgt;
1940
1941        ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1942        if (ret < 0) {
1943                kfree(tpg);
1944                return NULL;
1945        }
1946        mutex_lock(&vhost_scsi_mutex);
1947        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1948        mutex_unlock(&vhost_scsi_mutex);
1949
1950        return &tpg->se_tpg;
1951}
1952
1953static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1954{
1955        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1956                                struct vhost_scsi_tpg, se_tpg);
1957
1958        mutex_lock(&vhost_scsi_mutex);
1959        list_del(&tpg->tv_tpg_list);
1960        mutex_unlock(&vhost_scsi_mutex);
1961        /*
1962         * Release the virtual I_T Nexus for this vhost TPG
1963         */
1964        vhost_scsi_drop_nexus(tpg);
1965        /*
1966         * Deregister the se_tpg from TCM.
1967         */
1968        core_tpg_deregister(se_tpg);
1969        kfree(tpg);
1970}
1971
1972static struct se_wwn *
1973vhost_scsi_make_tport(struct target_fabric_configfs *tf,
1974                     struct config_group *group,
1975                     const char *name)
1976{
1977        struct vhost_scsi_tport *tport;
1978        char *ptr;
1979        u64 wwpn = 0;
1980        int off = 0;
1981
1982        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
1983                return ERR_PTR(-EINVAL); */
1984
1985        tport = kzalloc(sizeof(*tport), GFP_KERNEL);
1986        if (!tport) {
1987                pr_err("Unable to allocate struct vhost_scsi_tport\n");
1988                return ERR_PTR(-ENOMEM);
1989        }
1990        tport->tport_wwpn = wwpn;
1991        /*
1992         * Determine the emulated Protocol Identifier and Target Port Name
1993         * based on the incoming configfs directory name.
1994         */
1995        ptr = strstr(name, "naa.");
1996        if (ptr) {
1997                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1998                goto check_len;
1999        }
2000        ptr = strstr(name, "fc.");
2001        if (ptr) {
2002                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2003                off = 3; /* Skip over "fc." */
2004                goto check_len;
2005        }
2006        ptr = strstr(name, "iqn.");
2007        if (ptr) {
2008                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2009                goto check_len;
2010        }
2011
2012        pr_err("Unable to locate prefix for emulated Target Port:"
2013                        " %s\n", name);
2014        kfree(tport);
2015        return ERR_PTR(-EINVAL);
2016
2017check_len:
2018        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2019                pr_err("Emulated %s Address: %s, exceeds"
2020                        " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2021                        VHOST_SCSI_NAMELEN);
2022                kfree(tport);
2023                return ERR_PTR(-EINVAL);
2024        }
2025        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2026
2027        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2028                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2029
2030        return &tport->tport_wwn;
2031}
2032
2033static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2034{
2035        struct vhost_scsi_tport *tport = container_of(wwn,
2036                                struct vhost_scsi_tport, tport_wwn);
2037
2038        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2039                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2040                tport->tport_name);
2041
2042        kfree(tport);
2043}
2044
2045static ssize_t
2046vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2047{
2048        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2049                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2050                utsname()->machine);
2051}
2052
2053CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2054
2055static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2056        &vhost_scsi_wwn_attr_version,
2057        NULL,
2058};
2059
2060static const struct target_core_fabric_ops vhost_scsi_ops = {
2061        .module                         = THIS_MODULE,
2062        .name                           = "vhost",
2063        .get_fabric_name                = vhost_scsi_get_fabric_name,
2064        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2065        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2066        .tpg_check_demo_mode            = vhost_scsi_check_true,
2067        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2068        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2069        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2070        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2071        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2072        .release_cmd                    = vhost_scsi_release_cmd,
2073        .check_stop_free                = vhost_scsi_check_stop_free,
2074        .sess_get_index                 = vhost_scsi_sess_get_index,
2075        .sess_get_initiator_sid         = NULL,
2076        .write_pending                  = vhost_scsi_write_pending,
2077        .write_pending_status           = vhost_scsi_write_pending_status,
2078        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2079        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2080        .queue_data_in                  = vhost_scsi_queue_data_in,
2081        .queue_status                   = vhost_scsi_queue_status,
2082        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2083        .aborted_task                   = vhost_scsi_aborted_task,
2084        /*
2085         * Setup callers for generic logic in target_core_fabric_configfs.c
2086         */
2087        .fabric_make_wwn                = vhost_scsi_make_tport,
2088        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2089        .fabric_make_tpg                = vhost_scsi_make_tpg,
2090        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2091        .fabric_post_link               = vhost_scsi_port_link,
2092        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2093
2094        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2095        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2096        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2097};
2098
2099static int __init vhost_scsi_init(void)
2100{
2101        int ret = -ENOMEM;
2102
2103        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2104                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2105                utsname()->machine);
2106
2107        /*
2108         * Use our own dedicated workqueue for submitting I/O into
2109         * target core to avoid contention within system_wq.
2110         */
2111        vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2112        if (!vhost_scsi_workqueue)
2113                goto out;
2114
2115        ret = vhost_scsi_register();
2116        if (ret < 0)
2117                goto out_destroy_workqueue;
2118
2119        ret = target_register_template(&vhost_scsi_ops);
2120        if (ret < 0)
2121                goto out_vhost_scsi_deregister;
2122
2123        return 0;
2124
2125out_vhost_scsi_deregister:
2126        vhost_scsi_deregister();
2127out_destroy_workqueue:
2128        destroy_workqueue(vhost_scsi_workqueue);
2129out:
2130        return ret;
2131}
2132
2133static void vhost_scsi_exit(void)
2134{
2135        target_unregister_template(&vhost_scsi_ops);
2136        vhost_scsi_deregister();
2137        destroy_workqueue(vhost_scsi_workqueue);
2138}
2139
2140MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2141MODULE_ALIAS("tcm_vhost");
2142MODULE_LICENSE("GPL");
2143module_init(vhost_scsi_init);
2144module_exit(vhost_scsi_exit);
2145