linux/drivers/vhost/scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/*
 * Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Response header iovec, copied from the vq's iov at submit time */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links w.r.t. explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	struct list_head tmf_queue;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

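/*
 * Fixed virtqueue layout: vq 0 is the control queue, vq 1 the event
 * queue, and vqs 2..VHOST_SCSI_MAX_VQ-1 carry SCSI commands.
 */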
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any time, one of the two counters tracks newly submitted commands,
	 * while we wait for the other one to drain to 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

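/*
 * Each vq keeps two inflight counters; inflight_idx selects the active
 * one. A flush flips the index so new commands take references on the
 * fresh counter, drops the initial kref on the old counter, and waits
 * on its completion, which fires once every command submitted before
 * the flip has put its reference back.
 */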
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_tpg *tpg = tmf->tpg;
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	/* Return the reserved TMF to the tpg's free list: new entry first, list head second. */
	mutex_lock(&tpg->tv_tpg_mutex);
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

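	/*
	 * Bound the number of pending events: past the limit, drop the
	 * event and flag EVENTS_MISSED so the guest knows to rescan.
	 */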
	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

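	/* Batch guest notifications: mark each used vq below, signal each one once at the end. */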
	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else {
			pr_err("Faulted on virtio_scsi_cmd_resp\n");
		}

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

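/*
 * Commands are preallocated per virtqueue; a tag from scsi_tags indexes
 * directly into svq->scsi_cmds. The sgl, prot_sgl and upages pointers
 * are carried across the memset below so their preallocated backing
 * memory is reused for every command.
 */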
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags, 0, false);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

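	/*
	 * Split the pinned byte range into one scatterlist entry per page;
	 * only the first page may start at a non-zero offset.
	 */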
	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}

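/*
 * virtio-scsi encodes the LUN in an 8-byte field: byte 0 must be 1,
 * byte 1 is the target, and bytes 2-3 hold a 14-bit flat LUN in big
 * endian order (the top bits of byte 2 carry the SAM addressing
 * method). E.g. lun_buf[2] = 0x40, lun_buf[3] = 0x05 decodes to LUN 5.
 */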
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = vq->iov[vc.out];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue.  This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

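/*
 * LUN RESET is serviced with a vhost_scsi_tmf previously reserved on
 * the tpg's tmf_queue; if none is available the request is rejected
 * rather than allocating one here.
 */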
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	if (list_empty(&tpg->tmf_queue)) {
		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
		mutex_unlock(&tpg->tv_tpg_mutex);
		goto send_reject;
	}

	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);
	mutex_unlock(&tpg->tv_tpg_mutex);

	tmf->tpg = tpg;
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d",
			       vhost32_to_cpu(vq, v_req.type));
1341                        continue;
1342                }
1343
1344                /*
1345                 * Validate the size of request and response buffers.
1346                 * Check for a sane response buffer so we can report
1347                 * early errors back to the guest.
1348                 */
1349                ret = vhost_scsi_chk_size(vq, &vc);
1350                if (ret)
1351                        goto err;
1352
1353                /*
1354                 * Get the rest of the request now that its size is known.
1355                 */
1356                vc.req += typ_size;
1357                vc.req_size -= typ_size;
1358
1359                ret = vhost_scsi_get_req(vq, &vc, &tpg);
1360                if (ret)
1361                        goto err;
1362
1363                if (v_req.type == VIRTIO_SCSI_T_TMF)
1364                        vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1365                else
1366                        vhost_scsi_send_an_resp(vs, vq, &vc);
1367err:
1368                /*
1369                 * ENXIO:  No more requests, or read error, wait for next kick
1370                 * EINVAL: Invalid response buffer, drop the request
1371                 * EIO:    Respond with bad target
1372                 * EAGAIN: Pending request
1373                 */
1374                if (ret == -ENXIO)
1375                        break;
1376                else if (ret == -EIO)
1377                        vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1378        } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1379out:
1380        mutex_unlock(&vq->mutex);
1381}
1382
1383static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1384{
1385        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1386                                                poll.work);
1387        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1388
1389        pr_debug("%s: The handling func for control queue.\n", __func__);
1390        vhost_scsi_ctl_handle_vq(vs, vq);
1391}
1392
1393static void
1394vhost_scsi_send_evt(struct vhost_scsi *vs,
1395                   struct vhost_scsi_tpg *tpg,
1396                   struct se_lun *lun,
1397                   u32 event,
1398                   u32 reason)
1399{
1400        struct vhost_scsi_evt *evt;
1401
1402        evt = vhost_scsi_allocate_evt(vs, event, reason);
1403        if (!evt)
1404                return;
1405
1406        if (tpg && lun) {
1407                /* TODO: share lun setup code with virtio-scsi.ko */
1408                /*
1409                 * Note: evt->event is zeroed when we allocate it and
1410                 * lun[4-7] need to be zero according to virtio-scsi spec.
1411                 */
1412                evt->event.lun[0] = 0x01;
1413                evt->event.lun[1] = tpg->tport_tpgt;
1414                if (lun->unpacked_lun >= 256)
1415                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1416                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1417        }
1418
1419        llist_add(&evt->list, &vs->vs_event_list);
1420        vhost_work_queue(&vs->dev, &vs->vs_event_work);
1421}
1422
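    /*
     * Kick handler for the event virtqueue: if events were dropped
     * earlier, queue a VIRTIO_SCSI_T_NO_EVENT so the guest can learn
     * that it lost events.
     */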
1423static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1424{
1425        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1426                                                poll.work);
1427        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1428
1429        mutex_lock(&vq->mutex);
1430        if (!vhost_vq_get_backend(vq))
1431                goto out;
1432
1433        if (vs->vs_events_missed)
1434                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1435out:
1436        mutex_unlock(&vq->mutex);
1437}
1438
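    /* Kick handler for the request (I/O) virtqueues. */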
1439static void vhost_scsi_handle_kick(struct vhost_work *work)
1440{
1441        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1442                                                poll.work);
1443        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1444
1445        vhost_scsi_handle_vq(vs, vq);
1446}
1447
1448static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1449{
1450        vhost_poll_flush(&vs->vqs[index].vq.poll);
1451}
1452
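    /*
     * Drain all inflight requests: swap in fresh inflight counters,
     * drop the initial reference on the old ones, flush vq polls and
     * pending work, then wait until every request started before the
     * flush has dropped its inflight reference.
     */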
1453/* Callers must hold dev mutex */
1454static void vhost_scsi_flush(struct vhost_scsi *vs)
1455{
1456        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1457        int i;
1458
1459        /* Init new inflight and remember the old inflight */
1460        vhost_scsi_init_inflight(vs, old_inflight);
1461
1462        /*
1463         * The inflight->kref was initialized to 1. We decrement it here to
1464         * indicate the start of the flush operation so that it will reach 0
1465         * when all the reqs are finished.
1466         */
1467        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1468                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1469
1470        /* Flush both the vhost poll and vhost work */
1471        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1472                vhost_scsi_flush_vq(vs, i);
1473        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1474        vhost_work_flush(&vs->dev, &vs->vs_event_work);
1475
1476        /* Wait for all reqs issued before the flush to be finished */
1477        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1478                wait_for_completion(&old_inflight[i]->comp);
1479}
1480
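    /* Free the tag bitmap and preallocated command pool for this vq. */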
1481static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1482{
1483        struct vhost_scsi_virtqueue *svq = container_of(vq,
1484                                        struct vhost_scsi_virtqueue, vq);
1485        struct vhost_scsi_cmd *tv_cmd;
1486        unsigned int i;
1487
1488        if (!svq->scsi_cmds)
1489                return;
1490
1491        for (i = 0; i < svq->max_cmds; i++) {
1492                tv_cmd = &svq->scsi_cmds[i];
1493
1494                kfree(tv_cmd->tvc_sgl);
1495                kfree(tv_cmd->tvc_prot_sgl);
1496                kfree(tv_cmd->tvc_upages);
1497        }
1498
1499        sbitmap_free(&svq->scsi_tags);
1500        kfree(svq->scsi_cmds);
1501        svq->scsi_cmds = NULL;
1502}
1503
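    /*
     * Preallocate the per-vq command pool: a tag bitmap plus max_cmds
     * commands, each with its data scatterlist, user-page array and
     * protection scatterlist, so the I/O path does not allocate.
     */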
1504static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1505{
1506        struct vhost_scsi_virtqueue *svq = container_of(vq,
1507                                        struct vhost_scsi_virtqueue, vq);
1508        struct vhost_scsi_cmd *tv_cmd;
1509        unsigned int i;
1510
1511        if (svq->scsi_cmds)
1512                return 0;
1513
1514        if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1515                              NUMA_NO_NODE))
1516                return -ENOMEM;
1517        svq->max_cmds = max_cmds;
1518
1519        svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1520        if (!svq->scsi_cmds) {
1521                sbitmap_free(&svq->scsi_tags);
1522                return -ENOMEM;
1523        }
1524
1525        for (i = 0; i < max_cmds; i++) {
1526                tv_cmd = &svq->scsi_cmds[i];
1527
1528                tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1529                                          sizeof(struct scatterlist),
1530                                          GFP_KERNEL);
1531                if (!tv_cmd->tvc_sgl) {
1532                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1533                        goto out;
1534                }
1535
1536                tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1537                                             sizeof(struct page *),
1538                                             GFP_KERNEL);
1539                if (!tv_cmd->tvc_upages) {
1540                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1541                        goto out;
1542                }
1543
1544                tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1545                                               sizeof(struct scatterlist),
1546                                               GFP_KERNEL);
1547                if (!tv_cmd->tvc_prot_sgl) {
1548                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1549                        goto out;
1550                }
1551        }
1552        return 0;
1553out:
1554        vhost_scsi_destroy_vq_cmds(vq);
1555        return -ENOMEM;
1556}
1557
1558/*
1559 * Called from vhost_scsi_ioctl() context to walk the list of available
1560 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1561 *
1562 *  The lock nesting rule is:
1563 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1564 */
1565static int
1566vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1567                        struct vhost_scsi_target *t)
1568{
1569        struct se_portal_group *se_tpg;
1570        struct vhost_scsi_tport *tv_tport;
1571        struct vhost_scsi_tpg *tpg;
1572        struct vhost_scsi_tpg **vs_tpg;
1573        struct vhost_virtqueue *vq;
1574        int index, ret, i, len;
1575        bool match = false;
1576
1577        mutex_lock(&vhost_scsi_mutex);
1578        mutex_lock(&vs->dev.mutex);
1579
1580        /* Verify that ring has been setup correctly. */
1581        for (index = 0; index < vs->dev.nvqs; ++index) {
1583                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1584                        ret = -EFAULT;
1585                        goto out;
1586                }
1587        }
1588
1589        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1590        vs_tpg = kzalloc(len, GFP_KERNEL);
1591        if (!vs_tpg) {
1592                ret = -ENOMEM;
1593                goto out;
1594        }
1595        if (vs->vs_tpg)
1596                memcpy(vs_tpg, vs->vs_tpg, len);
1597
1598        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1599                mutex_lock(&tpg->tv_tpg_mutex);
1600                if (!tpg->tpg_nexus) {
1601                        mutex_unlock(&tpg->tv_tpg_mutex);
1602                        continue;
1603                }
1604                if (tpg->tv_tpg_vhost_count != 0) {
1605                        mutex_unlock(&tpg->tv_tpg_mutex);
1606                        continue;
1607                }
1608                tv_tport = tpg->tport;
1609
1610                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1611                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1612                                mutex_unlock(&tpg->tv_tpg_mutex);
1613                                ret = -EEXIST;
1614                                goto undepend;
1615                        }
1616                        /*
1617                         * In order to ensure individual vhost-scsi configfs
1618                         * groups cannot be removed while in use by vhost ioctl,
1619                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
1620                         * dependency now.
1621                         */
1622                        se_tpg = &tpg->se_tpg;
1623                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1624                        if (ret) {
1625                                pr_warn("target_depend_item() failed: %d\n", ret);
1626                                mutex_unlock(&tpg->tv_tpg_mutex);
1627                                goto undepend;
1628                        }
1629                        tpg->tv_tpg_vhost_count++;
1630                        tpg->vhost_scsi = vs;
1631                        vs_tpg[tpg->tport_tpgt] = tpg;
1632                        match = true;
1633                }
1634                mutex_unlock(&tpg->tv_tpg_mutex);
1635        }
1636
1637        if (match) {
1638                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1639                       sizeof(vs->vs_vhost_wwpn));
1640
1641                for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1642                        vq = &vs->vqs[i].vq;
1643                        if (!vhost_vq_is_setup(vq))
1644                                continue;
1645
1646                        ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1647                        if (ret)
1648                                goto destroy_vq_cmds;
1649                }
1650
1651                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1652                        vq = &vs->vqs[i].vq;
1653                        mutex_lock(&vq->mutex);
1654                        vhost_vq_set_backend(vq, vs_tpg);
1655                        vhost_vq_init_access(vq);
1656                        mutex_unlock(&vq->mutex);
1657                }
1658                ret = 0;
1659        } else {
1660                ret = -EEXIST;
1661        }
1662
1663        /*
1664         * Act as synchronize_rcu to make sure access to
1665         * old vs->vs_tpg is finished.
1666         */
1667        vhost_scsi_flush(vs);
1668        kfree(vs->vs_tpg);
1669        vs->vs_tpg = vs_tpg;
1670        goto out;
1671
1672destroy_vq_cmds:
1673        for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1674                if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1675                        vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1676        }
1677undepend:
1678        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1679                tpg = vs_tpg[i];
1680                if (tpg) {
1681                        tpg->tv_tpg_vhost_count--;
1682                        target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1683                }
1684        }
1685        kfree(vs_tpg);
1686out:
1687        mutex_unlock(&vs->dev.mutex);
1688        mutex_unlock(&vhost_scsi_mutex);
1689        return ret;
1690}
1691
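    /*
     * Reverse of vhost_scsi_set_endpoint(): detach every TPG matching
     * the WWPN, drop the configfs dependencies taken at set-endpoint
     * time, clear the vq backends, and destroy the per-vq command
     * pools once inflight requests have drained.
     */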
1692static int
1693vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1694                          struct vhost_scsi_target *t)
1695{
1696        struct se_portal_group *se_tpg;
1697        struct vhost_scsi_tport *tv_tport;
1698        struct vhost_scsi_tpg *tpg;
1699        struct vhost_virtqueue *vq;
1700        bool match = false;
1701        int index, ret, i;
1702        u8 target;
1703
1704        mutex_lock(&vhost_scsi_mutex);
1705        mutex_lock(&vs->dev.mutex);
1706        /* Verify that ring has been setup correctly. */
1707        for (index = 0; index < vs->dev.nvqs; ++index) {
1708                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1709                        ret = -EFAULT;
1710                        goto err_dev;
1711                }
1712        }
1713
1714        if (!vs->vs_tpg) {
1715                ret = 0;
1716                goto err_dev;
1717        }
1718
1719        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1720                target = i;
1721                tpg = vs->vs_tpg[target];
1722                if (!tpg)
1723                        continue;
1724
1725                mutex_lock(&tpg->tv_tpg_mutex);
1726                tv_tport = tpg->tport;
1727                if (!tv_tport) {
1728                        ret = -ENODEV;
1729                        goto err_tpg;
1730                }
1731
1732                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1733                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1734                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1735                                tv_tport->tport_name, tpg->tport_tpgt,
1736                                t->vhost_wwpn, t->vhost_tpgt);
1737                        ret = -EINVAL;
1738                        goto err_tpg;
1739                }
1740                tpg->tv_tpg_vhost_count--;
1741                tpg->vhost_scsi = NULL;
1742                vs->vs_tpg[target] = NULL;
1743                match = true;
1744                mutex_unlock(&tpg->tv_tpg_mutex);
1745                /*
1746                 * Release se_tpg->tpg_group.cg_item configfs dependency now
1747                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1748                 */
1749                se_tpg = &tpg->se_tpg;
1750                target_undepend_item(&se_tpg->tpg_group.cg_item);
1751        }
1752        if (match) {
1753                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1754                        vq = &vs->vqs[i].vq;
1755                        mutex_lock(&vq->mutex);
1756                        vhost_vq_set_backend(vq, NULL);
1757                        mutex_unlock(&vq->mutex);
1758                        /*
1759                         * Make sure cmds are not running before tearing them
1760                         * down.
1761                         */
1762                        vhost_scsi_flush(vs);
1763                        vhost_scsi_destroy_vq_cmds(vq);
1764                }
1765        }
1766        /*
1767         * Act as synchronize_rcu to make sure access to
1768         * old vs->vs_tpg is finished.
1769         */
1770        vhost_scsi_flush(vs);
1771        kfree(vs->vs_tpg);
1772        vs->vs_tpg = NULL;
1773        WARN_ON(vs->vs_events_nr);
1774        mutex_unlock(&vs->dev.mutex);
1775        mutex_unlock(&vhost_scsi_mutex);
1776        return 0;
1777
1778err_tpg:
1779        mutex_unlock(&tpg->tv_tpg_mutex);
1780err_dev:
1781        mutex_unlock(&vs->dev.mutex);
1782        mutex_unlock(&vhost_scsi_mutex);
1783        return ret;
1784}
1785
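    /*
     * VHOST_SET_FEATURES handler: reject anything outside
     * VHOST_SCSI_FEATURES, require valid log access before accepting
     * VHOST_F_LOG_ALL, and propagate the acked features to every vq.
     */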
1786static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1787{
1788        struct vhost_virtqueue *vq;
1789        int i;
1790
1791        if (features & ~VHOST_SCSI_FEATURES)
1792                return -EOPNOTSUPP;
1793
1794        mutex_lock(&vs->dev.mutex);
1795        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1796            !vhost_log_access_ok(&vs->dev)) {
1797                mutex_unlock(&vs->dev.mutex);
1798                return -EFAULT;
1799        }
1800
1801        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1802                vq = &vs->vqs[i].vq;
1803                mutex_lock(&vq->mutex);
1804                vq->acked_features = features;
1805                mutex_unlock(&vq->mutex);
1806        }
1807        mutex_unlock(&vs->dev.mutex);
1808        return 0;
1809}
1810
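    /*
     * open() on /dev/vhost-scsi: allocate the device, wire up the kick
     * handlers for the control, event and request vqs, and set up the
     * initial inflight counters.
     */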
1811static int vhost_scsi_open(struct inode *inode, struct file *f)
1812{
1813        struct vhost_scsi *vs;
1814        struct vhost_virtqueue **vqs;
1815        int r = -ENOMEM, i;
1816
1817        vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1818        if (!vs)
1819                goto err_vs;
1820
1821        vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1822        if (!vqs)
1823                goto err_vqs;
1824
1825        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1826        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1827
1828        vs->vs_events_nr = 0;
1829        vs->vs_events_missed = false;
1830
1831        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1832        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1833        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1834        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1835        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1836                vqs[i] = &vs->vqs[i].vq;
1837                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1838        }
1839        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1840                       VHOST_SCSI_WEIGHT, 0, true, NULL);
1841
1842        vhost_scsi_init_inflight(vs, NULL);
1843
1844        f->private_data = vs;
1845        return 0;
1846
1847err_vqs:
1848        kvfree(vs);
1849err_vs:
1850        return r;
1851}
1852
1853static int vhost_scsi_release(struct inode *inode, struct file *f)
1854{
1855        struct vhost_scsi *vs = f->private_data;
1856        struct vhost_scsi_target t;
1857
1858        mutex_lock(&vs->dev.mutex);
1859        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1860        mutex_unlock(&vs->dev.mutex);
1861        vhost_scsi_clear_endpoint(vs, &t);
1862        vhost_dev_stop(&vs->dev);
1863        vhost_dev_cleanup(&vs->dev);
1864        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1865        vhost_scsi_flush(vs);
1866        kfree(vs->dev.vqs);
1867        kvfree(vs);
1868        return 0;
1869}
1870
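    /*
     * Device ioctls: endpoint setup/teardown, ABI version, the
     * events-missed flag and feature negotiation are handled here;
     * everything else falls through to the generic vhost dev and vring
     * ioctls.
     */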
1871static long
1872vhost_scsi_ioctl(struct file *f,
1873                 unsigned int ioctl,
1874                 unsigned long arg)
1875{
1876        struct vhost_scsi *vs = f->private_data;
1877        struct vhost_scsi_target backend;
1878        void __user *argp = (void __user *)arg;
1879        u64 __user *featurep = argp;
1880        u32 __user *eventsp = argp;
1881        u32 events_missed;
1882        u64 features;
1883        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1884        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1885
1886        switch (ioctl) {
1887        case VHOST_SCSI_SET_ENDPOINT:
1888                if (copy_from_user(&backend, argp, sizeof backend))
1889                        return -EFAULT;
1890                if (backend.reserved != 0)
1891                        return -EOPNOTSUPP;
1892
1893                return vhost_scsi_set_endpoint(vs, &backend);
1894        case VHOST_SCSI_CLEAR_ENDPOINT:
1895                if (copy_from_user(&backend, argp, sizeof backend))
1896                        return -EFAULT;
1897                if (backend.reserved != 0)
1898                        return -EOPNOTSUPP;
1899
1900                return vhost_scsi_clear_endpoint(vs, &backend);
1901        case VHOST_SCSI_GET_ABI_VERSION:
1902                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1903                        return -EFAULT;
1904                return 0;
1905        case VHOST_SCSI_SET_EVENTS_MISSED:
1906                if (get_user(events_missed, eventsp))
1907                        return -EFAULT;
1908                mutex_lock(&vq->mutex);
1909                vs->vs_events_missed = events_missed;
1910                mutex_unlock(&vq->mutex);
1911                return 0;
1912        case VHOST_SCSI_GET_EVENTS_MISSED:
1913                mutex_lock(&vq->mutex);
1914                events_missed = vs->vs_events_missed;
1915                mutex_unlock(&vq->mutex);
1916                if (put_user(events_missed, eventsp))
1917                        return -EFAULT;
1918                return 0;
1919        case VHOST_GET_FEATURES:
1920                features = VHOST_SCSI_FEATURES;
1921                if (copy_to_user(featurep, &features, sizeof features))
1922                        return -EFAULT;
1923                return 0;
1924        case VHOST_SET_FEATURES:
1925                if (copy_from_user(&features, featurep, sizeof features))
1926                        return -EFAULT;
1927                return vhost_scsi_set_features(vs, features);
1928        default:
1929                mutex_lock(&vs->dev.mutex);
1930                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1931                /* TODO: flush backend after dev ioctl. */
1932                if (r == -ENOIOCTLCMD)
1933                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1934                mutex_unlock(&vs->dev.mutex);
1935                return r;
1936        }
1937}
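
    /*
     * A minimal userspace sketch of endpoint setup (hypothetical, for
     * illustration only; the WWPN value is made up):
     *
     *	int fd = open("/dev/vhost-scsi", O_RDWR);
     *	struct vhost_scsi_target t = { 0 };
     *
     *	strncpy(t.vhost_wwpn, "naa.600140554cf3a18e",
     *		sizeof(t.vhost_wwpn) - 1);
     *	if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t) < 0)
     *		err(1, "VHOST_SCSI_SET_ENDPOINT");
     */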
1938
1939static const struct file_operations vhost_scsi_fops = {
1940        .owner          = THIS_MODULE,
1941        .release        = vhost_scsi_release,
1942        .unlocked_ioctl = vhost_scsi_ioctl,
1943        .compat_ioctl   = compat_ptr_ioctl,
1944        .open           = vhost_scsi_open,
1945        .llseek         = noop_llseek,
1946};
1947
1948static struct miscdevice vhost_scsi_misc = {
1949        .minor  = MISC_DYNAMIC_MINOR,
1950        .name   = "vhost-scsi",
1951        .fops   = &vhost_scsi_fops,
1952};
1953
1954static int __init vhost_scsi_register(void)
1955{
1956        return misc_register(&vhost_scsi_misc);
1957}
1958
1959static void vhost_scsi_deregister(void)
1960{
1961        misc_deregister(&vhost_scsi_misc);
1962}
1963
1964static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1965{
1966        switch (tport->tport_proto_id) {
1967        case SCSI_PROTOCOL_SAS:
1968                return "SAS";
1969        case SCSI_PROTOCOL_FCP:
1970                return "FCP";
1971        case SCSI_PROTOCOL_ISCSI:
1972                return "iSCSI";
1973        default:
1974                break;
1975        }
1976
1977        return "Unknown";
1978}
1979
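    /*
     * Tell the guest a LUN was added or removed by raising a
     * VIRTIO_SCSI_T_TRANSPORT_RESET event, but only when the guest has
     * negotiated VIRTIO_SCSI_F_HOTPLUG.
     */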
1980static void
1981vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1982                  struct se_lun *lun, bool plug)
1983{
1985        struct vhost_scsi *vs = tpg->vhost_scsi;
1986        struct vhost_virtqueue *vq;
1987        u32 reason;
1988
1989        if (!vs)
1990                return;
1991
1992        mutex_lock(&vs->dev.mutex);
1993
1994        if (plug)
1995                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1996        else
1997                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1998
1999        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2000        mutex_lock(&vq->mutex);
2001        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2002                vhost_scsi_send_evt(vs, tpg, lun,
2003                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2004        mutex_unlock(&vq->mutex);
2005        mutex_unlock(&vs->dev.mutex);
2006}
2007
2008static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2009{
2010        vhost_scsi_do_plug(tpg, lun, true);
2011}
2012
2013static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2014{
2015        vhost_scsi_do_plug(tpg, lun, false);
2016}
2017
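    /*
     * Called when a LUN is linked into this TPG through configfs: a
     * TMF struct is preallocated onto tpg->tmf_queue for later task
     * management handling, and the guest is notified of the new LUN.
     */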
2018static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2019                               struct se_lun *lun)
2020{
2021        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2022                                struct vhost_scsi_tpg, se_tpg);
2023        struct vhost_scsi_tmf *tmf;
2024
2025        tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
2026        if (!tmf)
2027                return -ENOMEM;
2028        INIT_LIST_HEAD(&tmf->queue_entry);
2029        vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
2030
2031        mutex_lock(&vhost_scsi_mutex);
2032
2033        mutex_lock(&tpg->tv_tpg_mutex);
2034        tpg->tv_tpg_port_count++;
2035        list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
2036        mutex_unlock(&tpg->tv_tpg_mutex);
2037
2038        vhost_scsi_hotplug(tpg, lun);
2039
2040        mutex_unlock(&vhost_scsi_mutex);
2041
2042        return 0;
2043}
2044
2045static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2046                                  struct se_lun *lun)
2047{
2048        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2049                                struct vhost_scsi_tpg, se_tpg);
2050        struct vhost_scsi_tmf *tmf;
2051
2052        mutex_lock(&vhost_scsi_mutex);
2053
2054        mutex_lock(&tpg->tv_tpg_mutex);
2055        tpg->tv_tpg_port_count--;
2056        tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
2057                               queue_entry);
2058        list_del(&tmf->queue_entry);
2059        kfree(tmf);
2060        mutex_unlock(&tpg->tv_tpg_mutex);
2061
2062        vhost_scsi_hotunplug(tpg, lun);
2063
2064        mutex_unlock(&vhost_scsi_mutex);
2065}
2066
2067static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2068                struct config_item *item, const char *page, size_t count)
2069{
2070        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2071        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2072                                struct vhost_scsi_tpg, se_tpg);
2073        unsigned long val;
2074        int ret = kstrtoul(page, 0, &val);
2075
2076        if (ret) {
2077                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2078                return ret;
2079        }
2080        if (val != 0 && val != 1 && val != 3) {
2081                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2082                return -EINVAL;
2083        }
2084        tpg->tv_fabric_prot_type = val;
2085
2086        return count;
2087}
2088
2089static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2090                struct config_item *item, char *page)
2091{
2092        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2093        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2094                                struct vhost_scsi_tpg, se_tpg);
2095
2096        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
2097}
2098
2099CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2100
2101static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2102        &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2103        NULL,
2104};
2105
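    /*
     * Create the I_T nexus for this TPG: set up a target core session
     * (with DIN/DOUT protection passthrough) named after the configfs
     * group, and publish it as tpg->tpg_nexus.
     */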
2106static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2107                                const char *name)
2108{
2109        struct vhost_scsi_nexus *tv_nexus;
2110
2111        mutex_lock(&tpg->tv_tpg_mutex);
2112        if (tpg->tpg_nexus) {
2113                mutex_unlock(&tpg->tv_tpg_mutex);
2114                pr_debug("tpg->tpg_nexus already exists\n");
2115                return -EEXIST;
2116        }
2117
2118        tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2119        if (!tv_nexus) {
2120                mutex_unlock(&tpg->tv_tpg_mutex);
2121                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2122                return -ENOMEM;
2123        }
2124        /*
2125         * Since we are running in 'demo mode' this call will generate a
2126         * struct se_node_acl for the vhost_scsi struct se_portal_group with
2127         * the SCSI Initiator port name of the passed configfs group 'name'.
2128         */
2129        tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2130                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2131                                        (unsigned char *)name, tv_nexus, NULL);
2132        if (IS_ERR(tv_nexus->tvn_se_sess)) {
2133                mutex_unlock(&tpg->tv_tpg_mutex);
2134                kfree(tv_nexus);
2135                return -ENOMEM;
2136        }
2137        tpg->tpg_nexus = tv_nexus;
2138
2139        mutex_unlock(&tpg->tv_tpg_mutex);
2140        return 0;
2141}
2142
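    /*
     * Tear down the I_T nexus; refused while the TPG still has active
     * ports or vhost references.
     */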
2143static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2144{
2145        struct se_session *se_sess;
2146        struct vhost_scsi_nexus *tv_nexus;
2147
2148        mutex_lock(&tpg->tv_tpg_mutex);
2149        tv_nexus = tpg->tpg_nexus;
2150        if (!tv_nexus) {
2151                mutex_unlock(&tpg->tv_tpg_mutex);
2152                return -ENODEV;
2153        }
2154
2155        se_sess = tv_nexus->tvn_se_sess;
2156        if (!se_sess) {
2157                mutex_unlock(&tpg->tv_tpg_mutex);
2158                return -ENODEV;
2159        }
2160
2161        if (tpg->tv_tpg_port_count != 0) {
2162                mutex_unlock(&tpg->tv_tpg_mutex);
2163                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2164                        " active TPG port count: %d\n",
2165                        tpg->tv_tpg_port_count);
2166                return -EBUSY;
2167        }
2168
2169        if (tpg->tv_tpg_vhost_count != 0) {
2170                mutex_unlock(&tpg->tv_tpg_mutex);
2171                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2172                        " active TPG vhost count: %d\n",
2173                        tpg->tv_tpg_vhost_count);
2174                return -EBUSY;
2175        }
2176
2177        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2178                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2179                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2180
2181        /*
2182         * Release the SCSI I_T Nexus to the emulated vhost Target Port
2183         */
2184        target_remove_session(se_sess);
2185        tpg->tpg_nexus = NULL;
2186        mutex_unlock(&tpg->tv_tpg_mutex);
2187
2188        kfree(tv_nexus);
2189        return 0;
2190}
2191
2192static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2193{
2194        struct se_portal_group *se_tpg = to_tpg(item);
2195        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2196                                struct vhost_scsi_tpg, se_tpg);
2197        struct vhost_scsi_nexus *tv_nexus;
2198        ssize_t ret;
2199
2200        mutex_lock(&tpg->tv_tpg_mutex);
2201        tv_nexus = tpg->tpg_nexus;
2202        if (!tv_nexus) {
2203                mutex_unlock(&tpg->tv_tpg_mutex);
2204                return -ENODEV;
2205        }
2206        ret = snprintf(page, PAGE_SIZE, "%s\n",
2207                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2208        mutex_unlock(&tpg->tv_tpg_mutex);
2209
2210        return ret;
2211}
2212
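    /*
     * configfs "nexus" store: writing "NULL" drops the active nexus;
     * otherwise the initiator WWN must carry a prefix (naa., fc. or
     * iqn.) matching the tport's protocol before the nexus is created.
     */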
2213static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2214                const char *page, size_t count)
2215{
2216        struct se_portal_group *se_tpg = to_tpg(item);
2217        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2218                                struct vhost_scsi_tpg, se_tpg);
2219        struct vhost_scsi_tport *tport_wwn = tpg->tport;
2220        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2221        int ret;
2222        /*
2223         * Shut down the active I_T nexus if 'NULL' is passed.
2224         */
2225        if (!strncmp(page, "NULL", 4)) {
2226                ret = vhost_scsi_drop_nexus(tpg);
2227                return (!ret) ? count : ret;
2228        }
2229        /*
2230         * Otherwise make sure the passed virtual Initiator port WWN matches
2231         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2232         * vhost_scsi_make_nexus().
2233         */
2234        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2235                pr_err("Emulated NAA SAS Address: %s, exceeds"
2236                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
2237                return -EINVAL;
2238        }
2239        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2240
2241        ptr = strstr(i_port, "naa.");
2242        if (ptr) {
2243                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2244                        pr_err("Passed SAS Initiator Port %s does not"
2245                                " match target port protoid: %s\n", i_port,
2246                                vhost_scsi_dump_proto_id(tport_wwn));
2247                        return -EINVAL;
2248                }
2249                port_ptr = &i_port[0];
2250                goto check_newline;
2251        }
2252        ptr = strstr(i_port, "fc.");
2253        if (ptr) {
2254                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2255                        pr_err("Passed FCP Initiator Port %s does not"
2256                                " match target port protoid: %s\n", i_port,
2257                                vhost_scsi_dump_proto_id(tport_wwn));
2258                        return -EINVAL;
2259                }
2260                port_ptr = &i_port[3]; /* Skip over "fc." */
2261                goto check_newline;
2262        }
2263        ptr = strstr(i_port, "iqn.");
2264        if (ptr) {
2265                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2266                        pr_err("Passed iSCSI Initiator Port %s does not"
2267                                " match target port protoid: %s\n", i_port,
2268                                vhost_scsi_dump_proto_id(tport_wwn));
2269                        return -EINVAL;
2270                }
2271                port_ptr = &i_port[0];
2272                goto check_newline;
2273        }
2274        pr_err("Unable to locate prefix for emulated Initiator Port:"
2275                        " %s\n", i_port);
2276        return -EINVAL;
2277        /*
2278         * Clear any trailing newline for the NAA WWN
2279         */
2280check_newline:
2281        if (i_port[strlen(i_port)-1] == '\n')
2282                i_port[strlen(i_port)-1] = '\0';
2283
2284        ret = vhost_scsi_make_nexus(tpg, port_ptr);
2285        if (ret < 0)
2286                return ret;
2287
2288        return count;
2289}
2290
2291CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2292
2293static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2294        &vhost_scsi_tpg_attr_nexus,
2295        NULL,
2296};
2297
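    /*
     * configfs: create a "tpgt_N" group under a target port, where N
     * is a decimal TPG tag below VHOST_SCSI_MAX_TARGET.
     */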
2298static struct se_portal_group *
2299vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2300{
2301        struct vhost_scsi_tport *tport = container_of(wwn,
2302                        struct vhost_scsi_tport, tport_wwn);
2303
2304        struct vhost_scsi_tpg *tpg;
2305        u16 tpgt;
2306        int ret;
2307
2308        if (strstr(name, "tpgt_") != name)
2309                return ERR_PTR(-EINVAL);
2310        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2311                return ERR_PTR(-EINVAL);
2312
2313        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2314        if (!tpg) {
2315                pr_err("Unable to allocate struct vhost_scsi_tpg");
2316                return ERR_PTR(-ENOMEM);
2317        }
2318        mutex_init(&tpg->tv_tpg_mutex);
2319        INIT_LIST_HEAD(&tpg->tv_tpg_list);
2320        INIT_LIST_HEAD(&tpg->tmf_queue);
2321        tpg->tport = tport;
2322        tpg->tport_tpgt = tpgt;
2323
2324        ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2325        if (ret < 0) {
2326                kfree(tpg);
2327                return ERR_PTR(ret);
2328        }
2329        mutex_lock(&vhost_scsi_mutex);
2330        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2331        mutex_unlock(&vhost_scsi_mutex);
2332
2333        return &tpg->se_tpg;
2334}
2335
2336static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2337{
2338        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2339                                struct vhost_scsi_tpg, se_tpg);
2340
2341        mutex_lock(&vhost_scsi_mutex);
2342        list_del(&tpg->tv_tpg_list);
2343        mutex_unlock(&vhost_scsi_mutex);
2344        /*
2345         * Release the virtual I_T Nexus for this vhost TPG
2346         */
2347        vhost_scsi_drop_nexus(tpg);
2348        /*
2349         * Deregister the se_tpg from TCM.
2350         */
2351        core_tpg_deregister(se_tpg);
2352        kfree(tpg);
2353}
2354
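    /*
     * configfs: create a target port.  The protocol (SAS, FCP or
     * iSCSI) is inferred from the prefix of the directory name.
     */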
2355static struct se_wwn *
2356vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2357                     struct config_group *group,
2358                     const char *name)
2359{
2360        struct vhost_scsi_tport *tport;
2361        char *ptr;
2362        u64 wwpn = 0;
2363        int off = 0;
2364
2365        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2366                return ERR_PTR(-EINVAL); */
2367
2368        tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2369        if (!tport) {
2370                pr_err("Unable to allocate struct vhost_scsi_tport");
2371                return ERR_PTR(-ENOMEM);
2372        }
2373        tport->tport_wwpn = wwpn;
2374        /*
2375         * Determine the emulated Protocol Identifier and Target Port Name
2376         * based on the incoming configfs directory name.
2377         */
2378        ptr = strstr(name, "naa.");
2379        if (ptr) {
2380                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2381                goto check_len;
2382        }
2383        ptr = strstr(name, "fc.");
2384        if (ptr) {
2385                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2386                off = 3; /* Skip over "fc." */
2387                goto check_len;
2388        }
2389        ptr = strstr(name, "iqn.");
2390        if (ptr) {
2391                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2392                goto check_len;
2393        }
2394
2395        pr_err("Unable to locate prefix for emulated Target Port:"
2396                        " %s\n", name);
2397        kfree(tport);
2398        return ERR_PTR(-EINVAL);
2399
2400check_len:
2401        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2402                pr_err("Emulated %s Address: %s, exceeds"
2403                        " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2404                        VHOST_SCSI_NAMELEN);
2405                kfree(tport);
2406                return ERR_PTR(-EINVAL);
2407        }
2408        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2409
2410        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2411                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2412
2413        return &tport->tport_wwn;
2414}
2415
2416static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2417{
2418        struct vhost_scsi_tport *tport = container_of(wwn,
2419                                struct vhost_scsi_tport, tport_wwn);
2420
2421        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2422                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2423                tport->tport_name);
2424
2425        kfree(tport);
2426}
2427
2428static ssize_t
2429vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2430{
2431        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2432                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2433                utsname()->machine);
2434}
2435
2436CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2437
2438static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2439        &vhost_scsi_wwn_attr_version,
2440        NULL,
2441};
2442
2443static const struct target_core_fabric_ops vhost_scsi_ops = {
2444        .module                         = THIS_MODULE,
2445        .fabric_name                    = "vhost",
2446        .max_data_sg_nents              = VHOST_SCSI_PREALLOC_SGLS,
2447        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2448        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2449        .tpg_check_demo_mode            = vhost_scsi_check_true,
2450        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2451        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2452        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2453        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2454        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2455        .release_cmd                    = vhost_scsi_release_cmd,
2456        .check_stop_free                = vhost_scsi_check_stop_free,
2457        .sess_get_index                 = vhost_scsi_sess_get_index,
2458        .sess_get_initiator_sid         = NULL,
2459        .write_pending                  = vhost_scsi_write_pending,
2460        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2461        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2462        .queue_data_in                  = vhost_scsi_queue_data_in,
2463        .queue_status                   = vhost_scsi_queue_status,
2464        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2465        .aborted_task                   = vhost_scsi_aborted_task,
2466        /*
2467         * Set up callers for generic logic in target_core_fabric_configfs.c
2468         */
2469        .fabric_make_wwn                = vhost_scsi_make_tport,
2470        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2471        .fabric_make_tpg                = vhost_scsi_make_tpg,
2472        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2473        .fabric_post_link               = vhost_scsi_port_link,
2474        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2475
2476        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2477        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2478        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2479};
2480
2481static int __init vhost_scsi_init(void)
2482{
2483        int ret = -ENOMEM;
2484
2485        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2486                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2487                utsname()->machine);
2488
2489        /*
2490         * Use our own dedicated workqueue for submitting I/O into
2491         * target core to avoid contention within system_wq.
2492         */
2493        vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2494        if (!vhost_scsi_workqueue)
2495                goto out;
2496
2497        ret = vhost_scsi_register();
2498        if (ret < 0)
2499                goto out_destroy_workqueue;
2500
2501        ret = target_register_template(&vhost_scsi_ops);
2502        if (ret < 0)
2503                goto out_vhost_scsi_deregister;
2504
2505        return 0;
2506
2507out_vhost_scsi_deregister:
2508        vhost_scsi_deregister();
2509out_destroy_workqueue:
2510        destroy_workqueue(vhost_scsi_workqueue);
2511out:
2512        return ret;
2513}
2514
2515static void vhost_scsi_exit(void)
2516{
2517        target_unregister_template(&vhost_scsi_ops);
2518        vhost_scsi_deregister();
2519        destroy_workqueue(vhost_scsi_workqueue);
2520}
2521
2522MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2523MODULE_ALIAS("tcm_vhost");
2524MODULE_LICENSE("GPL");
2525module_init(vhost_scsi_init);
2526module_exit(vhost_scsi_exit);
2527