linux/drivers/vhost/scsi.c
   1/*******************************************************************************
   2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
   3 *
   4 * (C) Copyright 2010-2013 Datera, Inc.
   5 * (C) Copyright 2010-2012 IBM Corp.
   6 *
   7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
   8 *
   9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
  10 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21 *
  22 ****************************************************************************/
  23
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <generated/utsrelease.h>
  27#include <linux/utsname.h>
  28#include <linux/init.h>
  29#include <linux/slab.h>
  30#include <linux/kthread.h>
  31#include <linux/types.h>
  32#include <linux/string.h>
  33#include <linux/configfs.h>
  34#include <linux/ctype.h>
  35#include <linux/compat.h>
  36#include <linux/eventfd.h>
  37#include <linux/fs.h>
  38#include <linux/vmalloc.h>
  39#include <linux/miscdevice.h>
  40#include <asm/unaligned.h>
  41#include <scsi/scsi_common.h>
  42#include <scsi/scsi_proto.h>
  43#include <target/target_core_base.h>
  44#include <target/target_core_fabric.h>
  45#include <linux/vhost.h>
  46#include <linux/virtio_scsi.h>
  47#include <linux/llist.h>
  48#include <linux/bitmap.h>
  49#include <linux/percpu_ida.h>
  50
  51#include "vhost.h"
  52
  53#define VHOST_SCSI_VERSION  "v0.1"
  54#define VHOST_SCSI_NAMELEN 256
  55#define VHOST_SCSI_MAX_CDB_SIZE 32
  56#define VHOST_SCSI_DEFAULT_TAGS 256
  57#define VHOST_SCSI_PREALLOC_SGLS 2048
  58#define VHOST_SCSI_PREALLOC_UPAGES 2048
  59#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
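/*
 * VHOST_SCSI_DEFAULT_TAGS sizes the per-nexus session tag pool that
 * vhost_scsi_get_tag() allocates commands from, while the PREALLOC_*
 * values bound the scatterlist, protection scatterlist and pinned-page
 * arrays attached to each pre-allocated command (enforced in
 * vhost_scsi_mapal() and vhost_scsi_map_to_sgl() below).
 */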
  60
  61struct vhost_scsi_inflight {
  62        /* Wait for the flush operation to finish */
  63        struct completion comp;
  64        /* Refcount for the inflight reqs */
  65        struct kref kref;
  66};
  67
  68struct vhost_scsi_cmd {
  69        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
  70        int tvc_vq_desc;
  71        /* virtio-scsi initiator task attribute */
  72        int tvc_task_attr;
  73        /* virtio-scsi response incoming iovecs */
  74        int tvc_in_iovs;
  75        /* virtio-scsi initiator data direction */
  76        enum dma_data_direction tvc_data_direction;
  77        /* Expected data transfer length from virtio-scsi header */
  78        u32 tvc_exp_data_len;
  79        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
  80        u64 tvc_tag;
  81        /* The number of scatterlists associated with this cmd */
  82        u32 tvc_sgl_count;
  83        u32 tvc_prot_sgl_count;
  84        /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
  85        u32 tvc_lun;
  86        /* Pointer to the SGL formatted memory from virtio-scsi */
  87        struct scatterlist *tvc_sgl;
  88        struct scatterlist *tvc_prot_sgl;
  89        struct page **tvc_upages;
   90        /* Response header iovec copied from the guest request descriptor */
  91        struct iovec tvc_resp_iov;
  92        /* Pointer to vhost_scsi for our device */
  93        struct vhost_scsi *tvc_vhost;
  94        /* Pointer to vhost_virtqueue for the cmd */
  95        struct vhost_virtqueue *tvc_vq;
  96        /* Pointer to vhost nexus memory */
  97        struct vhost_scsi_nexus *tvc_nexus;
  98        /* The TCM I/O descriptor that is accessed via container_of() */
  99        struct se_cmd tvc_se_cmd;
 100        /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 101        struct work_struct work;
 102        /* Copy of the incoming SCSI command descriptor block (CDB) */
 103        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 104        /* Sense buffer that will be mapped into outgoing status */
 105        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 106        /* Completed commands list, serviced from vhost worker thread */
 107        struct llist_node tvc_completion_list;
 108        /* Used to track inflight cmd */
 109        struct vhost_scsi_inflight *inflight;
 110};
 111
 112struct vhost_scsi_nexus {
 113        /* Pointer to TCM session for I_T Nexus */
 114        struct se_session *tvn_se_sess;
 115};
 116
 117struct vhost_scsi_tpg {
 118        /* Vhost port target portal group tag for TCM */
 119        u16 tport_tpgt;
  120 * Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
 121        int tv_tpg_port_count;
 122        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 123        int tv_tpg_vhost_count;
 124        /* Used for enabling T10-PI with legacy devices */
 125        int tv_fabric_prot_type;
 126        /* list for vhost_scsi_list */
 127        struct list_head tv_tpg_list;
 128        /* Used to protect access for tpg_nexus */
 129        struct mutex tv_tpg_mutex;
 130        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
 131        struct vhost_scsi_nexus *tpg_nexus;
 132        /* Pointer back to vhost_scsi_tport */
 133        struct vhost_scsi_tport *tport;
 134        /* Returned by vhost_scsi_make_tpg() */
 135        struct se_portal_group se_tpg;
 136        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 137        struct vhost_scsi *vhost_scsi;
 138};
 139
 140struct vhost_scsi_tport {
 141        /* SCSI protocol the tport is providing */
 142        u8 tport_proto_id;
 143        /* Binary World Wide unique Port Name for Vhost Target port */
 144        u64 tport_wwpn;
 145        /* ASCII formatted WWPN for Vhost Target port */
 146        char tport_name[VHOST_SCSI_NAMELEN];
 147        /* Returned by vhost_scsi_make_tport() */
 148        struct se_wwn tport_wwn;
 149};
 150
 151struct vhost_scsi_evt {
 152        /* event to be sent to guest */
 153        struct virtio_scsi_event event;
 154        /* event list, serviced from vhost worker thread */
 155        struct llist_node list;
 156};
 157
 158enum {
 159        VHOST_SCSI_VQ_CTL = 0,
 160        VHOST_SCSI_VQ_EVT = 1,
 161        VHOST_SCSI_VQ_IO = 2,
 162};
 163
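/*
 * Virtio feature bits offered to the guest; vhost_scsi_set_features()
 * below rejects any bits outside this mask when userspace acks features.
 */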
 164/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 165enum {
 166        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
 167                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
 168};
 169
 170#define VHOST_SCSI_MAX_TARGET   256
 171#define VHOST_SCSI_MAX_VQ       128
 172#define VHOST_SCSI_MAX_EVENT    128
 173
 174struct vhost_scsi_virtqueue {
 175        struct vhost_virtqueue vq;
 176        /*
  177         * Reference counting for inflight reqs, used by the flush operation.
  178         * At any time one counter tracks newly submitted commands, while we
  179         * wait for the other one to reach 0.
 180         */
 181        struct vhost_scsi_inflight inflights[2];
 182        /*
 183         * Indicate current inflight in use, protected by vq->mutex.
 184         * Writers must also take dev mutex and flush under it.
 185         */
 186        int inflight_idx;
 187};
 188
 189struct vhost_scsi {
 190        /* Protected by vhost_scsi->dev.mutex */
 191        struct vhost_scsi_tpg **vs_tpg;
 192        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 193
 194        struct vhost_dev dev;
 195        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 196
 197        struct vhost_work vs_completion_work; /* cmd completion work item */
 198        struct llist_head vs_completion_list; /* cmd completion queue */
 199
 200        struct vhost_work vs_event_work; /* evt injection work item */
 201        struct llist_head vs_event_list; /* evt injection queue */
 202
 203        bool vs_events_missed; /* any missed events, protected by vq->mutex */
 204        int vs_events_nr; /* num of pending events, protected by vq->mutex */
 205};
 206
 207static struct workqueue_struct *vhost_scsi_workqueue;
 208
  209/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
 210static DEFINE_MUTEX(vhost_scsi_mutex);
 211static LIST_HEAD(vhost_scsi_list);
 212
 213static int iov_num_pages(void __user *iov_base, size_t iov_len)
 214{
 215        return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
 216               ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 217}
 218
 219static void vhost_scsi_done_inflight(struct kref *kref)
 220{
 221        struct vhost_scsi_inflight *inflight;
 222
 223        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
 224        complete(&inflight->comp);
 225}
 226
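/*
 * Flip every virtqueue to a fresh inflight counter and (optionally) hand
 * the previous generation back to the caller; vhost_scsi_flush() then
 * waits on the old counters until all outstanding commands drop their refs.
 */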
 227static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 228                                    struct vhost_scsi_inflight *old_inflight[])
 229{
 230        struct vhost_scsi_inflight *new_inflight;
 231        struct vhost_virtqueue *vq;
 232        int idx, i;
 233
 234        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 235                vq = &vs->vqs[i].vq;
 236
 237                mutex_lock(&vq->mutex);
 238
  239                /* store old inflight */
 240                idx = vs->vqs[i].inflight_idx;
 241                if (old_inflight)
 242                        old_inflight[i] = &vs->vqs[i].inflights[idx];
 243
  244                /* set up new inflight */
 245                vs->vqs[i].inflight_idx = idx ^ 1;
 246                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
 247                kref_init(&new_inflight->kref);
 248                init_completion(&new_inflight->comp);
 249
 250                mutex_unlock(&vq->mutex);
 251        }
 252}
 253
 254static struct vhost_scsi_inflight *
 255vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 256{
 257        struct vhost_scsi_inflight *inflight;
 258        struct vhost_scsi_virtqueue *svq;
 259
 260        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
 261        inflight = &svq->inflights[svq->inflight_idx];
 262        kref_get(&inflight->kref);
 263
 264        return inflight;
 265}
 266
 267static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 268{
 269        kref_put(&inflight->kref, vhost_scsi_done_inflight);
 270}
 271
 272static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 273{
 274        return 1;
 275}
 276
 277static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 278{
 279        return 0;
 280}
 281
 282static char *vhost_scsi_get_fabric_name(void)
 283{
 284        return "vhost";
 285}
 286
 287static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 288{
 289        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 290                                struct vhost_scsi_tpg, se_tpg);
 291        struct vhost_scsi_tport *tport = tpg->tport;
 292
 293        return &tport->tport_name[0];
 294}
 295
 296static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 297{
 298        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 299                                struct vhost_scsi_tpg, se_tpg);
 300        return tpg->tport_tpgt;
 301}
 302
 303static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 304{
 305        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 306                                struct vhost_scsi_tpg, se_tpg);
 307
 308        return tpg->tv_fabric_prot_type;
 309}
 310
 311static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 312{
 313        return 1;
 314}
 315
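/*
 * Final per-command teardown invoked by the target core: unpin any data
 * and protection pages mapped from guest memory, drop the inflight
 * reference and return the tag to the session's percpu_ida pool.
 */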
 316static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 317{
 318        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 319                                struct vhost_scsi_cmd, tvc_se_cmd);
 320        struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 321        int i;
 322
 323        if (tv_cmd->tvc_sgl_count) {
 324                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 325                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 326        }
 327        if (tv_cmd->tvc_prot_sgl_count) {
 328                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
 329                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 330        }
 331
 332        vhost_scsi_put_inflight(tv_cmd->inflight);
 333        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 334}
 335
 336static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 337{
 338        return 0;
 339}
 340
 341static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 342{
 343        /* Go ahead and process the write immediately */
 344        target_execute_cmd(se_cmd);
 345        return 0;
 346}
 347
 348static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 349{
 350        return 0;
 351}
 352
 353static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 354{
 355        return;
 356}
 357
 358static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 359{
 360        return 0;
 361}
 362
 363static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 364{
 365        struct vhost_scsi *vs = cmd->tvc_vhost;
 366
 367        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 368
 369        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 370}
 371
 372static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 373{
 374        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 375                                struct vhost_scsi_cmd, tvc_se_cmd);
 376        vhost_scsi_complete_cmd(cmd);
 377        return 0;
 378}
 379
 380static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 381{
 382        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
 383                                struct vhost_scsi_cmd, tvc_se_cmd);
 384        vhost_scsi_complete_cmd(cmd);
 385        return 0;
 386}
 387
 388static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 389{
 390        return;
 391}
 392
 393static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 394{
 395        return;
 396}
 397
 398static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 399{
 400        vs->vs_events_nr--;
 401        kfree(evt);
 402}
 403
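/*
 * Allocate a descriptor for the event virtqueue.  Returns NULL and sets
 * vs_events_missed when more than VHOST_SCSI_MAX_EVENT events are
 * pending or on allocation failure; vs_events_nr and vs_events_missed
 * are protected by the event vq's mutex.
 */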
 404static struct vhost_scsi_evt *
 405vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 406                       u32 event, u32 reason)
 407{
 408        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 409        struct vhost_scsi_evt *evt;
 410
 411        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 412                vs->vs_events_missed = true;
 413                return NULL;
 414        }
 415
 416        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 417        if (!evt) {
 418                vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 419                vs->vs_events_missed = true;
 420                return NULL;
 421        }
 422
 423        evt->event.event = cpu_to_vhost32(vq, event);
 424        evt->event.reason = cpu_to_vhost32(vq, reason);
 425        vs->vs_events_nr++;
 426
 427        return evt;
 428}
 429
 430static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 431{
 432        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 433
 434        /* TODO locking against target/backend threads? */
 435        transport_generic_free_cmd(se_cmd, 0);
 436
 437}
 438
 439static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 440{
 441        return target_put_sess_cmd(se_cmd);
 442}
 443
 444static void
 445vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 446{
 447        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 448        struct virtio_scsi_event *event = &evt->event;
 449        struct virtio_scsi_event __user *eventp;
 450        unsigned out, in;
 451        int head, ret;
 452
 453        if (!vq->private_data) {
 454                vs->vs_events_missed = true;
 455                return;
 456        }
 457
 458again:
 459        vhost_disable_notify(&vs->dev, vq);
 460        head = vhost_get_vq_desc(vq, vq->iov,
 461                        ARRAY_SIZE(vq->iov), &out, &in,
 462                        NULL, NULL);
 463        if (head < 0) {
 464                vs->vs_events_missed = true;
 465                return;
 466        }
 467        if (head == vq->num) {
 468                if (vhost_enable_notify(&vs->dev, vq))
 469                        goto again;
 470                vs->vs_events_missed = true;
 471                return;
 472        }
 473
 474        if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
 475                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
 476                                vq->iov[out].iov_len);
 477                vs->vs_events_missed = true;
 478                return;
 479        }
 480
 481        if (vs->vs_events_missed) {
 482                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
 483                vs->vs_events_missed = false;
 484        }
 485
 486        eventp = vq->iov[out].iov_base;
 487        ret = __copy_to_user(eventp, event, sizeof(*event));
 488        if (!ret)
 489                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 490        else
 491                vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 492}
 493
 494static void vhost_scsi_evt_work(struct vhost_work *work)
 495{
 496        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 497                                        vs_event_work);
 498        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 499        struct vhost_scsi_evt *evt;
 500        struct llist_node *llnode;
 501
 502        mutex_lock(&vq->mutex);
 503        llnode = llist_del_all(&vs->vs_event_list);
 504        while (llnode) {
 505                evt = llist_entry(llnode, struct vhost_scsi_evt, list);
 506                llnode = llist_next(llnode);
 507                vhost_scsi_do_evt_work(vs, evt);
 508                vhost_scsi_free_evt(vs, evt);
 509        }
 510        mutex_unlock(&vq->mutex);
 511}
 512
 513/* Fill in status and signal that we are done processing this command
 514 *
 515 * This is scheduled in the vhost work queue so we are called with the owner
 516 * process mm and can access the vring.
 517 */
 518static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 519{
 520        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 521                                        vs_completion_work);
 522        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 523        struct virtio_scsi_cmd_resp v_rsp;
 524        struct vhost_scsi_cmd *cmd;
 525        struct llist_node *llnode;
 526        struct se_cmd *se_cmd;
 527        struct iov_iter iov_iter;
 528        int ret, vq;
 529
 530        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 531        llnode = llist_del_all(&vs->vs_completion_list);
 532        while (llnode) {
 533                cmd = llist_entry(llnode, struct vhost_scsi_cmd,
 534                                     tvc_completion_list);
 535                llnode = llist_next(llnode);
 536                se_cmd = &cmd->tvc_se_cmd;
 537
 538                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 539                        cmd, se_cmd->residual_count, se_cmd->scsi_status);
 540
 541                memset(&v_rsp, 0, sizeof(v_rsp));
 542                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
 543                /* TODO is status_qualifier field needed? */
 544                v_rsp.status = se_cmd->scsi_status;
 545                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
 546                                                 se_cmd->scsi_sense_length);
 547                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 548                       se_cmd->scsi_sense_length);
 549
 550                iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
 551                              cmd->tvc_in_iovs, sizeof(v_rsp));
 552                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 553                if (likely(ret == sizeof(v_rsp))) {
 554                        struct vhost_scsi_virtqueue *q;
 555                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 556                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 557                        vq = q - vs->vqs;
 558                        __set_bit(vq, signal);
 559                } else
 560                        pr_err("Faulted on virtio_scsi_cmd_resp\n");
 561
 562                vhost_scsi_free_cmd(cmd);
 563        }
 564
 565        vq = -1;
 566        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 567                < VHOST_SCSI_MAX_VQ)
 568                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 569}
 570
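/*
 * Grab a pre-allocated command from the nexus session tag pool.  The
 * command is zeroed for reuse, but its pre-allocated scatterlist,
 * protection scatterlist and page arrays are saved across the memset
 * and reattached afterwards.
 */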
 571static struct vhost_scsi_cmd *
 572vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 573                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 574                   u32 exp_data_len, int data_direction)
 575{
 576        struct vhost_scsi_cmd *cmd;
 577        struct vhost_scsi_nexus *tv_nexus;
 578        struct se_session *se_sess;
 579        struct scatterlist *sg, *prot_sg;
 580        struct page **pages;
 581        int tag;
 582
 583        tv_nexus = tpg->tpg_nexus;
 584        if (!tv_nexus) {
 585                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 586                return ERR_PTR(-EIO);
 587        }
 588        se_sess = tv_nexus->tvn_se_sess;
 589
 590        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 591        if (tag < 0) {
 592                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 593                return ERR_PTR(-ENOMEM);
 594        }
 595
 596        cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
 597        sg = cmd->tvc_sgl;
 598        prot_sg = cmd->tvc_prot_sgl;
 599        pages = cmd->tvc_upages;
 600        memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
 601
 602        cmd->tvc_sgl = sg;
 603        cmd->tvc_prot_sgl = prot_sg;
 604        cmd->tvc_upages = pages;
 605        cmd->tvc_se_cmd.map_tag = tag;
 606        cmd->tvc_tag = scsi_tag;
 607        cmd->tvc_lun = lun;
 608        cmd->tvc_task_attr = task_attr;
 609        cmd->tvc_exp_data_len = exp_data_len;
 610        cmd->tvc_data_direction = data_direction;
 611        cmd->tvc_nexus = tv_nexus;
 612        cmd->inflight = vhost_scsi_get_inflight(vq);
 613
 614        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 615
 616        return cmd;
 617}
 618
 619/*
 620 * Map a user memory range into a scatterlist
 621 *
 622 * Returns the number of scatterlist entries used or -errno on error.
 623 */
 624static int
 625vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 626                      void __user *ptr,
 627                      size_t len,
 628                      struct scatterlist *sgl,
 629                      bool write)
 630{
 631        unsigned int npages = 0, offset, nbytes;
 632        unsigned int pages_nr = iov_num_pages(ptr, len);
 633        struct scatterlist *sg = sgl;
 634        struct page **pages = cmd->tvc_upages;
 635        int ret, i;
 636
 637        if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
 638                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
 639                       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
 640                        pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
 641                return -ENOBUFS;
 642        }
 643
 644        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
  645        /* Error: no pages were pinned */
  646        if (ret < 0)
  647                goto out;
  648        /* Fewer pages pinned than requested */
 649        if (ret != pages_nr) {
 650                for (i = 0; i < ret; i++)
 651                        put_page(pages[i]);
 652                ret = -EFAULT;
 653                goto out;
 654        }
 655
 656        while (len > 0) {
 657                offset = (uintptr_t)ptr & ~PAGE_MASK;
 658                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
 659                sg_set_page(sg, pages[npages], nbytes, offset);
 660                ptr += nbytes;
 661                len -= nbytes;
 662                sg++;
 663                npages++;
 664        }
 665
 666out:
 667        return ret;
 668}
 669
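/*
 * Work out how many scatterlist entries an iov_iter will need and make
 * sure the count fits within the pre-allocated maximum for this command.
 */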
 670static int
 671vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 672{
 673        int sgl_count = 0;
 674
 675        if (!iter || !iter->iov) {
 676                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
 677                       " present\n", __func__, bytes);
 678                return -EINVAL;
 679        }
 680
 681        sgl_count = iov_iter_npages(iter, 0xffff);
 682        if (sgl_count > max_sgls) {
 683                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
 684                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
 685                return -EINVAL;
 686        }
 687        return sgl_count;
 688}
 689
 690static int
 691vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 692                      struct iov_iter *iter,
 693                      struct scatterlist *sg, int sg_count)
 694{
 695        size_t off = iter->iov_offset;
 696        int i, ret;
 697
 698        for (i = 0; i < iter->nr_segs; i++) {
 699                void __user *base = iter->iov[i].iov_base + off;
 700                size_t len = iter->iov[i].iov_len - off;
 701
 702                ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
 703                if (ret < 0) {
 704                        for (i = 0; i < sg_count; i++) {
 705                                struct page *page = sg_page(&sg[i]);
 706                                if (page)
 707                                        put_page(page);
 708                        }
 709                        return ret;
 710                }
 711                sg += ret;
 712                off = 0;
 713        }
 714        return 0;
 715}
 716
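/*
 * Map the (optional) protection iovecs and the data iovecs from the
 * guest into the command's pre-allocated scatterlists, pinning the
 * backing pages via vhost_scsi_iov_to_sgl().
 */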
 717static int
 718vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 719                 size_t prot_bytes, struct iov_iter *prot_iter,
 720                 size_t data_bytes, struct iov_iter *data_iter)
 721{
 722        int sgl_count, ret;
 723        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
 724
 725        if (prot_bytes) {
 726                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
 727                                                 VHOST_SCSI_PREALLOC_PROT_SGLS);
 728                if (sgl_count < 0)
 729                        return sgl_count;
 730
 731                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
 732                cmd->tvc_prot_sgl_count = sgl_count;
 733                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
 734                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
 735
 736                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
 737                                            cmd->tvc_prot_sgl,
 738                                            cmd->tvc_prot_sgl_count);
 739                if (ret < 0) {
 740                        cmd->tvc_prot_sgl_count = 0;
 741                        return ret;
 742                }
 743        }
 744        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
 745                                         VHOST_SCSI_PREALLOC_SGLS);
 746        if (sgl_count < 0)
 747                return sgl_count;
 748
 749        sg_init_table(cmd->tvc_sgl, sgl_count);
 750        cmd->tvc_sgl_count = sgl_count;
 751        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
 752                  cmd->tvc_sgl, cmd->tvc_sgl_count);
 753
 754        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
 755                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
 756        if (ret < 0) {
 757                cmd->tvc_sgl_count = 0;
 758                return ret;
 759        }
 760        return 0;
 761}
 762
 763static int vhost_scsi_to_tcm_attr(int attr)
 764{
 765        switch (attr) {
 766        case VIRTIO_SCSI_S_SIMPLE:
 767                return TCM_SIMPLE_TAG;
 768        case VIRTIO_SCSI_S_ORDERED:
 769                return TCM_ORDERED_TAG;
 770        case VIRTIO_SCSI_S_HEAD:
 771                return TCM_HEAD_TAG;
 772        case VIRTIO_SCSI_S_ACA:
 773                return TCM_ACA_TAG;
 774        default:
 775                break;
 776        }
 777        return TCM_SIMPLE_TAG;
 778}
 779
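/*
 * Runs in process context on vhost_scsi_workqueue: hand the command,
 * its CDB and its mapped scatterlists to the target core via
 * target_submit_cmd_map_sgls(), sending a check condition back to the
 * guest on failure.
 */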
 780static void vhost_scsi_submission_work(struct work_struct *work)
 781{
 782        struct vhost_scsi_cmd *cmd =
 783                container_of(work, struct vhost_scsi_cmd, work);
 784        struct vhost_scsi_nexus *tv_nexus;
 785        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 786        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 787        int rc;
 788
 789        /* FIXME: BIDI operation */
 790        if (cmd->tvc_sgl_count) {
 791                sg_ptr = cmd->tvc_sgl;
 792
 793                if (cmd->tvc_prot_sgl_count)
 794                        sg_prot_ptr = cmd->tvc_prot_sgl;
 795                else
 796                        se_cmd->prot_pto = true;
 797        } else {
 798                sg_ptr = NULL;
 799        }
 800        tv_nexus = cmd->tvc_nexus;
 801
 802        se_cmd->tag = 0;
 803        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
 804                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
 805                        cmd->tvc_lun, cmd->tvc_exp_data_len,
 806                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
 807                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
 808                        sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
 809                        cmd->tvc_prot_sgl_count);
 810        if (rc < 0) {
 811                transport_send_check_condition_and_sense(se_cmd,
 812                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 813                transport_generic_free_cmd(se_cmd, 0);
 814        }
 815}
 816
 817static void
 818vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 819                           struct vhost_virtqueue *vq,
 820                           int head, unsigned out)
 821{
 822        struct virtio_scsi_cmd_resp __user *resp;
 823        struct virtio_scsi_cmd_resp rsp;
 824        int ret;
 825
 826        memset(&rsp, 0, sizeof(rsp));
 827        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
 828        resp = vq->iov[out].iov_base;
 829        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 830        if (!ret)
 831                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 832        else
 833                pr_err("Faulted on virtio_scsi_cmd_resp\n");
 834}
 835
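/*
 * Main request-queue handler: pop descriptors from the virtqueue, parse
 * the virtio-scsi (optionally T10-PI) request header, derive the data
 * direction and lengths from the iovec layout, map the payload, and
 * dispatch each command to vhost_scsi_submission_work().
 */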
 836static void
 837vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 838{
 839        struct vhost_scsi_tpg **vs_tpg, *tpg;
 840        struct virtio_scsi_cmd_req v_req;
 841        struct virtio_scsi_cmd_req_pi v_req_pi;
 842        struct vhost_scsi_cmd *cmd;
 843        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
 844        u64 tag;
 845        u32 exp_data_len, data_direction;
 846        unsigned int out = 0, in = 0;
 847        int head, ret, prot_bytes;
 848        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 849        size_t out_size, in_size;
 850        u16 lun;
 851        u8 *target, *lunp, task_attr;
 852        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 853        void *req, *cdb;
 854
 855        mutex_lock(&vq->mutex);
 856        /*
 857         * We can handle the vq only after the endpoint is setup by calling the
 858         * VHOST_SCSI_SET_ENDPOINT ioctl.
 859         */
 860        vs_tpg = vq->private_data;
 861        if (!vs_tpg)
 862                goto out;
 863
 864        vhost_disable_notify(&vs->dev, vq);
 865
 866        for (;;) {
 867                head = vhost_get_vq_desc(vq, vq->iov,
 868                                         ARRAY_SIZE(vq->iov), &out, &in,
 869                                         NULL, NULL);
 870                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
 871                         head, out, in);
 872                /* On error, stop handling until the next kick. */
 873                if (unlikely(head < 0))
 874                        break;
 875                /* Nothing new?  Wait for eventfd to tell us they refilled. */
 876                if (head == vq->num) {
 877                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
 878                                vhost_disable_notify(&vs->dev, vq);
 879                                continue;
 880                        }
 881                        break;
 882                }
 883                /*
 884                 * Check for a sane response buffer so we can report early
 885                 * errors back to the guest.
 886                 */
 887                if (unlikely(vq->iov[out].iov_len < rsp_size)) {
 888                        vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
 889                                " size, got %zu bytes\n", vq->iov[out].iov_len);
 890                        break;
 891                }
 892                /*
 893                 * Setup pointers and values based upon different virtio-scsi
 894                 * request header if T10_PI is enabled in KVM guest.
 895                 */
 896                if (t10_pi) {
 897                        req = &v_req_pi;
 898                        req_size = sizeof(v_req_pi);
 899                        lunp = &v_req_pi.lun[0];
 900                        target = &v_req_pi.lun[1];
 901                } else {
 902                        req = &v_req;
 903                        req_size = sizeof(v_req);
 904                        lunp = &v_req.lun[0];
 905                        target = &v_req.lun[1];
 906                }
 907                /*
 908                 * FIXME: Not correct for BIDI operation
 909                 */
 910                out_size = iov_length(vq->iov, out);
 911                in_size = iov_length(&vq->iov[out], in);
 912
 913                /*
  914                 * Copy over the virtio-scsi request header, which for an
 915                 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
 916                 * single iovec may contain both the header + outgoing
 917                 * WRITE payloads.
 918                 *
 919                 * copy_from_iter() will advance out_iter, so that it will
 920                 * point at the start of the outgoing WRITE payload, if
 921                 * DMA_TO_DEVICE is set.
 922                 */
 923                iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 924
 925                if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
 926                        vq_err(vq, "Faulted on copy_from_iter\n");
 927                        vhost_scsi_send_bad_target(vs, vq, head, out);
 928                        continue;
 929                }
 930                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
 931                if (unlikely(*lunp != 1)) {
 932                        vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
 933                        vhost_scsi_send_bad_target(vs, vq, head, out);
 934                        continue;
 935                }
 936
 937                tpg = ACCESS_ONCE(vs_tpg[*target]);
 938                if (unlikely(!tpg)) {
 939                        /* Target does not exist, fail the request */
 940                        vhost_scsi_send_bad_target(vs, vq, head, out);
 941                        continue;
 942                }
 943                /*
 944                 * Determine data_direction by calculating the total outgoing
 945                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
 946                 * response headers respectively.
 947                 *
 948                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
 949                 * to the right place.
 950                 *
 951                 * For DMA_FROM_DEVICE, the iovec will be just past the end
 952                 * of the virtio-scsi response header in either the same
 953                 * or immediately following iovec.
 954                 *
 955                 * Any associated T10_PI bytes for the outgoing / incoming
 956                 * payloads are included in calculation of exp_data_len here.
 957                 */
 958                prot_bytes = 0;
 959
 960                if (out_size > req_size) {
 961                        data_direction = DMA_TO_DEVICE;
 962                        exp_data_len = out_size - req_size;
 963                        data_iter = out_iter;
 964                } else if (in_size > rsp_size) {
 965                        data_direction = DMA_FROM_DEVICE;
 966                        exp_data_len = in_size - rsp_size;
 967
 968                        iov_iter_init(&in_iter, READ, &vq->iov[out], in,
 969                                      rsp_size + exp_data_len);
 970                        iov_iter_advance(&in_iter, rsp_size);
 971                        data_iter = in_iter;
 972                } else {
 973                        data_direction = DMA_NONE;
 974                        exp_data_len = 0;
 975                }
 976                /*
 977                 * If T10_PI header + payload is present, setup prot_iter values
 978                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
 979                 * host scatterlists via get_user_pages_fast().
 980                 */
 981                if (t10_pi) {
 982                        if (v_req_pi.pi_bytesout) {
 983                                if (data_direction != DMA_TO_DEVICE) {
 984                                        vq_err(vq, "Received non zero pi_bytesout,"
 985                                                " but wrong data_direction\n");
 986                                        vhost_scsi_send_bad_target(vs, vq, head, out);
 987                                        continue;
 988                                }
 989                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 990                        } else if (v_req_pi.pi_bytesin) {
 991                                if (data_direction != DMA_FROM_DEVICE) {
 992                                        vq_err(vq, "Received non zero pi_bytesin,"
 993                                                " but wrong data_direction\n");
 994                                        vhost_scsi_send_bad_target(vs, vq, head, out);
 995                                        continue;
 996                                }
 997                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 998                        }
 999                        /*
1000                         * Set prot_iter to data_iter, and advance past any
 1001                         * preceding prot_bytes that may be present.
1002                         *
1003                         * Also fix up the exp_data_len to reflect only the
1004                         * actual data payload length.
1005                         */
1006                        if (prot_bytes) {
1007                                exp_data_len -= prot_bytes;
1008                                prot_iter = data_iter;
1009                                iov_iter_advance(&data_iter, prot_bytes);
1010                        }
1011                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
1012                        task_attr = v_req_pi.task_attr;
1013                        cdb = &v_req_pi.cdb[0];
1014                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1015                } else {
1016                        tag = vhost64_to_cpu(vq, v_req.tag);
1017                        task_attr = v_req.task_attr;
1018                        cdb = &v_req.cdb[0];
1019                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1020                }
1021                /*
 1022                 * Check that the received CDB size does not exceed our
1023                 * hardcoded max for vhost-scsi, then get a pre-allocated
1024                 * cmd descriptor for the new virtio-scsi tag.
1025                 *
1026                 * TODO what if cdb was too small for varlen cdb header?
1027                 */
1028                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1029                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
 1030                                " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1031                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1032                        vhost_scsi_send_bad_target(vs, vq, head, out);
1033                        continue;
1034                }
1035                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1036                                         exp_data_len + prot_bytes,
1037                                         data_direction);
1038                if (IS_ERR(cmd)) {
1039                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1040                               PTR_ERR(cmd));
1041                        vhost_scsi_send_bad_target(vs, vq, head, out);
1042                        continue;
1043                }
1044                cmd->tvc_vhost = vs;
1045                cmd->tvc_vq = vq;
1046                cmd->tvc_resp_iov = vq->iov[out];
1047                cmd->tvc_in_iovs = in;
1048
1049                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1050                         cmd->tvc_cdb[0], cmd->tvc_lun);
1051                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1052                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1053
1054                if (data_direction != DMA_NONE) {
1055                        ret = vhost_scsi_mapal(cmd,
1056                                               prot_bytes, &prot_iter,
1057                                               exp_data_len, &data_iter);
1058                        if (unlikely(ret)) {
1059                                vq_err(vq, "Failed to map iov to sgl\n");
1060                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1061                                vhost_scsi_send_bad_target(vs, vq, head, out);
1062                                continue;
1063                        }
1064                }
1065                /*
1066                 * Save the descriptor from vhost_get_vq_desc() to be used to
1067                 * complete the virtio-scsi request in TCM callback context via
1068                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1069                 */
1070                cmd->tvc_vq_desc = head;
1071                /*
1072                 * Dispatch cmd descriptor for cmwq execution in process
1073                 * context provided by vhost_scsi_workqueue.  This also ensures
1074                 * cmd is executed on the same kworker CPU as this vhost
1075                 * thread to gain positive L2 cache locality effects.
1076                 */
1077                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1078                queue_work(vhost_scsi_workqueue, &cmd->work);
1079        }
1080out:
1081        mutex_unlock(&vq->mutex);
1082}
1083
1084static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1085{
 1086        pr_debug("%s: control virtqueue kick; handling not implemented\n", __func__);
1087}
1088
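/*
 * Queue a virtio-scsi event (e.g. LUN hotplug/hotunplug) for delivery
 * to the guest.  The LUN is reported in the flat addressing format the
 * event virtqueue expects: lun[0] = 1, lun[1] = TPG tag, and lun[2..3]
 * carry the unpacked LUN, with the 0x40 flat-format flag for LUNs >= 256.
 */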
1089static void
1090vhost_scsi_send_evt(struct vhost_scsi *vs,
1091                   struct vhost_scsi_tpg *tpg,
1092                   struct se_lun *lun,
1093                   u32 event,
1094                   u32 reason)
1095{
1096        struct vhost_scsi_evt *evt;
1097
1098        evt = vhost_scsi_allocate_evt(vs, event, reason);
1099        if (!evt)
1100                return;
1101
1102        if (tpg && lun) {
1103                /* TODO: share lun setup code with virtio-scsi.ko */
1104                /*
1105                 * Note: evt->event is zeroed when we allocate it and
1106                 * lun[4-7] need to be zero according to virtio-scsi spec.
1107                 */
1108                evt->event.lun[0] = 0x01;
1109                evt->event.lun[1] = tpg->tport_tpgt;
1110                if (lun->unpacked_lun >= 256)
 1111                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1112                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1113        }
1114
1115        llist_add(&evt->list, &vs->vs_event_list);
1116        vhost_work_queue(&vs->dev, &vs->vs_event_work);
1117}
1118
1119static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1120{
1121        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1122                                                poll.work);
1123        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1124
1125        mutex_lock(&vq->mutex);
1126        if (!vq->private_data)
1127                goto out;
1128
1129        if (vs->vs_events_missed)
1130                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1131out:
1132        mutex_unlock(&vq->mutex);
1133}
1134
1135static void vhost_scsi_handle_kick(struct vhost_work *work)
1136{
1137        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1138                                                poll.work);
1139        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1140
1141        vhost_scsi_handle_vq(vs, vq);
1142}
1143
1144static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1145{
1146        vhost_poll_flush(&vs->vqs[index].vq.poll);
1147}
1148
1149/* Callers must hold dev mutex */
1150static void vhost_scsi_flush(struct vhost_scsi *vs)
1151{
1152        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1153        int i;
1154
1155        /* Init new inflight and remember the old inflight */
1156        vhost_scsi_init_inflight(vs, old_inflight);
1157
1158        /*
1159         * The inflight->kref was initialized to 1. We decrement it here to
1160         * indicate the start of the flush operation so that it will reach 0
1161         * when all the reqs are finished.
1162         */
1163        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1164                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1165
1166        /* Flush both the vhost poll and vhost work */
1167        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1168                vhost_scsi_flush_vq(vs, i);
1169        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1170        vhost_work_flush(&vs->dev, &vs->vs_event_work);
1171
1172        /* Wait for all reqs issued before the flush to be finished */
1173        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1174                wait_for_completion(&old_inflight[i]->comp);
1175}
1176
1177/*
1178 * Called from vhost_scsi_ioctl() context to walk the list of available
1179 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1180 *
1181 *  The lock nesting rule is:
1182 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1183 */
1184static int
1185vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1186                        struct vhost_scsi_target *t)
1187{
1188        struct se_portal_group *se_tpg;
1189        struct vhost_scsi_tport *tv_tport;
1190        struct vhost_scsi_tpg *tpg;
1191        struct vhost_scsi_tpg **vs_tpg;
1192        struct vhost_virtqueue *vq;
1193        int index, ret, i, len;
1194        bool match = false;
1195
1196        mutex_lock(&vhost_scsi_mutex);
1197        mutex_lock(&vs->dev.mutex);
1198
 1199        /* Verify that each ring has been set up correctly. */
 1200        for (index = 0; index < vs->dev.nvqs; ++index) {
1202                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1203                        ret = -EFAULT;
1204                        goto out;
1205                }
1206        }
1207
1208        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1209        vs_tpg = kzalloc(len, GFP_KERNEL);
1210        if (!vs_tpg) {
1211                ret = -ENOMEM;
1212                goto out;
1213        }
1214        if (vs->vs_tpg)
1215                memcpy(vs_tpg, vs->vs_tpg, len);
1216
1217        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1218                mutex_lock(&tpg->tv_tpg_mutex);
1219                if (!tpg->tpg_nexus) {
1220                        mutex_unlock(&tpg->tv_tpg_mutex);
1221                        continue;
1222                }
1223                if (tpg->tv_tpg_vhost_count != 0) {
1224                        mutex_unlock(&tpg->tv_tpg_mutex);
1225                        continue;
1226                }
1227                tv_tport = tpg->tport;
1228
1229                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1230                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1231                                kfree(vs_tpg);
1232                                mutex_unlock(&tpg->tv_tpg_mutex);
1233                                ret = -EEXIST;
1234                                goto out;
1235                        }
1236                        /*
1237                         * In order to ensure individual vhost-scsi configfs
1238                         * groups cannot be removed while in use by vhost ioctl,
1239                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
1240                         * dependency now.
1241                         */
1242                        se_tpg = &tpg->se_tpg;
1243                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1244                        if (ret) {
1245                                pr_warn("configfs_depend_item() failed: %d\n", ret);
1246                                kfree(vs_tpg);
1247                                mutex_unlock(&tpg->tv_tpg_mutex);
1248                                goto out;
1249                        }
1250                        tpg->tv_tpg_vhost_count++;
1251                        tpg->vhost_scsi = vs;
1252                        vs_tpg[tpg->tport_tpgt] = tpg;
1253                        smp_mb__after_atomic();
1254                        match = true;
1255                }
1256                mutex_unlock(&tpg->tv_tpg_mutex);
1257        }
1258
1259        if (match) {
1260                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1261                       sizeof(vs->vs_vhost_wwpn));
1262                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1263                        vq = &vs->vqs[i].vq;
1264                        mutex_lock(&vq->mutex);
1265                        vq->private_data = vs_tpg;
1266                        vhost_vq_init_access(vq);
1267                        mutex_unlock(&vq->mutex);
1268                }
1269                ret = 0;
1270        } else {
1271                ret = -EEXIST;
1272        }
1273
1274        /*
1275         * Act as synchronize_rcu to make sure access to
1276         * old vs->vs_tpg is finished.
1277         */
1278        vhost_scsi_flush(vs);
1279        kfree(vs->vs_tpg);
1280        vs->vs_tpg = vs_tpg;
1281
1282out:
1283        mutex_unlock(&vs->dev.mutex);
1284        mutex_unlock(&vhost_scsi_mutex);
1285        return ret;
1286}
1287
1288static int
1289vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1290                          struct vhost_scsi_target *t)
1291{
1292        struct se_portal_group *se_tpg;
1293        struct vhost_scsi_tport *tv_tport;
1294        struct vhost_scsi_tpg *tpg;
1295        struct vhost_virtqueue *vq;
1296        bool match = false;
1297        int index, ret, i;
1298        u8 target;
1299
1300        mutex_lock(&vhost_scsi_mutex);
1301        mutex_lock(&vs->dev.mutex);
 1302        /* Verify that each ring has been set up correctly. */
1303        for (index = 0; index < vs->dev.nvqs; ++index) {
1304                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1305                        ret = -EFAULT;
1306                        goto err_dev;
1307                }
1308        }
1309
1310        if (!vs->vs_tpg) {
1311                ret = 0;
1312                goto err_dev;
1313        }
1314
1315        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1316                target = i;
1317                tpg = vs->vs_tpg[target];
1318                if (!tpg)
1319                        continue;
1320
1321                mutex_lock(&tpg->tv_tpg_mutex);
1322                tv_tport = tpg->tport;
1323                if (!tv_tport) {
1324                        ret = -ENODEV;
1325                        goto err_tpg;
1326                }
1327
1328                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1329                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1330                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1331                                tv_tport->tport_name, tpg->tport_tpgt,
1332                                t->vhost_wwpn, t->vhost_tpgt);
1333                        ret = -EINVAL;
1334                        goto err_tpg;
1335                }
1336                tpg->tv_tpg_vhost_count--;
1337                tpg->vhost_scsi = NULL;
1338                vs->vs_tpg[target] = NULL;
1339                match = true;
1340                mutex_unlock(&tpg->tv_tpg_mutex);
1341                /*
1342                 * Release se_tpg->tpg_group.cg_item configfs dependency now
1343                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1344                 */
1345                se_tpg = &tpg->se_tpg;
1346                target_undepend_item(&se_tpg->tpg_group.cg_item);
1347        }
1348        if (match) {
1349                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1350                        vq = &vs->vqs[i].vq;
1351                        mutex_lock(&vq->mutex);
1352                        vq->private_data = NULL;
1353                        mutex_unlock(&vq->mutex);
1354                }
1355        }
1356        /*
1357         * Act as synchronize_rcu to make sure access to
1358         * old vs->vs_tpg is finished.
1359         */
1360        vhost_scsi_flush(vs);
1361        kfree(vs->vs_tpg);
1362        vs->vs_tpg = NULL;
1363        WARN_ON(vs->vs_events_nr);
1364        mutex_unlock(&vs->dev.mutex);
1365        mutex_unlock(&vhost_scsi_mutex);
1366        return 0;
1367
1368err_tpg:
1369        mutex_unlock(&tpg->tv_tpg_mutex);
1370err_dev:
1371        mutex_unlock(&vs->dev.mutex);
1372        mutex_unlock(&vhost_scsi_mutex);
1373        return ret;
1374}
1375
1376static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1377{
1378        struct vhost_virtqueue *vq;
1379        int i;
1380
1381        if (features & ~VHOST_SCSI_FEATURES)
1382                return -EOPNOTSUPP;
1383
1384        mutex_lock(&vs->dev.mutex);
1385        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1386            !vhost_log_access_ok(&vs->dev)) {
1387                mutex_unlock(&vs->dev.mutex);
1388                return -EFAULT;
1389        }
1390
1391        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1392                vq = &vs->vqs[i].vq;
1393                mutex_lock(&vq->mutex);
1394                vq->acked_features = features;
1395                mutex_unlock(&vq->mutex);
1396        }
1397        mutex_unlock(&vs->dev.mutex);
1398        return 0;
1399}
1400
1401static int vhost_scsi_open(struct inode *inode, struct file *f)
1402{
1403        struct vhost_scsi *vs;
1404        struct vhost_virtqueue **vqs;
1405        int r = -ENOMEM, i;
1406
1407        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1408        if (!vs) {
1409                vs = vzalloc(sizeof(*vs));
1410                if (!vs)
1411                        goto err_vs;
1412        }
1413
1414        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1415        if (!vqs)
1416                goto err_vqs;
1417
1418        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1419        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1420
1421        vs->vs_events_nr = 0;
1422        vs->vs_events_missed = false;
1423
1424        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1425        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1426        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1427        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1428        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1429                vqs[i] = &vs->vqs[i].vq;
1430                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1431        }
1432        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1433
1434        vhost_scsi_init_inflight(vs, NULL);
1435
1436        f->private_data = vs;
1437        return 0;
1438
1439err_vqs:
1440        kvfree(vs);
1441err_vs:
1442        return r;
1443}
1444
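    /*
     * Undo vhost_scsi_open() when the file is released: clear any bound
     * endpoint using the WWPN saved at set-endpoint time, stop and clean up
     * the vhost device, then free the virtqueue array and the vhost_scsi.
     */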
1445static int vhost_scsi_release(struct inode *inode, struct file *f)
1446{
1447        struct vhost_scsi *vs = f->private_data;
1448        struct vhost_scsi_target t;
1449
1450        mutex_lock(&vs->dev.mutex);
1451        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1452        mutex_unlock(&vs->dev.mutex);
1453        vhost_scsi_clear_endpoint(vs, &t);
1454        vhost_dev_stop(&vs->dev);
1455        vhost_dev_cleanup(&vs->dev, false);
1456        /* Jobs can re-queue themselves in the evt kick handler. Do an extra flush. */
1457        vhost_scsi_flush(vs);
1458        kfree(vs->dev.vqs);
1459        kvfree(vs);
1460        return 0;
1461}
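    /*
     * Top-level ioctl dispatch for the vhost-scsi character device.
     * SCSI-specific requests (endpoint setup/teardown, ABI version,
     * events-missed state, feature negotiation) are handled here; everything
     * else falls through to the generic vhost_dev_ioctl()/vhost_vring_ioctl()
     * helpers.
     */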
1462
1463static long
1464vhost_scsi_ioctl(struct file *f,
1465                 unsigned int ioctl,
1466                 unsigned long arg)
1467{
1468        struct vhost_scsi *vs = f->private_data;
1469        struct vhost_scsi_target backend;
1470        void __user *argp = (void __user *)arg;
1471        u64 __user *featurep = argp;
1472        u32 __user *eventsp = argp;
1473        u32 events_missed;
1474        u64 features;
1475        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1476        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1477
1478        switch (ioctl) {
1479        case VHOST_SCSI_SET_ENDPOINT:
1480                if (copy_from_user(&backend, argp, sizeof backend))
1481                        return -EFAULT;
1482                if (backend.reserved != 0)
1483                        return -EOPNOTSUPP;
1484
1485                return vhost_scsi_set_endpoint(vs, &backend);
1486        case VHOST_SCSI_CLEAR_ENDPOINT:
1487                if (copy_from_user(&backend, argp, sizeof backend))
1488                        return -EFAULT;
1489                if (backend.reserved != 0)
1490                        return -EOPNOTSUPP;
1491
1492                return vhost_scsi_clear_endpoint(vs, &backend);
1493        case VHOST_SCSI_GET_ABI_VERSION:
1494                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1495                        return -EFAULT;
1496                return 0;
1497        case VHOST_SCSI_SET_EVENTS_MISSED:
1498                if (get_user(events_missed, eventsp))
1499                        return -EFAULT;
1500                mutex_lock(&vq->mutex);
1501                vs->vs_events_missed = events_missed;
1502                mutex_unlock(&vq->mutex);
1503                return 0;
1504        case VHOST_SCSI_GET_EVENTS_MISSED:
1505                mutex_lock(&vq->mutex);
1506                events_missed = vs->vs_events_missed;
1507                mutex_unlock(&vq->mutex);
1508                if (put_user(events_missed, eventsp))
1509                        return -EFAULT;
1510                return 0;
1511        case VHOST_GET_FEATURES:
1512                features = VHOST_SCSI_FEATURES;
1513                if (copy_to_user(featurep, &features, sizeof features))
1514                        return -EFAULT;
1515                return 0;
1516        case VHOST_SET_FEATURES:
1517                if (copy_from_user(&features, featurep, sizeof features))
1518                        return -EFAULT;
1519                return vhost_scsi_set_features(vs, features);
1520        default:
1521                mutex_lock(&vs->dev.mutex);
1522                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1523                /* TODO: flush backend after dev ioctl. */
1524                if (r == -ENOIOCTLCMD)
1525                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1526                mutex_unlock(&vs->dev.mutex);
1527                return r;
1528        }
1529}
1530
1531#ifdef CONFIG_COMPAT
1532static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1533                                unsigned long arg)
1534{
1535        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1536}
1537#endif
1538
1539static const struct file_operations vhost_scsi_fops = {
1540        .owner          = THIS_MODULE,
1541        .release        = vhost_scsi_release,
1542        .unlocked_ioctl = vhost_scsi_ioctl,
1543#ifdef CONFIG_COMPAT
1544        .compat_ioctl   = vhost_scsi_compat_ioctl,
1545#endif
1546        .open           = vhost_scsi_open,
1547        .llseek         = noop_llseek,
1548};
1549
1550static struct miscdevice vhost_scsi_misc = {
1551        MISC_DYNAMIC_MINOR,
1552        "vhost-scsi",
1553        &vhost_scsi_fops,
1554};
1555
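    /*
     * Registering the miscdevice above exposes /dev/vhost-scsi.  As a rough,
     * hedged sketch (not part of this driver), a userspace VMM might bind an
     * already-configured TCM WWPN/TPGT to the device along these lines; the
     * WWPN string and TPGT value below are illustrative placeholders only:
     *
     *      #include <fcntl.h>
     *      #include <stdio.h>
     *      #include <sys/ioctl.h>
     *      #include <linux/vhost.h>
     *
     *      int fd = open("/dev/vhost-scsi", O_RDWR);
     *      struct vhost_scsi_target t = { 0 };
     *
     *      ioctl(fd, VHOST_SET_OWNER);
     *      snprintf(t.vhost_wwpn, sizeof(t.vhost_wwpn), "naa.600140554cf3a18e");
     *      t.vhost_tpgt = 1;
     *      if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t) < 0)
     *              perror("VHOST_SCSI_SET_ENDPOINT");
     */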
1556static int __init vhost_scsi_register(void)
1557{
1558        return misc_register(&vhost_scsi_misc);
1559}
1560
1561static void vhost_scsi_deregister(void)
1562{
1563        misc_deregister(&vhost_scsi_misc);
1564}
1565
1566static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1567{
1568        switch (tport->tport_proto_id) {
1569        case SCSI_PROTOCOL_SAS:
1570                return "SAS";
1571        case SCSI_PROTOCOL_FCP:
1572                return "FCP";
1573        case SCSI_PROTOCOL_ISCSI:
1574                return "iSCSI";
1575        default:
1576                break;
1577        }
1578
1579        return "Unknown";
1580}
1581
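    /*
     * Report a LUN hotplug or hotunplug to the guest: when the guest has
     * negotiated VIRTIO_SCSI_F_HOTPLUG, queue a VIRTIO_SCSI_T_TRANSPORT_RESET
     * event with a RESCAN (plug) or REMOVED (unplug) reason on the event
     * virtqueue.
     */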
1582static void
1583vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1584                  struct se_lun *lun, bool plug)
1585{
1586
1587        struct vhost_scsi *vs = tpg->vhost_scsi;
1588        struct vhost_virtqueue *vq;
1589        u32 reason;
1590
1591        if (!vs)
1592                return;
1593
1594        mutex_lock(&vs->dev.mutex);
1595
1596        if (plug)
1597                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1598        else
1599                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1600
1601        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1602        mutex_lock(&vq->mutex);
1603        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1604                vhost_scsi_send_evt(vs, tpg, lun,
1605                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1606        mutex_unlock(&vq->mutex);
1607        mutex_unlock(&vs->dev.mutex);
1608}
1609
1610static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1611{
1612        vhost_scsi_do_plug(tpg, lun, true);
1613}
1614
1615static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1616{
1617        vhost_scsi_do_plug(tpg, lun, false);
1618}
1619
1620static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1621                               struct se_lun *lun)
1622{
1623        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1624                                struct vhost_scsi_tpg, se_tpg);
1625
1626        mutex_lock(&vhost_scsi_mutex);
1627
1628        mutex_lock(&tpg->tv_tpg_mutex);
1629        tpg->tv_tpg_port_count++;
1630        mutex_unlock(&tpg->tv_tpg_mutex);
1631
1632        vhost_scsi_hotplug(tpg, lun);
1633
1634        mutex_unlock(&vhost_scsi_mutex);
1635
1636        return 0;
1637}
1638
1639static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1640                                  struct se_lun *lun)
1641{
1642        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1643                                struct vhost_scsi_tpg, se_tpg);
1644
1645        mutex_lock(&vhost_scsi_mutex);
1646
1647        mutex_lock(&tpg->tv_tpg_mutex);
1648        tpg->tv_tpg_port_count--;
1649        mutex_unlock(&tpg->tv_tpg_mutex);
1650
1651        vhost_scsi_hotunplug(tpg, lun);
1652
1653        mutex_unlock(&vhost_scsi_mutex);
1654}
1655
1656static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1657{
1658        struct vhost_scsi_cmd *tv_cmd;
1659        unsigned int i;
1660
1661        if (!se_sess->sess_cmd_map)
1662                return;
1663
1664        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1665                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1666
1667                kfree(tv_cmd->tvc_sgl);
1668                kfree(tv_cmd->tvc_prot_sgl);
1669                kfree(tv_cmd->tvc_upages);
1670        }
1671}
1672
1673static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1674                struct config_item *item, const char *page, size_t count)
1675{
1676        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1677        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1678                                struct vhost_scsi_tpg, se_tpg);
1679        unsigned long val;
1680        int ret = kstrtoul(page, 0, &val);
1681
1682        if (ret) {
1683                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1684                return ret;
1685        }
1686        if (val != 0 && val != 1 && val != 3) {
1687                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1688                return -EINVAL;
1689        }
1690        tpg->tv_fabric_prot_type = val;
1691
1692        return count;
1693}
1694
1695static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1696                struct config_item *item, char *page)
1697{
1698        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1699        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1700                                struct vhost_scsi_tpg, se_tpg);
1701
1702        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1703}
1704
1705CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1706
1707static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1708        &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1709        NULL,
1710};
1711
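    /*
     * Session-creation callback passed to target_alloc_session(): preallocate
     * the scatterlist, protection scatterlist, and user-page arrays for each
     * of the VHOST_SCSI_DEFAULT_TAGS commands in sess_cmd_map, undoing all of
     * it if any allocation fails.
     */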
1712static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1713                               struct se_session *se_sess, void *p)
1714{
1715        struct vhost_scsi_cmd *tv_cmd;
1716        unsigned int i;
1717
1718        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1719                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1720
1721                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1722                                        VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1723                if (!tv_cmd->tvc_sgl) {
1724                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1725                        goto out;
1726                }
1727
1728                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1729                                VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1730                if (!tv_cmd->tvc_upages) {
1731                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1732                        goto out;
1733                }
1734
1735                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1736                                VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1737                if (!tv_cmd->tvc_prot_sgl) {
1738                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1739                        goto out;
1740                }
1741        }
1742        return 0;
1743out:
1744        vhost_scsi_free_cmd_map_res(se_sess);
1745        return -ENOMEM;
1746}
1747
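    /*
     * Create the single I_T nexus for a TPG: allocate a vhost_scsi_nexus and
     * a tagged se_session (with per-command preallocation done by
     * vhost_scsi_nexus_cb), failing with -EEXIST if the TPG already has a
     * nexus.
     */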
1748static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1749                                const char *name)
1750{
1751        struct vhost_scsi_nexus *tv_nexus;
1752
1753        mutex_lock(&tpg->tv_tpg_mutex);
1754        if (tpg->tpg_nexus) {
1755                mutex_unlock(&tpg->tv_tpg_mutex);
1756                pr_debug("tpg->tpg_nexus already exists\n");
1757                return -EEXIST;
1758        }
1759
1760        tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1761        if (!tv_nexus) {
1762                mutex_unlock(&tpg->tv_tpg_mutex);
1763                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1764                return -ENOMEM;
1765        }
1766        /*
1767         * Since we are running in 'demo mode' this call will generate a
1768         * struct se_node_acl for the vhost_scsi struct se_portal_group with
1769         * the SCSI Initiator port name of the passed configfs group 'name'.
1770         */
1771        tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1772                                        VHOST_SCSI_DEFAULT_TAGS,
1773                                        sizeof(struct vhost_scsi_cmd),
1774                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1775                                        (unsigned char *)name, tv_nexus,
1776                                        vhost_scsi_nexus_cb);
1777        if (IS_ERR(tv_nexus->tvn_se_sess)) {
1778                mutex_unlock(&tpg->tv_tpg_mutex);
1779                kfree(tv_nexus);
1780                return -ENOMEM;
1781        }
1782        tpg->tpg_nexus = tv_nexus;
1783
1784        mutex_unlock(&tpg->tv_tpg_mutex);
1785        return 0;
1786}
1787
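    /*
     * Drop the TPG's I_T nexus, refusing while the TPG still has linked
     * ports or active vhost endpoints; otherwise free the preallocated
     * command resources and deregister the session.
     */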
1788static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1789{
1790        struct se_session *se_sess;
1791        struct vhost_scsi_nexus *tv_nexus;
1792
1793        mutex_lock(&tpg->tv_tpg_mutex);
1794        tv_nexus = tpg->tpg_nexus;
1795        if (!tv_nexus) {
1796                mutex_unlock(&tpg->tv_tpg_mutex);
1797                return -ENODEV;
1798        }
1799
1800        se_sess = tv_nexus->tvn_se_sess;
1801        if (!se_sess) {
1802                mutex_unlock(&tpg->tv_tpg_mutex);
1803                return -ENODEV;
1804        }
1805
1806        if (tpg->tv_tpg_port_count != 0) {
1807                mutex_unlock(&tpg->tv_tpg_mutex);
1808                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1809                        " active TPG port count: %d\n",
1810                        tpg->tv_tpg_port_count);
1811                return -EBUSY;
1812        }
1813
1814        if (tpg->tv_tpg_vhost_count != 0) {
1815                mutex_unlock(&tpg->tv_tpg_mutex);
1816                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1817                        " active TPG vhost count: %d\n",
1818                        tpg->tv_tpg_vhost_count);
1819                return -EBUSY;
1820        }
1821
1822        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1823                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1824                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1825
1826        vhost_scsi_free_cmd_map_res(se_sess);
1827        /*
1828         * Release the SCSI I_T Nexus to the emulated vhost Target Port
1829         */
1830        transport_deregister_session(tv_nexus->tvn_se_sess);
1831        tpg->tpg_nexus = NULL;
1832        mutex_unlock(&tpg->tv_tpg_mutex);
1833
1834        kfree(tv_nexus);
1835        return 0;
1836}
1837
1838static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1839{
1840        struct se_portal_group *se_tpg = to_tpg(item);
1841        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1842                                struct vhost_scsi_tpg, se_tpg);
1843        struct vhost_scsi_nexus *tv_nexus;
1844        ssize_t ret;
1845
1846        mutex_lock(&tpg->tv_tpg_mutex);
1847        tv_nexus = tpg->tpg_nexus;
1848        if (!tv_nexus) {
1849                mutex_unlock(&tpg->tv_tpg_mutex);
1850                return -ENODEV;
1851        }
1852        ret = snprintf(page, PAGE_SIZE, "%s\n",
1853                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1854        mutex_unlock(&tpg->tv_tpg_mutex);
1855
1856        return ret;
1857}
1858
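    /*
     * configfs store handler for the TPG "nexus" attribute: writing "NULL"
     * drops the current nexus, while any other value must carry a naa./fc./
     * iqn. prefix that matches the tport's protocol before a new nexus is
     * created.
     */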
1859static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1860                const char *page, size_t count)
1861{
1862        struct se_portal_group *se_tpg = to_tpg(item);
1863        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1864                                struct vhost_scsi_tpg, se_tpg);
1865        struct vhost_scsi_tport *tport_wwn = tpg->tport;
1866        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1867        int ret;
1868        /*
1869         * Shut down the active I_T nexus if 'NULL' is passed.
1870         */
1871        if (!strncmp(page, "NULL", 4)) {
1872                ret = vhost_scsi_drop_nexus(tpg);
1873                return (!ret) ? count : ret;
1874        }
1875        /*
1876         * Otherwise make sure the passed virtual Initiator port WWN matches
1877         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1878         * vhost_scsi_make_nexus().
1879         */
1880        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1881                pr_err("Emulated NAA SAS Address: %s, exceeds"
1882                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
1883                return -EINVAL;
1884        }
1885        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1886
1887        ptr = strstr(i_port, "naa.");
1888        if (ptr) {
1889                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1890                        pr_err("Passed SAS Initiator Port %s does not"
1891                                " match target port protoid: %s\n", i_port,
1892                                vhost_scsi_dump_proto_id(tport_wwn));
1893                        return -EINVAL;
1894                }
1895                port_ptr = &i_port[0];
1896                goto check_newline;
1897        }
1898        ptr = strstr(i_port, "fc.");
1899        if (ptr) {
1900                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1901                        pr_err("Passed FCP Initiator Port %s does not"
1902                                " match target port protoid: %s\n", i_port,
1903                                vhost_scsi_dump_proto_id(tport_wwn));
1904                        return -EINVAL;
1905                }
1906                port_ptr = &i_port[3]; /* Skip over "fc." */
1907                goto check_newline;
1908        }
1909        ptr = strstr(i_port, "iqn.");
1910        if (ptr) {
1911                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1912                        pr_err("Passed iSCSI Initiator Port %s does not"
1913                                " match target port protoid: %s\n", i_port,
1914                                vhost_scsi_dump_proto_id(tport_wwn));
1915                        return -EINVAL;
1916                }
1917                port_ptr = &i_port[0];
1918                goto check_newline;
1919        }
1920        pr_err("Unable to locate prefix for emulated Initiator Port:"
1921                        " %s\n", i_port);
1922        return -EINVAL;
1923        /*
1924         * Clear any trailing newline for the NAA WWN
1925         */
1926check_newline:
1927        if (i_port[strlen(i_port)-1] == '\n')
1928                i_port[strlen(i_port)-1] = '\0';
1929
1930        ret = vhost_scsi_make_nexus(tpg, port_ptr);
1931        if (ret < 0)
1932                return ret;
1933
1934        return count;
1935}
1936
1937CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1938
1939static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1940        &vhost_scsi_tpg_attr_nexus,
1941        NULL,
1942};
1943
1944static struct se_portal_group *
1945vhost_scsi_make_tpg(struct se_wwn *wwn,
1946                   struct config_group *group,
1947                   const char *name)
1948{
1949        struct vhost_scsi_tport *tport = container_of(wwn,
1950                        struct vhost_scsi_tport, tport_wwn);
1951
1952        struct vhost_scsi_tpg *tpg;
1953        u16 tpgt;
1954        int ret;
1955
1956        if (strstr(name, "tpgt_") != name)
1957                return ERR_PTR(-EINVAL);
1958        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1959                return ERR_PTR(-EINVAL);
1960
1961        tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
1962        if (!tpg) {
1963                pr_err("Unable to allocate struct vhost_scsi_tpg");
1964                return ERR_PTR(-ENOMEM);
1965        }
1966        mutex_init(&tpg->tv_tpg_mutex);
1967        INIT_LIST_HEAD(&tpg->tv_tpg_list);
1968        tpg->tport = tport;
1969        tpg->tport_tpgt = tpgt;
1970
1971        ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1972        if (ret < 0) {
1973                kfree(tpg);
1974                return NULL;
1975        }
1976        mutex_lock(&vhost_scsi_mutex);
1977        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1978        mutex_unlock(&vhost_scsi_mutex);
1979
1980        return &tpg->se_tpg;
1981}
1982
1983static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1984{
1985        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1986                                struct vhost_scsi_tpg, se_tpg);
1987
1988        mutex_lock(&vhost_scsi_mutex);
1989        list_del(&tpg->tv_tpg_list);
1990        mutex_unlock(&vhost_scsi_mutex);
1991        /*
1992         * Release the virtual I_T Nexus for this vhost TPG
1993         */
1994        vhost_scsi_drop_nexus(tpg);
1995        /*
1996         * Deregister the se_tpg from TCM..
1997         */
1998        core_tpg_deregister(se_tpg);
1999        kfree(tpg);
2000}
2001
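    /*
     * Create a target port for a new configfs WWN directory: the directory
     * name's prefix (naa., fc., iqn.) selects the emulated protocol (SAS,
     * FCP, iSCSI) and, minus any "fc." prefix, becomes the tport name.
     */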
2002static struct se_wwn *
2003vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2004                     struct config_group *group,
2005                     const char *name)
2006{
2007        struct vhost_scsi_tport *tport;
2008        char *ptr;
2009        u64 wwpn = 0;
2010        int off = 0;
2011
2012        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2013                return ERR_PTR(-EINVAL); */
2014
2015        tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2016        if (!tport) {
2017                pr_err("Unable to allocate struct vhost_scsi_tport");
2018                return ERR_PTR(-ENOMEM);
2019        }
2020        tport->tport_wwpn = wwpn;
2021        /*
2022         * Determine the emulated Protocol Identifier and Target Port Name
2023         * based on the incoming configfs directory name.
2024         */
2025        ptr = strstr(name, "naa.");
2026        if (ptr) {
2027                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2028                goto check_len;
2029        }
2030        ptr = strstr(name, "fc.");
2031        if (ptr) {
2032                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2033                off = 3; /* Skip over "fc." */
2034                goto check_len;
2035        }
2036        ptr = strstr(name, "iqn.");
2037        if (ptr) {
2038                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2039                goto check_len;
2040        }
2041
2042        pr_err("Unable to locate prefix for emulated Target Port:"
2043                        " %s\n", name);
2044        kfree(tport);
2045        return ERR_PTR(-EINVAL);
2046
2047check_len:
2048        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2049                pr_err("Emulated %s Address: %s, exceeds"
2050                        " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2051                        VHOST_SCSI_NAMELEN);
2052                kfree(tport);
2053                return ERR_PTR(-EINVAL);
2054        }
2055        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2056
2057        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2058                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2059
2060        return &tport->tport_wwn;
2061}
2062
2063static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2064{
2065        struct vhost_scsi_tport *tport = container_of(wwn,
2066                                struct vhost_scsi_tport, tport_wwn);
2067
2068        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2069                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2070                tport->tport_name);
2071
2072        kfree(tport);
2073}
2074
2075static ssize_t
2076vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2077{
2078        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2079                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2080                utsname()->machine);
2081}
2082
2083CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2084
2085static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2086        &vhost_scsi_wwn_attr_version,
2087        NULL,
2088};
2089
2090static const struct target_core_fabric_ops vhost_scsi_ops = {
2091        .module                         = THIS_MODULE,
2092        .name                           = "vhost",
2093        .get_fabric_name                = vhost_scsi_get_fabric_name,
2094        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2095        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2096        .tpg_check_demo_mode            = vhost_scsi_check_true,
2097        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2098        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2099        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2100        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2101        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2102        .release_cmd                    = vhost_scsi_release_cmd,
2103        .check_stop_free                = vhost_scsi_check_stop_free,
2104        .sess_get_index                 = vhost_scsi_sess_get_index,
2105        .sess_get_initiator_sid         = NULL,
2106        .write_pending                  = vhost_scsi_write_pending,
2107        .write_pending_status           = vhost_scsi_write_pending_status,
2108        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2109        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2110        .queue_data_in                  = vhost_scsi_queue_data_in,
2111        .queue_status                   = vhost_scsi_queue_status,
2112        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2113        .aborted_task                   = vhost_scsi_aborted_task,
2114        /*
2115         * Setup callers for generic logic in target_core_fabric_configfs.c
2116         */
2117        .fabric_make_wwn                = vhost_scsi_make_tport,
2118        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2119        .fabric_make_tpg                = vhost_scsi_make_tpg,
2120        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2121        .fabric_post_link               = vhost_scsi_port_link,
2122        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2123
2124        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2125        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2126        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2127};
2128
2129static int __init vhost_scsi_init(void)
2130{
2131        int ret = -ENOMEM;
2132
2133        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2134                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2135                utsname()->machine);
2136
2137        /*
2138         * Use our own dedicated workqueue for submitting I/O into
2139         * target core to avoid contention within system_wq.
2140         */
2141        vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2142        if (!vhost_scsi_workqueue)
2143                goto out;
2144
2145        ret = vhost_scsi_register();
2146        if (ret < 0)
2147                goto out_destroy_workqueue;
2148
2149        ret = target_register_template(&vhost_scsi_ops);
2150        if (ret < 0)
2151                goto out_vhost_scsi_deregister;
2152
2153        return 0;
2154
2155out_vhost_scsi_deregister:
2156        vhost_scsi_deregister();
2157out_destroy_workqueue:
2158        destroy_workqueue(vhost_scsi_workqueue);
2159out:
2160        return ret;
2161}
2162
2163static void vhost_scsi_exit(void)
2164{
2165        target_unregister_template(&vhost_scsi_ops);
2166        vhost_scsi_deregister();
2167        destroy_workqueue(vhost_scsi_workqueue);
2168}
2169
2170MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2171MODULE_ALIAS("tcm_vhost");
2172MODULE_LICENSE("GPL");
2173module_init(vhost_scsi_init);
2174module_exit(vhost_scsi_exit);
2175