linux/drivers/vhost/scsi.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi response incoming iovecs */
        int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
        struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* Used for enabling T10-PI with legacy devices */
        int tv_fabric_prot_type;
        /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct vhost_scsi_nexus *tpg_nexus;
        /* Pointer back to vhost_scsi_tport */
        struct vhost_scsi_tport *tport;
        /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[VHOST_SCSI_NAMELEN];
        /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for the flush operation.
         * At any time one tracker accepts references for newly submitted
         * commands, while we wait for the other tracker's count to reach zero.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

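/*
 * Number of pages spanned by a userspace buffer, counting partial pages at
 * either end.  Worked example, assuming 4 KiB pages: iov_base = 0x1ffc and
 * iov_len = 8 touches bytes 0x1ffc..0x2003, so PAGE_ALIGN() yields 0x3000,
 * the masked base is 0x1000, and (0x3000 - 0x1000) >> PAGE_SHIFT = 2 pages.
 */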
static int iov_num_pages(void __user *iov_base, size_t iov_len)
{
        return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
               ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void vhost_scsi_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

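/*
 * Flip each virtqueue to the other slot of its inflights[] pair: new
 * requests take references on the freshly initialized tracker, while the
 * old tracker (returned via @old_inflight when non-NULL) is left to drain
 * so vhost_scsi_flush() can wait for its refcount to hit zero.
 */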
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* set up new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *vhost_scsi_get_fabric_name(void)
{
        return "vhost";
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);

        return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        vhost_scsi_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void vhost_scsi_close_session(struct se_session *se_sess)
{
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

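/*
 * Allocate a virtio-scsi event (e.g. hotplug) to be queued for the guest.
 * Returns NULL and sets vs_events_missed when more than VHOST_SCSI_MAX_EVENT
 * events are already pending or the allocation fails; the guest learns of
 * the overflow via VIRTIO_SCSI_T_EVENTS_MISSED on a later event.
 */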
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = cpu_to_vhost32(vq, event);
        evt->event.reason = cpu_to_vhost32(vq, reason);
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd);
}

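/*
 * Copy one event into a guest-posted descriptor on the event virtqueue.
 * Any failure to obtain or validate a descriptor simply records the miss;
 * the event itself is freed by the caller either way.
 */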
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct vhost_scsi_evt, list);
                llnode = llist_next(llnode);
                vhost_scsi_do_evt_work(vs, evt);
                vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        struct iov_iter iov_iter;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct vhost_scsi_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);

                iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

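/*
 * Grab a pre-allocated command descriptor from the session tag pool.  The
 * scatterlist, protection scatterlist and page arrays were allocated up
 * front with the session, so they are saved across the memset() below and
 * restored into the zeroed descriptor.
 */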
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct vhost_scsi_cmd *cmd;
        struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct vhost_scsi_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = vhost_scsi_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

        return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
                      void __user *ptr,
                      size_t len,
                      struct scatterlist *sgl,
                      bool write)
{
        unsigned int npages = 0, offset, nbytes;
        unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
        struct page **pages = cmd->tvc_upages;
        int ret, i;

        if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
                        pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than requested */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}

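/*
 * Count the scatterlist entries needed to cover an iov_iter, bounding the
 * result by the number of entries pre-allocated per command.
 */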
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
        int sgl_count = 0;

        if (!iter || !iter->iov) {
                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
                       " present\n", __func__, bytes);
                return -EINVAL;
        }

        sgl_count = iov_iter_npages(iter, 0xffff);
        if (sgl_count > max_sgls) {
                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
                return -EINVAL;
        }
        return sgl_count;
}

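/*
 * Walk the iovecs of @iter and pin each segment's pages into @sg.  On
 * failure, every page already installed in the scatterlist is released
 * before returning the error.
 */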
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
                      struct iov_iter *iter,
                      struct scatterlist *sg, int sg_count)
{
        size_t off = iter->iov_offset;
        int i, ret;

        for (i = 0; i < iter->nr_segs; i++) {
                void __user *base = iter->iov[i].iov_base + off;
                size_t len = iter->iov[i].iov_len - off;

                ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
                        for (i = 0; i < sg_count; i++) {
                                struct page *page = sg_page(&sg[i]);
                                if (page)
                                        put_page(page);
                        }
                        return ret;
                }
                sg += ret;
                off = 0;
        }
        return 0;
}

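/*
 * Build the protection and data scatterlists for a command.  The write
 * flag passed down to get_user_pages_fast() reflects DMA_FROM_DEVICE,
 * i.e. whether the host will store READ payload into guest memory.
 */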
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
                 size_t prot_bytes, struct iov_iter *prot_iter,
                 size_t data_bytes, struct iov_iter *data_iter)
{
        int sgl_count, ret;
        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

        if (prot_bytes) {
                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
                                                 VHOST_SCSI_PREALLOC_PROT_SGLS);
                if (sgl_count < 0)
                        return sgl_count;

                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
                cmd->tvc_prot_sgl_count = sgl_count;
                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
                                            cmd->tvc_prot_sgl,
                                            cmd->tvc_prot_sgl_count);
                if (ret < 0) {
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
        }
        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
                                         VHOST_SCSI_PREALLOC_SGLS);
        if (sgl_count < 0)
                return sgl_count;

        sg_init_table(cmd->tvc_sgl, sgl_count);
        cmd->tvc_sgl_count = sgl_count;
        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
                  cmd->tvc_sgl, cmd->tvc_sgl_count);

        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
        if (ret < 0) {
                cmd->tvc_sgl_count = 0;
                return ret;
        }
        return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
        switch (attr) {
        case VIRTIO_SCSI_S_SIMPLE:
                return TCM_SIMPLE_TAG;
        case VIRTIO_SCSI_S_ORDERED:
                return TCM_ORDERED_TAG;
        case VIRTIO_SCSI_S_HEAD:
                return TCM_HEAD_TAG;
        case VIRTIO_SCSI_S_ACA:
                return TCM_ACA_TAG;
        default:
                break;
        }
        return TCM_SIMPLE_TAG;
}

static void vhost_scsi_submission_work(struct work_struct *work)
{
        struct vhost_scsi_cmd *cmd =
                container_of(work, struct vhost_scsi_cmd, work);
        struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        se_cmd->tag = 0;
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
                        sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
                        cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

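/*
 * Complete a request with VIRTIO_SCSI_S_BAD_TARGET directly from the vhost
 * thread, for errors detected before a command was ever submitted to TCM.
 */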
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

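/*
 * Fast path for the I/O virtqueues: parse each virtio-scsi request header,
 * infer the data direction from the iovec sizes, map any payload into
 * scatterlists and dispatch the command to TCM via the cmwq workqueue.
 */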
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct vhost_scsi_cmd *cmd;
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
        unsigned out, in;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov), &out, &in,
                                         NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                         head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }
                /*
                 * Check for a sane response buffer so we can report early
                 * errors back to the guest.
                 */
                if (unlikely(vq->iov[out].iov_len < rsp_size)) {
                        vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
                                " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
                /*
                 * Setup pointers and values based upon different virtio-scsi
                 * request header if T10_PI is enabled in KVM guest.
                 */
                if (t10_pi) {
                        req = &v_req_pi;
                        req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                } else {
                        req = &v_req;
                        req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                }
                /*
                 * FIXME: Not correct for BIDI operation
                 */
                out_size = iov_length(vq->iov, out);
                in_size = iov_length(&vq->iov[out], in);

                /*
                 * Copy over the virtio-scsi request header, which for an
                 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
                 * single iovec may contain both the header + outgoing
                 * WRITE payloads.
                 *
                 * copy_from_iter() will advance out_iter, so that it will
                 * point at the start of the outgoing WRITE payload, if
                 * DMA_TO_DEVICE is set.
                 */
                iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

                ret = copy_from_iter(req, req_size, &out_iter);
                if (unlikely(ret != req_size)) {
                        vq_err(vq, "Faulted on copy_from_iter\n");
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);
                if (unlikely(!tpg)) {
                        /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /*
                 * Determine data_direction by calculating the total outgoing
                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
                 * response headers respectively.
                 *
                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
                 * to the right place.
                 *
                 * For DMA_FROM_DEVICE, the iovec will be just past the end
                 * of the virtio-scsi response header in either the same
                 * or immediately following iovec.
                 *
                 * Any associated T10_PI bytes for the outgoing / incoming
                 * payloads are included in calculation of exp_data_len here.
                 */
                prot_bytes = 0;

                if (out_size > req_size) {
                        data_direction = DMA_TO_DEVICE;
                        exp_data_len = out_size - req_size;
                        data_iter = out_iter;
                } else if (in_size > rsp_size) {
                        data_direction = DMA_FROM_DEVICE;
                        exp_data_len = in_size - rsp_size;

                        iov_iter_init(&in_iter, READ, &vq->iov[out], in,
                                      rsp_size + exp_data_len);
                        iov_iter_advance(&in_iter, rsp_size);
                        data_iter = in_iter;
                } else {
                        data_direction = DMA_NONE;
                        exp_data_len = 0;
                }
                /*
                 * If T10_PI header + payload is present, setup prot_iter values
                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
                 * host scatterlists via get_user_pages_fast().
                 */
                if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesout,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesin,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
                         * Set prot_iter to data_iter and advance past any
                         * preceding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
                         * actual data payload length.
                         */
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
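                        /* Flat LUN addressing: 14-bit LUN taken from bytes 2-3 of lun[] */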
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi, then get a pre-allocated
                 * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                               PTR_ERR(cmd));
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp_iov = &vq->iov[out];
                cmd->tvc_in_iovs = in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                         cmd->tvc_cdb[0], cmd->tvc_lun);
                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_mapal(cmd,
                                               prot_bytes, &prot_iter,
                                               exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
                                vhost_scsi_send_bad_target(vs, vq, head, out);
                                continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch cmd descriptor for cmwq execution in process
                 * context provided by vhost_scsi_workqueue.  This also ensures
                 * cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
        }
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

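/*
 * Queue a hotplug event for the worker thread.  Encoding sketch: for a
 * LUN of 300 on TPG 1, lun[0..3] become 0x01, 0x01, 0x41, 0x2c, i.e. the
 * SAM flat-addressing form (0x40 | (300 >> 8)) and (300 & 0xFF).
 */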
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
                   struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct vhost_scsi_evt *evt;

        evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        vhost_scsi_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 *  The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct se_portal_group *se_tpg;
        struct vhost_scsi_tport *tv_tport;
        struct vhost_scsi_tpg *tpg;
        struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been set up correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);

        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                if (tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                tv_tport = tpg->tport;

                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                kfree(vs_tpg);
                                mutex_unlock(&tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
                        /*
                         * In order to ensure individual vhost-scsi configfs
                         * groups cannot be removed while in use by vhost ioctl,
                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
                         * dependency now.
                         */
                        se_tpg = &tpg->se_tpg;
                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
                        if (ret) {
                                pr_warn("configfs_depend_item() failed: %d\n", ret);
                                kfree(vs_tpg);
                                mutex_unlock(&tpg->tv_tpg_mutex);
                                goto out;
                        }
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
                        smp_mb__after_atomic();
                        match = true;
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
        }

        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        mutex_lock(&vq->mutex);
                        vq->private_data = vs_tpg;
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
        } else {
                ret = -EEXIST;
        }

        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&vhost_scsi_mutex);
        return ret;
}

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                          struct vhost_scsi_target *t)
{
        struct se_portal_group *se_tpg;
        struct vhost_scsi_tport *tv_tport;
        struct vhost_scsi_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;

        mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been set up correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto err_dev;
                }
        }

        if (!vs->vs_tpg) {
                ret = 0;
                goto err_dev;
        }

        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
                tpg = vs->vs_tpg[target];
                if (!tpg)
                        continue;

                mutex_lock(&tpg->tv_tpg_mutex);
                tv_tport = tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
                        goto err_tpg;
                }

                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
                                tv_tport->tport_name, tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
                        goto err_tpg;
                }
                tpg->tv_tpg_vhost_count--;
                tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tpg->tv_tpg_mutex);
                /*
                 * Release se_tpg->tpg_group.cg_item configfs dependency now
                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
                 */
                se_tpg = &tpg->se_tpg;
                target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        mutex_lock(&vq->mutex);
                        vq->private_data = NULL;
                        mutex_unlock(&vq->mutex);
                }
        }
        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&vhost_scsi_mutex);
        return 0;

err_tpg:
        mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&vhost_scsi_mutex);
        return ret;
}

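/*
 * Validate the feature bits negotiated by userspace and propagate them to
 * every virtqueue under the proper locks; VHOST_F_LOG_ALL additionally
 * requires that the current log buffers pass access checks.
 */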
1387static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1388{
1389        struct vhost_virtqueue *vq;
1390        int i;
1391
1392        if (features & ~VHOST_SCSI_FEATURES)
1393                return -EOPNOTSUPP;
1394
1395        mutex_lock(&vs->dev.mutex);
1396        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1397            !vhost_log_access_ok(&vs->dev)) {
1398                mutex_unlock(&vs->dev.mutex);
1399                return -EFAULT;
1400        }
1401
1402        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1403                vq = &vs->vqs[i].vq;
1404                mutex_lock(&vq->mutex);
1405                vq->acked_features = features;
1406                mutex_unlock(&vq->mutex);
1407        }
1408        mutex_unlock(&vs->dev.mutex);
1409        return 0;
1410}
1411
1412static int vhost_scsi_open(struct inode *inode, struct file *f)
1413{
1414        struct vhost_scsi *vs;
1415        struct vhost_virtqueue **vqs;
1416        int r = -ENOMEM, i;
1417
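        /*
         * Try a physically contiguous allocation first; fall back to
         * vzalloc() when memory is fragmented or under pressure.
         */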
1418        vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1419        if (!vs) {
1420                vs = vzalloc(sizeof(*vs));
1421                if (!vs)
1422                        goto err_vs;
1423        }
1424
1425        vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1426        if (!vqs)
1427                goto err_vqs;
1428
1429        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1430        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1431
1432        vs->vs_events_nr = 0;
1433        vs->vs_events_missed = false;
1434
1435        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1436        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1437        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1438        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1439        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1440                vqs[i] = &vs->vqs[i].vq;
1441                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1442        }
1443        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1444
1445        vhost_scsi_init_inflight(vs, NULL);
1446
1447        f->private_data = vs;
1448        return 0;
1449
1450err_vqs:
1451        kvfree(vs);
1452err_vs:
1453        return r;
1454}
1455
1456static int vhost_scsi_release(struct inode *inode, struct file *f)
1457{
1458        struct vhost_scsi *vs = f->private_data;
1459        struct vhost_scsi_target t;
1460
1461        mutex_lock(&vs->dev.mutex);
1462        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1463        mutex_unlock(&vs->dev.mutex);
1464        vhost_scsi_clear_endpoint(vs, &t);
1465        vhost_dev_stop(&vs->dev);
1466        vhost_dev_cleanup(&vs->dev, false);
1467        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1468        vhost_scsi_flush(vs);
1469        kfree(vs->dev.vqs);
1470        kvfree(vs);
1471        return 0;
1472}
1473
1474static long
1475vhost_scsi_ioctl(struct file *f,
1476                 unsigned int ioctl,
1477                 unsigned long arg)
1478{
1479        struct vhost_scsi *vs = f->private_data;
1480        struct vhost_scsi_target backend;
1481        void __user *argp = (void __user *)arg;
1482        u64 __user *featurep = argp;
1483        u32 __user *eventsp = argp;
1484        u32 events_missed;
1485        u64 features;
1486        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1487        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1488
1489        switch (ioctl) {
1490        case VHOST_SCSI_SET_ENDPOINT:
1491                if (copy_from_user(&backend, argp, sizeof backend))
1492                        return -EFAULT;
1493                if (backend.reserved != 0)
1494                        return -EOPNOTSUPP;
1495
1496                return vhost_scsi_set_endpoint(vs, &backend);
1497        case VHOST_SCSI_CLEAR_ENDPOINT:
1498                if (copy_from_user(&backend, argp, sizeof backend))
1499                        return -EFAULT;
1500                if (backend.reserved != 0)
1501                        return -EOPNOTSUPP;
1502
1503                return vhost_scsi_clear_endpoint(vs, &backend);
1504        case VHOST_SCSI_GET_ABI_VERSION:
1505                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1506                        return -EFAULT;
1507                return 0;
1508        case VHOST_SCSI_SET_EVENTS_MISSED:
1509                if (get_user(events_missed, eventsp))
1510                        return -EFAULT;
1511                mutex_lock(&vq->mutex);
1512                vs->vs_events_missed = events_missed;
1513                mutex_unlock(&vq->mutex);
1514                return 0;
1515        case VHOST_SCSI_GET_EVENTS_MISSED:
1516                mutex_lock(&vq->mutex);
1517                events_missed = vs->vs_events_missed;
1518                mutex_unlock(&vq->mutex);
1519                if (put_user(events_missed, eventsp))
1520                        return -EFAULT;
1521                return 0;
1522        case VHOST_GET_FEATURES:
1523                features = VHOST_SCSI_FEATURES;
1524                if (copy_to_user(featurep, &features, sizeof features))
1525                        return -EFAULT;
1526                return 0;
1527        case VHOST_SET_FEATURES:
1528                if (copy_from_user(&features, featurep, sizeof features))
1529                        return -EFAULT;
1530                return vhost_scsi_set_features(vs, features);
1531        default:
1532                mutex_lock(&vs->dev.mutex);
1533                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1534                /* TODO: flush backend after dev ioctl. */
1535                if (r == -ENOIOCTLCMD)
1536                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1537                mutex_unlock(&vs->dev.mutex);
1538                return r;
1539        }
1540}
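
/*
 * Illustrative only: the open/attach sequence a VMM would run against
 * the ioctls above. struct vhost_scsi_target and the VHOST_* ioctl
 * numbers come from <linux/vhost.h>; the WWPN below is a made-up
 * example and error handling is elided.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int attach_vhost_scsi(const char *wwpn, unsigned short tpgt)
{
        struct vhost_scsi_target backend;
        int fd = open("/dev/vhost-scsi", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, VHOST_SET_OWNER) < 0)
                return -1;
        /* 'reserved' must stay zero or the ioctl returns -EOPNOTSUPP. */
        memset(&backend, 0, sizeof(backend));
        strncpy(backend.vhost_wwpn, wwpn, sizeof(backend.vhost_wwpn) - 1);
        backend.vhost_tpgt = tpgt;
        if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
                return -1;
        /* e.g. attach_vhost_scsi("naa.500140512345678a", 1) */
        return fd;
}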
1541
1542#ifdef CONFIG_COMPAT
1543static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1544                                unsigned long arg)
1545{
1546        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1547}
1548#endif
1549
1550static const struct file_operations vhost_scsi_fops = {
1551        .owner          = THIS_MODULE,
1552        .release        = vhost_scsi_release,
1553        .unlocked_ioctl = vhost_scsi_ioctl,
1554#ifdef CONFIG_COMPAT
1555        .compat_ioctl   = vhost_scsi_compat_ioctl,
1556#endif
1557        .open           = vhost_scsi_open,
1558        .llseek         = noop_llseek,
1559};
1560
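/*
 * Registering as a dynamic-minor misc device is what creates the
 * /dev/vhost-scsi character node that userspace opens, reaching
 * vhost_scsi_open() above.
 */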
1561static struct miscdevice vhost_scsi_misc = {
1562        .minor  = MISC_DYNAMIC_MINOR,
1563        .name   = "vhost-scsi",
1564        .fops   = &vhost_scsi_fops,
1565};
1566
1567static int __init vhost_scsi_register(void)
1568{
1569        return misc_register(&vhost_scsi_misc);
1570}
1571
1572static void vhost_scsi_deregister(void)
1573{
1574        misc_deregister(&vhost_scsi_misc);
1575}
1576
1577static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1578{
1579        switch (tport->tport_proto_id) {
1580        case SCSI_PROTOCOL_SAS:
1581                return "SAS";
1582        case SCSI_PROTOCOL_FCP:
1583                return "FCP";
1584        case SCSI_PROTOCOL_ISCSI:
1585                return "iSCSI";
1586        default:
1587                break;
1588        }
1589
1590        return "Unknown";
1591}
1592
1593static void
1594vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1595                  struct se_lun *lun, bool plug)
1596{
1598        struct vhost_scsi *vs = tpg->vhost_scsi;
1599        struct vhost_virtqueue *vq;
1600        u32 reason;
1601
1602        if (!vs)
1603                return;
1604
1605        mutex_lock(&vs->dev.mutex);
1606
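        /* Map the plug direction onto a virtio-scsi transport-reset reason. */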
1607        if (plug)
1608                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1609        else
1610                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1611
1612        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1613        mutex_lock(&vq->mutex);
1614        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1615                vhost_scsi_send_evt(vs, tpg, lun,
1616                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1617        mutex_unlock(&vq->mutex);
1618        mutex_unlock(&vs->dev.mutex);
1619}
1620
1621static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1622{
1623        vhost_scsi_do_plug(tpg, lun, true);
1624}
1625
1626static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1627{
1628        vhost_scsi_do_plug(tpg, lun, false);
1629}
1630
1631static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1632                               struct se_lun *lun)
1633{
1634        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1635                                struct vhost_scsi_tpg, se_tpg);
1636
1637        mutex_lock(&vhost_scsi_mutex);
1638
1639        mutex_lock(&tpg->tv_tpg_mutex);
1640        tpg->tv_tpg_port_count++;
1641        mutex_unlock(&tpg->tv_tpg_mutex);
1642
1643        vhost_scsi_hotplug(tpg, lun);
1644
1645        mutex_unlock(&vhost_scsi_mutex);
1646
1647        return 0;
1648}
1649
1650static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1651                                  struct se_lun *lun)
1652{
1653        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1654                                struct vhost_scsi_tpg, se_tpg);
1655
1656        mutex_lock(&vhost_scsi_mutex);
1657
1658        mutex_lock(&tpg->tv_tpg_mutex);
1659        tpg->tv_tpg_port_count--;
1660        mutex_unlock(&tpg->tv_tpg_mutex);
1661
1662        vhost_scsi_hotunplug(tpg, lun);
1663
1664        mutex_unlock(&vhost_scsi_mutex);
1665}
1666
1667static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1668                                       struct se_session *se_sess)
1669{
1670        struct vhost_scsi_cmd *tv_cmd;
1671        unsigned int i;
1672
1673        if (!se_sess->sess_cmd_map)
1674                return;
1675
1676        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1677                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1678
1679                kfree(tv_cmd->tvc_sgl);
1680                kfree(tv_cmd->tvc_prot_sgl);
1681                kfree(tv_cmd->tvc_upages);
1682        }
1683}
1684
1685static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1686                struct config_item *item, const char *page, size_t count)
1687{
1688        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1689        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1690                                struct vhost_scsi_tpg, se_tpg);
1691        unsigned long val;
1692        int ret = kstrtoul(page, 0, &val);
1693
1694        if (ret) {
1695                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1696                return ret;
1697        }
1698        if (val != 0 && val != 1 && val != 3) {
1699                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1700                return -EINVAL;
1701        }
1702        tpg->tv_fabric_prot_type = val;
1703
1704        return count;
1705}
1706
1707static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1708                struct config_item *item, char *page)
1709{
1710        struct se_portal_group *se_tpg = attrib_to_tpg(item);
1711        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1712                                struct vhost_scsi_tpg, se_tpg);
1713
1714        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1715}
1716
1717CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1718
1719static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1720        &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1721        NULL,
1722};
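
/*
 * Illustrative only: setting the attribute above through configfs from
 * userspace. The /sys/kernel/config mount point is conventional; the
 * WWPN/TPG path components are hypothetical and error handling is
 * minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_fabric_prot_type(const char *tpg_path, char val)
{
        char path[256];
        char buf[2] = { val, '\n' };
        int fd, ret;

        snprintf(path, sizeof(path), "%s/attrib/fabric_prot_type", tpg_path);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = write(fd, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
        close(fd);
        /*
         * e.g. set_fabric_prot_type("/sys/kernel/config/target/vhost/"
         *                           "naa.500140512345678a/tpgt_1", '1')
         */
        return ret;
}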
1723
1724static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1725                                const char *name)
1726{
1727        struct se_portal_group *se_tpg;
1728        struct se_session *se_sess;
1729        struct vhost_scsi_nexus *tv_nexus;
1730        struct vhost_scsi_cmd *tv_cmd;
1731        unsigned int i;
1732
1733        mutex_lock(&tpg->tv_tpg_mutex);
1734        if (tpg->tpg_nexus) {
1735                mutex_unlock(&tpg->tv_tpg_mutex);
1736                pr_debug("tpg->tpg_nexus already exists\n");
1737                return -EEXIST;
1738        }
1739        se_tpg = &tpg->se_tpg;
1740
1741        tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1742        if (!tv_nexus) {
1743                mutex_unlock(&tpg->tv_tpg_mutex);
1744                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1745                return -ENOMEM;
1746        }
1747        /*
1748         *  Initialize the struct se_session pointer and setup tagpool
1749         *  for struct vhost_scsi_cmd descriptors
1750         */
1751        tv_nexus->tvn_se_sess = transport_init_session_tags(
1752                                        VHOST_SCSI_DEFAULT_TAGS,
1753                                        sizeof(struct vhost_scsi_cmd),
1754                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1755        if (IS_ERR(tv_nexus->tvn_se_sess)) {
1756                mutex_unlock(&tpg->tv_tpg_mutex);
1757                kfree(tv_nexus);
1758                return -ENOMEM;
1759        }
1760        se_sess = tv_nexus->tvn_se_sess;
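        /*
         * Pre-allocate the scatterlist, protection scatterlist and user
         * page arrays for every command slot in the session's tag pool.
         */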
1761        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1762                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1763
1764                tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1765                                        sizeof(struct scatterlist), GFP_KERNEL);
1766                if (!tv_cmd->tvc_sgl) {
1767                        mutex_unlock(&tpg->tv_tpg_mutex);
1768                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1769                        goto out;
1770                }
1771
1772                tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1773                                        sizeof(struct page *), GFP_KERNEL);
1774                if (!tv_cmd->tvc_upages) {
1775                        mutex_unlock(&tpg->tv_tpg_mutex);
1776                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1777                        goto out;
1778                }
1779
1780                tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1781                                        sizeof(struct scatterlist), GFP_KERNEL);
1782                if (!tv_cmd->tvc_prot_sgl) {
1783                        mutex_unlock(&tpg->tv_tpg_mutex);
1784                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1785                        goto out;
1786                }
1787        }
1788        /*
1789         * Since we are running in 'demo mode' this call will generate a
1790         * struct se_node_acl for the vhost_scsi struct se_portal_group with
1791         * the SCSI Initiator port name of the passed configfs group 'name'.
1792         */
1793        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1794                                se_tpg, (unsigned char *)name);
1795        if (!tv_nexus->tvn_se_sess->se_node_acl) {
1796                mutex_unlock(&tpg->tv_tpg_mutex);
1797                pr_debug("core_tpg_check_initiator_node_acl() failed"
1798                                " for %s\n", name);
1799                goto out;
1800        }
1801        /*
1802         * Now register the TCM vhost virtual I_T Nexus as active.
1803         */
1804        transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1805                        tv_nexus->tvn_se_sess, tv_nexus);
1806        tpg->tpg_nexus = tv_nexus;
1807
1808        mutex_unlock(&tpg->tv_tpg_mutex);
1809        return 0;
1810
1811out:
1812        vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1813        transport_free_session(se_sess);
1814        kfree(tv_nexus);
1815        return -ENOMEM;
1816}
1817
1818static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1819{
1820        struct se_session *se_sess;
1821        struct vhost_scsi_nexus *tv_nexus;
1822
1823        mutex_lock(&tpg->tv_tpg_mutex);
1824        tv_nexus = tpg->tpg_nexus;
1825        if (!tv_nexus) {
1826                mutex_unlock(&tpg->tv_tpg_mutex);
1827                return -ENODEV;
1828        }
1829
1830        se_sess = tv_nexus->tvn_se_sess;
1831        if (!se_sess) {
1832                mutex_unlock(&tpg->tv_tpg_mutex);
1833                return -ENODEV;
1834        }
1835
1836        if (tpg->tv_tpg_port_count != 0) {
1837                mutex_unlock(&tpg->tv_tpg_mutex);
1838                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1839                        " active TPG port count: %d\n",
1840                        tpg->tv_tpg_port_count);
1841                return -EBUSY;
1842        }
1843
1844        if (tpg->tv_tpg_vhost_count != 0) {
1845                mutex_unlock(&tpg->tv_tpg_mutex);
1846                pr_err("Unable to remove TCM_vhost I_T Nexus with"
1847                        " active TPG vhost count: %d\n",
1848                        tpg->tv_tpg_vhost_count);
1849                return -EBUSY;
1850        }
1851
1852        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1853                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1854                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1855
1856        vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1857        /*
1858         * Release the SCSI I_T Nexus to the emulated vhost Target Port
1859         */
1860        transport_deregister_session(tv_nexus->tvn_se_sess);
1861        tpg->tpg_nexus = NULL;
1862        mutex_unlock(&tpg->tv_tpg_mutex);
1863
1864        kfree(tv_nexus);
1865        return 0;
1866}
1867
1868static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1869{
1870        struct se_portal_group *se_tpg = to_tpg(item);
1871        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1872                                struct vhost_scsi_tpg, se_tpg);
1873        struct vhost_scsi_nexus *tv_nexus;
1874        ssize_t ret;
1875
1876        mutex_lock(&tpg->tv_tpg_mutex);
1877        tv_nexus = tpg->tpg_nexus;
1878        if (!tv_nexus) {
1879                mutex_unlock(&tpg->tv_tpg_mutex);
1880                return -ENODEV;
1881        }
1882        ret = snprintf(page, PAGE_SIZE, "%s\n",
1883                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1884        mutex_unlock(&tpg->tv_tpg_mutex);
1885
1886        return ret;
1887}
1888
1889static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1890                const char *page, size_t count)
1891{
1892        struct se_portal_group *se_tpg = to_tpg(item);
1893        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1894                                struct vhost_scsi_tpg, se_tpg);
1895        struct vhost_scsi_tport *tport_wwn = tpg->tport;
1896        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1897        int ret;
1898        /*
1899         * Shut down the active I_T nexus if 'NULL' is passed.
1900         */
1901        if (!strncmp(page, "NULL", 4)) {
1902                ret = vhost_scsi_drop_nexus(tpg);
1903                return (!ret) ? count : ret;
1904        }
1905        /*
1906         * Otherwise make sure the passed virtual Initiator port WWN matches
1907         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1908         * vhost_scsi_make_nexus().
1909         */
1910        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1911                pr_err("Emulated NAA SAS Address: %s, exceeds"
1912                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
1913                return -EINVAL;
1914        }
1915        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1916
1917        ptr = strstr(i_port, "naa.");
1918        if (ptr) {
1919                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1920                        pr_err("Passed SAS Initiator Port %s does not"
1921                                " match target port protoid: %s\n", i_port,
1922                                vhost_scsi_dump_proto_id(tport_wwn));
1923                        return -EINVAL;
1924                }
1925                port_ptr = &i_port[0];
1926                goto check_newline;
1927        }
1928        ptr = strstr(i_port, "fc.");
1929        if (ptr) {
1930                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1931                        pr_err("Passed FCP Initiator Port %s does not"
1932                                " match target port protoid: %s\n", i_port,
1933                                vhost_scsi_dump_proto_id(tport_wwn));
1934                        return -EINVAL;
1935                }
1936                port_ptr = &i_port[3]; /* Skip over "fc." */
1937                goto check_newline;
1938        }
1939        ptr = strstr(i_port, "iqn.");
1940        if (ptr) {
1941                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1942                        pr_err("Passed iSCSI Initiator Port %s does not"
1943                                " match target port protoid: %s\n", i_port,
1944                                vhost_scsi_dump_proto_id(tport_wwn));
1945                        return -EINVAL;
1946                }
1947                port_ptr = &i_port[0];
1948                goto check_newline;
1949        }
1950        pr_err("Unable to locate prefix for emulated Initiator Port:"
1951                        " %s\n", i_port);
1952        return -EINVAL;
1953        /*
1954         * Clear any trailing newline for the NAA WWN
1955         */
1956check_newline:
1957        if (i_port[strlen(i_port)-1] == '\n')
1958                i_port[strlen(i_port)-1] = '\0';
1959
1960        ret = vhost_scsi_make_nexus(tpg, port_ptr);
1961        if (ret < 0)
1962                return ret;
1963
1964        return count;
1965}
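
/*
 * Illustrative only: exercising the store handler above through
 * configfs. The TPG path and initiator WWN are hypothetical; error
 * handling is elided.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_nexus(const char *tpg_path, const char *initiator_wwn)
{
        char path[256];
        int fd, ret;

        snprintf(path, sizeof(path), "%s/nexus", tpg_path);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        /* Writing the string "NULL" instead drops the active nexus. */
        ret = write(fd, initiator_wwn, strlen(initiator_wwn));
        close(fd);
        return ret < 0 ? -1 : 0;
}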
1966
1967CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1968
1969static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1970        &vhost_scsi_tpg_attr_nexus,
1971        NULL,
1972};
1973
1974static struct se_portal_group *
1975vhost_scsi_make_tpg(struct se_wwn *wwn,
1976                   struct config_group *group,
1977                   const char *name)
1978{
1979        struct vhost_scsi_tport *tport = container_of(wwn,
1980                        struct vhost_scsi_tport, tport_wwn);
1981
1982        struct vhost_scsi_tpg *tpg;
1983        u16 tpgt;
1984        int ret;
1985
1986        if (strstr(name, "tpgt_") != name)
1987                return ERR_PTR(-EINVAL);
1988        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1989                return ERR_PTR(-EINVAL);
1990
1991        tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
1992        if (!tpg) {
1993                pr_err("Unable to allocate struct vhost_scsi_tpg");
1994                return ERR_PTR(-ENOMEM);
1995        }
1996        mutex_init(&tpg->tv_tpg_mutex);
1997        INIT_LIST_HEAD(&tpg->tv_tpg_list);
1998        tpg->tport = tport;
1999        tpg->tport_tpgt = tpgt;
2000
2001        ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2002        if (ret < 0) {
2003                kfree(tpg);
2004                return NULL;
2005        }
2006        mutex_lock(&vhost_scsi_mutex);
2007        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2008        mutex_unlock(&vhost_scsi_mutex);
2009
2010        return &tpg->se_tpg;
2011}
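
/*
 * Illustrative only: the configfs mkdir(2) that lands in
 * vhost_scsi_make_tpg() above. The name must carry the "tpgt_" prefix
 * with a TPG number below VHOST_SCSI_MAX_TARGET; the WWPN directory is
 * a hypothetical example.
 */
#include <sys/stat.h>

static int make_example_tpg(void)
{
        return mkdir("/sys/kernel/config/target/vhost/"
                     "naa.500140512345678a/tpgt_1", 0755);
}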
2012
2013static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2014{
2015        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2016                                struct vhost_scsi_tpg, se_tpg);
2017
2018        mutex_lock(&vhost_scsi_mutex);
2019        list_del(&tpg->tv_tpg_list);
2020        mutex_unlock(&vhost_scsi_mutex);
2021        /*
2022         * Release the virtual I_T Nexus for this vhost TPG
2023         */
2024        vhost_scsi_drop_nexus(tpg);
2025        /*
2026         * Deregister the se_tpg from TCM..
2027         */
2028        core_tpg_deregister(se_tpg);
2029        kfree(tpg);
2030}
2031
2032static struct se_wwn *
2033vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2034                     struct config_group *group,
2035                     const char *name)
2036{
2037        struct vhost_scsi_tport *tport;
2038        char *ptr;
2039        u64 wwpn = 0;
2040        int off = 0;
2041
2042        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2043                return ERR_PTR(-EINVAL); */
2044
2045        tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2046        if (!tport) {
2047                pr_err("Unable to allocate struct vhost_scsi_tport");
2048                return ERR_PTR(-ENOMEM);
2049        }
2050        tport->tport_wwpn = wwpn;
2051        /*
2052         * Determine the emulated Protocol Identifier and Target Port Name
2053         * based on the incoming configfs directory name.
2054         */
2055        ptr = strstr(name, "naa.");
2056        if (ptr) {
2057                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2058                goto check_len;
2059        }
2060        ptr = strstr(name, "fc.");
2061        if (ptr) {
2062                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2063                off = 3; /* Skip over "fc." */
2064                goto check_len;
2065        }
2066        ptr = strstr(name, "iqn.");
2067        if (ptr) {
2068                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2069                goto check_len;
2070        }
2071
2072        pr_err("Unable to locate prefix for emulated Target Port:"
2073                        " %s\n", name);
2074        kfree(tport);
2075        return ERR_PTR(-EINVAL);
2076
2077check_len:
2078        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2079                pr_err("Emulated %s Address: %s, exceeds"
2080                        " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2081                        VHOST_SCSI_NAMELEN);
2082                kfree(tport);
2083                return ERR_PTR(-EINVAL);
2084        }
2085        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2086
2087        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2088                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2089
2090        return &tport->tport_wwn;
2091}
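
/*
 * Illustrative only: configfs directory names accepted by the prefix
 * parsing in vhost_scsi_make_tport() above, one per supported
 * protocol. The values are made-up examples.
 */
static const char * const example_tport_names[] = {
        "naa.500140512345678a",                 /* SCSI_PROTOCOL_SAS   */
        "fc.20000025b5000001",                  /* SCSI_PROTOCOL_FCP   */
        "iqn.2016-01.org.example:tport",        /* SCSI_PROTOCOL_ISCSI */
};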
2092
2093static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2094{
2095        struct vhost_scsi_tport *tport = container_of(wwn,
2096                                struct vhost_scsi_tport, tport_wwn);
2097
2098        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2099                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2100                tport->tport_name);
2101
2102        kfree(tport);
2103}
2104
2105static ssize_t
2106vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2107{
2108        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2109                "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2110                utsname()->machine);
2111}
2112
2113CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2114
2115static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2116        &vhost_scsi_wwn_attr_version,
2117        NULL,
2118};
2119
2120static const struct target_core_fabric_ops vhost_scsi_ops = {
2121        .module                         = THIS_MODULE,
2122        .name                           = "vhost",
2123        .get_fabric_name                = vhost_scsi_get_fabric_name,
2124        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2125        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2126        .tpg_check_demo_mode            = vhost_scsi_check_true,
2127        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2128        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2129        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2130        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2131        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2132        .release_cmd                    = vhost_scsi_release_cmd,
2133        .check_stop_free                = vhost_scsi_check_stop_free,
2134        .shutdown_session               = vhost_scsi_shutdown_session,
2135        .close_session                  = vhost_scsi_close_session,
2136        .sess_get_index                 = vhost_scsi_sess_get_index,
2137        .sess_get_initiator_sid         = NULL,
2138        .write_pending                  = vhost_scsi_write_pending,
2139        .write_pending_status           = vhost_scsi_write_pending_status,
2140        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2141        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2142        .queue_data_in                  = vhost_scsi_queue_data_in,
2143        .queue_status                   = vhost_scsi_queue_status,
2144        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2145        .aborted_task                   = vhost_scsi_aborted_task,
2146        /*
2147         * Setup callers for generic logic in target_core_fabric_configfs.c
2148         */
2149        .fabric_make_wwn                = vhost_scsi_make_tport,
2150        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2151        .fabric_make_tpg                = vhost_scsi_make_tpg,
2152        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2153        .fabric_post_link               = vhost_scsi_port_link,
2154        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2155
2156        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2157        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2158        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2159};
2160
2161static int __init vhost_scsi_init(void)
2162{
2163        int ret = -ENOMEM;
2164
2165        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2166                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2167                utsname()->machine);
2168
2169        /*
2170         * Use our own dedicated workqueue for submitting I/O into
2171         * target core to avoid contention within system_wq.
2172         */
2173        vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2174        if (!vhost_scsi_workqueue)
2175                goto out;
2176
2177        ret = vhost_scsi_register();
2178        if (ret < 0)
2179                goto out_destroy_workqueue;
2180
2181        ret = target_register_template(&vhost_scsi_ops);
2182        if (ret < 0)
2183                goto out_vhost_scsi_deregister;
2184
2185        return 0;
2186
2187out_vhost_scsi_deregister:
2188        vhost_scsi_deregister();
2189out_destroy_workqueue:
2190        destroy_workqueue(vhost_scsi_workqueue);
2191out:
2192        return ret;
2193}
2194
2195static void vhost_scsi_exit(void)
2196{
2197        target_unregister_template(&vhost_scsi_ops);
2198        vhost_scsi_deregister();
2199        destroy_workqueue(vhost_scsi_workqueue);
2200}
2201
2202MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2203MODULE_ALIAS("tcm_vhost");
2204MODULE_LICENSE("GPL");
2205module_init(vhost_scsi_init);
2206module_exit(vhost_scsi_exit);
2207