linux/drivers/vhost/scsi.c
// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi response incoming iovecs */
        int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
        struct iovec tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* Used for enabling T10-PI with legacy devices */
        int tv_fabric_prot_type;
        /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct vhost_scsi_nexus *tpg_nexus;
        /* Pointer back to vhost_scsi_tport */
        struct vhost_scsi_tport *tport;
        /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
        struct list_head tmf_queue;
};

struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[VHOST_SCSI_NAMELEN];
        /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
};
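
/*
 * These are the feature bits offered to userspace. A minimal userspace
 * sketch of the negotiation, assuming an already-opened /dev/vhost-scsi
 * file descriptor vhost_fd and a driver-chosen mask wanted (error
 * handling omitted; illustrative only, not part of this driver):
 *
 *	u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);	// offered set
 *	features &= wanted;				// keep a subset
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);	// acked set
 *
 * vhost_has_feature() is then used below (e.g. for VIRTIO_SCSI_F_T10_PI
 * in vhost_scsi_handle_vq()) to test what was negotiated.
 */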

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any given time, one slot tracks newly submitted commands, while we
         * wait for the other slot's refcount to reach 0.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
        struct vhost_scsi_cmd *scsi_cmds;
        struct sbitmap scsi_tags;
        int max_cmds;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
        struct vhost_work vwork;
        struct vhost_scsi_tpg *tpg;
        struct vhost_scsi *vhost;
        struct vhost_scsi_virtqueue *svq;
        struct list_head queue_entry;

        struct se_cmd se_cmd;
        u8 scsi_resp;
        struct vhost_scsi_inflight *inflight;
        struct iovec resp_iov;
        int in_iovs;
        int vq_desc;
};
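
/*
 * Lifecycle note (based on the handlers below): a reserved TMF
 * descriptor is parked on tpg->tmf_queue. vhost_scsi_handle_tmf()
 * dequeues it under tv_tpg_mutex to service a LUN RESET, and
 * vhost_scsi_release_tmf_res() returns it to the queue after the
 * response has been sent from vhost_scsi_tmf_resp_work(). If the
 * reserve descriptor is already in use, further TMFs are rejected
 * with VIRTIO_SCSI_S_FUNCTION_REJECTED.
 */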

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
        int head;
        unsigned int out, in;
        size_t req_size, rsp_size;
        size_t out_size, in_size;
        u8 *target, *lunp;
        void *req;
        struct iov_iter out_iter;
};

/* Global mutex to protect the vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store the old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* set up the new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
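
/*
 * A minimal sketch of how the two inflight slots interact with flush,
 * using only functions defined in this file: each command pins the
 * currently active slot for its whole lifetime,
 *
 *	cmd->inflight = vhost_scsi_get_inflight(vq);	// at cmd setup
 *	...
 *	vhost_scsi_put_inflight(cmd->inflight);		// at cmd release
 *
 * while vhost_scsi_flush() flips inflight_idx so new commands pin the
 * other slot, drops the initial kref of the old slot, and then its
 * wait_for_completion() returns once the last outstanding command on
 * the old slot has called vhost_scsi_put_inflight().
 */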

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        struct vhost_scsi_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);

        return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
                                struct vhost_scsi_cmd, tvc_se_cmd);
        struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
                                struct vhost_scsi_virtqueue, vq);
        struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
        vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
        struct vhost_scsi_tpg *tpg = tmf->tpg;
        struct vhost_scsi_inflight *inflight = tmf->inflight;

        /* Return the reserved TMF descriptor to the tpg's tmf_queue */
        mutex_lock(&tpg->tv_tpg_mutex);
        list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
        mutex_unlock(&tpg->tv_tpg_mutex);
        vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
                struct vhost_scsi_tmf *tmf = container_of(se_cmd,
                                        struct vhost_scsi_tmf, se_cmd);

                vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
        } else {
                struct vhost_scsi_cmd *cmd = container_of(se_cmd,
                                        struct vhost_scsi_cmd, tvc_se_cmd);
                struct vhost_scsi *vs = cmd->tvc_vhost;

                llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
                vhost_work_queue(&vs->dev, &vs->vs_completion_work);
        }
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
        transport_generic_free_cmd(se_cmd, 0);
        return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
        transport_generic_free_cmd(se_cmd, 0);
        return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
        struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
                                                  se_cmd);

        tmf->scsi_resp = se_cmd->se_tmr_req->response;
        transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = cpu_to_vhost32(vq, event);
        evt->event.reason = cpu_to_vhost32(vq, reason);
        vs->vs_events_nr++;

        return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vhost_vq_get_backend(vq)) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct vhost_scsi_evt *evt, *t;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        llist_for_each_entry_safe(evt, t, llnode, list) {
                vhost_scsi_do_evt_work(vs, evt);
                vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct vhost_scsi_cmd *cmd, *t;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        struct iov_iter iov_iter;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);

                iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;

                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else {
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");
                }

                vhost_scsi_release_cmd_res(se_cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct vhost_scsi_virtqueue *svq = container_of(vq,
                                        struct vhost_scsi_virtqueue, vq);
        struct vhost_scsi_cmd *cmd;
        struct vhost_scsi_nexus *tv_nexus;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }

        tag = sbitmap_get(&svq->scsi_tags);
        if (tag < 0) {
                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &svq->scsi_cmds[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(*cmd));
        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = vhost_scsi_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

        return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
                      struct iov_iter *iter,
                      struct scatterlist *sgl,
                      bool write)
{
        struct page **pages = cmd->tvc_upages;
        struct scatterlist *sg = sgl;
        ssize_t bytes;
        size_t offset;
        unsigned int npages = 0;

        bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
                                VHOST_SCSI_PREALLOC_UPAGES, &offset);
        /* No pages were pinned */
        if (bytes <= 0)
                return bytes < 0 ? bytes : -EFAULT;

        iov_iter_advance(iter, bytes);

        while (bytes) {
                unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);

                sg_set_page(sg++, pages[npages++], n, offset);
                bytes -= n;
                offset = 0;
        }
        return npages;
}
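
/*
 * Worked example for the loop above, assuming PAGE_SIZE == 4096: pinning
 * 6000 bytes that start at offset 512 into the first page yields two
 * scatterlist entries,
 *
 *	sg[0]: page 0, offset 512, length 3584   (4096 - 512)
 *	sg[1]: page 1, offset 0,   length 2416   (6000 - 3584)
 *
 * so the function returns npages == 2, and the iov_iter has already been
 * advanced past the 6000 bytes just mapped.
 */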

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
        int sgl_count = 0;

        if (!iter || !iter->iov) {
                pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
                       " present\n", __func__, bytes);
                return -EINVAL;
        }

        sgl_count = iov_iter_npages(iter, 0xffff);
        if (sgl_count > max_sgls) {
                pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
                       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
                return -EINVAL;
        }
        return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
                      struct iov_iter *iter,
                      struct scatterlist *sg, int sg_count)
{
        struct scatterlist *p = sg;
        int ret;

        while (iov_iter_count(iter)) {
                ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
                if (ret < 0) {
                        /* Unwind: drop the page references taken so far */
                        while (p < sg) {
                                struct page *page = sg_page(p++);

                                if (page)
                                        put_page(page);
                        }
                        return ret;
                }
                sg += ret;
        }
        return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
                 size_t prot_bytes, struct iov_iter *prot_iter,
                 size_t data_bytes, struct iov_iter *data_iter)
{
        int sgl_count, ret;
        bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

        if (prot_bytes) {
                sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
                                                 VHOST_SCSI_PREALLOC_PROT_SGLS);
                if (sgl_count < 0)
                        return sgl_count;

                sg_init_table(cmd->tvc_prot_sgl, sgl_count);
                cmd->tvc_prot_sgl_count = sgl_count;
                pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                         cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

                ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
                                            cmd->tvc_prot_sgl,
                                            cmd->tvc_prot_sgl_count);
                if (ret < 0) {
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
        }
        sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
                                         VHOST_SCSI_PREALLOC_SGLS);
        if (sgl_count < 0)
                return sgl_count;

        sg_init_table(cmd->tvc_sgl, sgl_count);
        cmd->tvc_sgl_count = sgl_count;
        pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
                  cmd->tvc_sgl, cmd->tvc_sgl_count);

        ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
                                    cmd->tvc_sgl, cmd->tvc_sgl_count);
        if (ret < 0) {
                cmd->tvc_sgl_count = 0;
                return ret;
        }
        return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
        switch (attr) {
        case VIRTIO_SCSI_S_SIMPLE:
                return TCM_SIMPLE_TAG;
        case VIRTIO_SCSI_S_ORDERED:
                return TCM_ORDERED_TAG;
        case VIRTIO_SCSI_S_HEAD:
                return TCM_HEAD_TAG;
        case VIRTIO_SCSI_S_ACA:
                return TCM_ACA_TAG;
        default:
                break;
        }
        return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct vhost_scsi_nexus *tv_nexus;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        se_cmd->tag = 0;
        target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

        if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
                               cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
                               cmd->tvc_prot_sgl_count, GFP_KERNEL))
                return;

        target_queue_submission(se_cmd);
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
                    struct vhost_scsi_ctx *vc)
{
        int ret = -ENXIO;

        vc->head = vhost_get_vq_desc(vq, vq->iov,
                                     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
                                     NULL, NULL);

        pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                 vc->head, vc->out, vc->in);

        /* On error, stop handling until the next kick. */
        if (unlikely(vc->head < 0))
                goto done;

        /* Nothing new?  Wait for eventfd to tell us they refilled. */
        if (vc->head == vq->num) {
                if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                        vhost_disable_notify(&vs->dev, vq);
                        ret = -EAGAIN;
                }
                goto done;
        }

        /*
         * Get the size of request and response buffers.
         * FIXME: Not correct for BIDI operation
         */
        vc->out_size = iov_length(vq->iov, vc->out);
        vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

        /*
         * Copy over the virtio-scsi request header, which for an
         * ANY_LAYOUT enabled guest may span multiple iovecs, or a
         * single iovec may contain both the header + outgoing
         * WRITE payloads.
         *
         * copy_from_iter() will advance out_iter, so that it will
         * point at the start of the outgoing WRITE payload, if
         * DMA_TO_DEVICE is set.
         */
        iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
        ret = 0;

done:
        return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
        if (unlikely(vc->in_size < vc->rsp_size)) {
                vq_err(vq,
                       "Response buf too small, need min %zu bytes got %zu",
                       vc->rsp_size, vc->in_size);
                return -EINVAL;
        } else if (unlikely(vc->out_size < vc->req_size)) {
                vq_err(vq,
                       "Request buf too small, need min %zu bytes got %zu",
                       vc->req_size, vc->out_size);
                return -EIO;
        }

        return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
                   struct vhost_scsi_tpg **tpgp)
{
        int ret = -EIO;

        if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
                                          &vc->out_iter))) {
                vq_err(vq, "Faulted on copy_from_iter_full\n");
        } else if (unlikely(*vc->lunp != 1)) {
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
        } else {
                struct vhost_scsi_tpg **vs_tpg, *tpg;

                vs_tpg = vhost_vq_get_backend(vq);      /* validated at handler entry */

                tpg = READ_ONCE(vs_tpg[*vc->target]);
                if (unlikely(!tpg)) {
                        vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
                } else {
                        if (tpgp)
                                *tpgp = tpg;
                        ret = 0;
                }
        }

        return ret;
}

static u16 vhost_buf_to_lun(u8 *lun_buf)
{
        return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}
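
/*
 * Worked example of the LUN decode above: with the 8-byte virtio-scsi
 * lun field set to { 1, 3, 0x40, 0x05, 0, 0, 0, 0 }, byte 0 is the
 * mandatory 0x01, byte 1 selects target 3, and bytes 2-3 carry the
 * SAM-style flat-addressed LUN: ((0x40 << 8) | 0x05) & 0x3FFF == 5.
 * The 0x40 marks the flat addressing method; masking with 0x3FFF keeps
 * only the 14-bit LUN value.
 */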

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct vhost_scsi_ctx vc;
        struct vhost_scsi_cmd *cmd;
        struct iov_iter in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
        int ret, prot_bytes, c = 0;
        u16 lun;
        u8 task_attr;
        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vhost_vq_get_backend(vq);
        if (!vs_tpg)
                goto out;

        memset(&vc, 0, sizeof(vc));
        vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

        vhost_disable_notify(&vs->dev, vq);

        do {
                ret = vhost_scsi_get_desc(vs, vq, &vc);
                if (ret)
                        goto err;

                /*
                 * Set up pointers and values based upon the different
                 * virtio-scsi request header if T10_PI is enabled in the
                 * KVM guest.
                 */
                if (t10_pi) {
                        vc.req = &v_req_pi;
                        vc.req_size = sizeof(v_req_pi);
                        vc.lunp = &v_req_pi.lun[0];
                        vc.target = &v_req_pi.lun[1];
                } else {
                        vc.req = &v_req;
                        vc.req_size = sizeof(v_req);
                        vc.lunp = &v_req.lun[0];
                        vc.target = &v_req.lun[1];
                }

                /*
                 * Validate the size of request and response buffers.
                 * Check for a sane response buffer so we can report
                 * early errors back to the guest.
                 */
                ret = vhost_scsi_chk_size(vq, &vc);
                if (ret)
                        goto err;

                ret = vhost_scsi_get_req(vq, &vc, &tpg);
                if (ret)
                        goto err;

                ret = -EIO;     /* bad target on any error from here on */

                /*
                 * Determine data_direction by calculating the total outgoing
                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
                 * response headers respectively.
                 *
                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
                 * to the right place.
                 *
                 * For DMA_FROM_DEVICE, the iovec will be just past the end
                 * of the virtio-scsi response header in either the same
                 * or immediately following iovec.
                 *
                 * Any associated T10_PI bytes for the outgoing / incoming
                 * payloads are included in calculation of exp_data_len here.
                 */
                prot_bytes = 0;

                if (vc.out_size > vc.req_size) {
                        data_direction = DMA_TO_DEVICE;
                        exp_data_len = vc.out_size - vc.req_size;
                        data_iter = vc.out_iter;
                } else if (vc.in_size > vc.rsp_size) {
                        data_direction = DMA_FROM_DEVICE;
                        exp_data_len = vc.in_size - vc.rsp_size;

                        iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
                                      vc.rsp_size + exp_data_len);
                        iov_iter_advance(&in_iter, vc.rsp_size);
                        data_iter = in_iter;
                } else {
                        data_direction = DMA_NONE;
                        exp_data_len = 0;
                }
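                /*
                 * For example (sizes from include/uapi/linux/virtio_scsi.h,
                 * where struct virtio_scsi_cmd_req is 51 bytes): a WRITE
                 * carrying 4 KiB of payload arrives with out_size == 51 +
                 * 4096, so data_direction == DMA_TO_DEVICE and exp_data_len
                 * == 4096; a READ of 4 KiB instead has in_size == rsp_size
                 * + 4096 and takes the DMA_FROM_DEVICE branch.
                 */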
                /*
                 * If T10_PI header + payload is present, setup prot_iter values
                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
                 * host scatterlists via iov_iter_get_pages().
                 */
                if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesout,"
                                                " but wrong data_direction\n");
                                        goto err;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesin,"
                                                " but wrong data_direction\n");
                                        goto err;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
                         * Set prot_iter to data_iter and truncate it to
                         * prot_bytes, and advance data_iter past any
                         * preceding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
                         * actual data payload length.
                         */
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
                                iov_iter_truncate(&prot_iter, prot_bytes);
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = vhost_buf_to_lun(v_req_pi.lun);
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = vhost_buf_to_lun(v_req.lun);
                }
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi, then get a pre-allocated
                 * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
                        goto err;
                }
                cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
                               PTR_ERR(cmd));
                        goto err;
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp_iov = vq->iov[vc.out];
                cmd->tvc_in_iovs = vc.in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                         cmd->tvc_cdb[0], cmd->tvc_lun);
                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

                if (data_direction != DMA_NONE) {
                        if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
                                                      &prot_iter, exp_data_len,
                                                      &data_iter))) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
                                goto err;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = vc.head;
                vhost_scsi_target_queue_cmd(cmd);
                ret = 0;
err:
                /*
                 * ENXIO:  No more requests, or read error, wait for next kick
                 * EINVAL: Invalid response buffer, drop the request
                 * EIO:    Respond with bad target
                 * EAGAIN: Pending request
                 */
                if (ret == -ENXIO)
                        break;
                else if (ret == -EIO)
                        vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
        } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
        mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
                         int in_iovs, int vq_desc, struct iovec *resp_iov,
                         int tmf_resp_code)
{
        struct virtio_scsi_ctrl_tmf_resp rsp;
        struct iov_iter iov_iter;
        int ret;

        pr_debug("%s\n", __func__);
        memset(&rsp, 0, sizeof(rsp));
        rsp.response = tmf_resp_code;

        iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));

        ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
        if (likely(ret == sizeof(rsp)))
                vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
        else
                pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
        struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
                                                  vwork);
        int resp_code;

        if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
                resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
        else
                resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

        vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
                                 tmf->vq_desc, &tmf->resp_iov, resp_code);
        vhost_scsi_release_tmf_res(tmf);
}

static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
                      struct vhost_virtqueue *vq,
                      struct virtio_scsi_ctrl_tmf_req *vtmf,
                      struct vhost_scsi_ctx *vc)
{
        struct vhost_scsi_virtqueue *svq = container_of(vq,
                                        struct vhost_scsi_virtqueue, vq);
        struct vhost_scsi_tmf *tmf;

        if (vhost32_to_cpu(vq, vtmf->subtype) !=
            VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
                goto send_reject;

        if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
                pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
                goto send_reject;
        }

        mutex_lock(&tpg->tv_tpg_mutex);
        if (list_empty(&tpg->tmf_queue)) {
                pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
                mutex_unlock(&tpg->tv_tpg_mutex);
                goto send_reject;
        }

        tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
                               queue_entry);
        list_del_init(&tmf->queue_entry);
        mutex_unlock(&tpg->tv_tpg_mutex);

        tmf->tpg = tpg;
        tmf->vhost = vs;
        tmf->svq = svq;
        tmf->resp_iov = vq->iov[vc->out];
        tmf->vq_desc = vc->head;
        tmf->in_iovs = vc->in;
        tmf->inflight = vhost_scsi_get_inflight(vq);

        if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
                              vhost_buf_to_lun(vtmf->lun), NULL,
                              TMR_LUN_RESET, GFP_KERNEL, 0,
                              TARGET_SCF_ACK_KREF) < 0) {
                vhost_scsi_release_tmf_res(tmf);
                goto send_reject;
        }

        return;

send_reject:
        vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
                                 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
                        struct vhost_virtqueue *vq,
                        struct vhost_scsi_ctx *vc)
{
        struct virtio_scsi_ctrl_an_resp rsp;
        struct iov_iter iov_iter;
        int ret;

        pr_debug("%s\n", __func__);
        memset(&rsp, 0, sizeof(rsp));   /* event_actual = 0 */
        rsp.response = VIRTIO_SCSI_S_OK;

        iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

        ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
        if (likely(ret == sizeof(rsp)))
                vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
        else
                pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_tpg *tpg;
        union {
                __virtio32 type;
                struct virtio_scsi_ctrl_an_req an;
                struct virtio_scsi_ctrl_tmf_req tmf;
        } v_req;
        struct vhost_scsi_ctx vc;
        size_t typ_size;
        int ret, c = 0;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        if (!vhost_vq_get_backend(vq))
                goto out;

        memset(&vc, 0, sizeof(vc));

        vhost_disable_notify(&vs->dev, vq);

        do {
                ret = vhost_scsi_get_desc(vs, vq, &vc);
                if (ret)
                        goto err;

                /*
                 * Get the request type first in order to set up
                 * other parameters dependent on the type.
                 */
                vc.req = &v_req.type;
                typ_size = sizeof(v_req.type);

                if (unlikely(!copy_from_iter_full(vc.req, typ_size,
                                                  &vc.out_iter))) {
                        vq_err(vq, "Faulted on copy_from_iter tmf type\n");
                        /*
                         * The size of the response buffer depends on the
                         * request type and must be validated against it.
                         * Since the request type is not known, don't send
                         * a response.
                         */
                        continue;
                }

                switch (vhost32_to_cpu(vq, v_req.type)) {
                case VIRTIO_SCSI_T_TMF:
                        vc.req = &v_req.tmf;
                        vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
                        vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
                        vc.lunp = &v_req.tmf.lun[0];
                        vc.target = &v_req.tmf.lun[1];
                        break;
                case VIRTIO_SCSI_T_AN_QUERY:
                case VIRTIO_SCSI_T_AN_SUBSCRIBE:
                        vc.req = &v_req.an;
                        vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
                        vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
                        vc.lunp = &v_req.an.lun[0];
                        vc.target = NULL;
                        break;
                default:
                        vq_err(vq, "Unknown control request %d", v_req.type);
                        continue;
                }

                /*
                 * Validate the size of request and response buffers.
                 * Check for a sane response buffer so we can report
                 * early errors back to the guest.
                 */
                ret = vhost_scsi_chk_size(vq, &vc);
                if (ret)
                        goto err;

                /*
                 * Get the rest of the request now that its size is known.
                 */
                vc.req += typ_size;
                vc.req_size -= typ_size;

                ret = vhost_scsi_get_req(vq, &vc, &tpg);
                if (ret)
                        goto err;

                if (v_req.type == VIRTIO_SCSI_T_TMF)
                        vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
                else
                        vhost_scsi_send_an_resp(vs, vq, &vc);
err:
                /*
                 * ENXIO:  No more requests, or read error, wait for next kick
                 * EINVAL: Invalid response buffer, drop the request
                 * EIO:    Respond with bad target
                 * EAGAIN: Pending request
                 */
                if (ret == -ENXIO)
                        break;
                else if (ret == -EIO)
                        vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
        } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        pr_debug("%s: The handling func for control queue.\n", __func__);
        vhost_scsi_ctl_handle_vq(vs, vq);
}

static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
                   struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct vhost_scsi_evt *evt;

        evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
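
/*
 * Worked example of the event LUN encoding above: for unpacked_lun ==
 * 300 (0x12C), lun[2] = (300 >> 8) | 0x40 = 0x41 and lun[3] = 0x2C, so
 * the guest's flat-address decode, ((lun[2] << 8 | lun[3]) & 0x3FFF),
 * recovers 0x012C == 300. For LUNs below 256, lun[2] stays 0 and
 * lun[3] alone carries the value.
 */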
1395
1396static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1397{
1398        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1399                                                poll.work);
1400        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1401
1402        mutex_lock(&vq->mutex);
1403        if (!vhost_vq_get_backend(vq))
1404                goto out;
1405
1406        if (vs->vs_events_missed)
1407                vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1408out:
1409        mutex_unlock(&vq->mutex);
1410}
1411
1412static void vhost_scsi_handle_kick(struct vhost_work *work)
1413{
1414        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1415                                                poll.work);
1416        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1417
1418        vhost_scsi_handle_vq(vs, vq);
1419}
1420
1421/* Callers must hold dev mutex */
1422static void vhost_scsi_flush(struct vhost_scsi *vs)
1423{
1424        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1425        int i;
1426
1427        /* Init new inflight and remember the old inflight */
1428        vhost_scsi_init_inflight(vs, old_inflight);
1429
1430        /*
1431         * The inflight->kref was initialized to 1. We decrement it here to
1432         * indicate the start of the flush operation so that it will reach 0
1433         * when all the reqs are finished.
1434         */
1435        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1436                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1437
1438        /* Flush both the vhost poll and vhost work */
1439        vhost_work_dev_flush(&vs->dev);
1440
1441        /* Wait for all reqs issued before the flush to be finished */
1442        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1443                wait_for_completion(&old_inflight[i]->comp);
1444}
1445
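    /*
     * Free the per-command data, protection, and user-page arrays together
     * with the virtqueue's tag bitmap.
     */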
1446static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1447{
1448        struct vhost_scsi_virtqueue *svq = container_of(vq,
1449                                        struct vhost_scsi_virtqueue, vq);
1450        struct vhost_scsi_cmd *tv_cmd;
1451        unsigned int i;
1452
1453        if (!svq->scsi_cmds)
1454                return;
1455
1456        for (i = 0; i < svq->max_cmds; i++) {
1457                tv_cmd = &svq->scsi_cmds[i];
1458
1459                kfree(tv_cmd->tvc_sgl);
1460                kfree(tv_cmd->tvc_prot_sgl);
1461                kfree(tv_cmd->tvc_upages);
1462        }
1463
1464        sbitmap_free(&svq->scsi_tags);
1465        kfree(svq->scsi_cmds);
1466        svq->scsi_cmds = NULL;
1467}
1468
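    /*
     * Preallocate one vhost_scsi_cmd per possible tag, each with fixed-size
     * data, user-page, and protection scatterlist arrays, plus an sbitmap
     * used to hand out the tags themselves.
     */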
1469static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1470{
1471        struct vhost_scsi_virtqueue *svq = container_of(vq,
1472                                        struct vhost_scsi_virtqueue, vq);
1473        struct vhost_scsi_cmd *tv_cmd;
1474        unsigned int i;
1475
1476        if (svq->scsi_cmds)
1477                return 0;
1478
1479        if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1480                              NUMA_NO_NODE, false, true))
1481                return -ENOMEM;
1482        svq->max_cmds = max_cmds;
1483
1484        svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1485        if (!svq->scsi_cmds) {
1486                sbitmap_free(&svq->scsi_tags);
1487                return -ENOMEM;
1488        }
1489
1490        for (i = 0; i < max_cmds; i++) {
1491                tv_cmd = &svq->scsi_cmds[i];
1492
1493                tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1494                                          sizeof(struct scatterlist),
1495                                          GFP_KERNEL);
1496                if (!tv_cmd->tvc_sgl) {
1497                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1498                        goto out;
1499                }
1500
1501                tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1502                                             sizeof(struct page *),
1503                                             GFP_KERNEL);
1504                if (!tv_cmd->tvc_upages) {
1505                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1506                        goto out;
1507                }
1508
1509                tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1510                                               sizeof(struct scatterlist),
1511                                               GFP_KERNEL);
1512                if (!tv_cmd->tvc_prot_sgl) {
1513                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1514                        goto out;
1515                }
1516        }
1517        return 0;
1518out:
1519        vhost_scsi_destroy_vq_cmds(vq);
1520        return -ENOMEM;
1521}
1522
1523/*
1524 * Called from vhost_scsi_ioctl() context to walk the list of available
1525 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1526 *
1527 *  The lock nesting rule is:
1528 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1529 */
1530static int
1531vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1532                        struct vhost_scsi_target *t)
1533{
1534        struct se_portal_group *se_tpg;
1535        struct vhost_scsi_tport *tv_tport;
1536        struct vhost_scsi_tpg *tpg;
1537        struct vhost_scsi_tpg **vs_tpg;
1538        struct vhost_virtqueue *vq;
1539        int index, ret, i, len;
1540        bool match = false;
1541
1542        mutex_lock(&vhost_scsi_mutex);
1543        mutex_lock(&vs->dev.mutex);
1544
1545        /* Verify that ring has been setup correctly. */
1546        for (index = 0; index < vs->dev.nvqs; ++index) {
1548                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1549                        ret = -EFAULT;
1550                        goto out;
1551                }
1552        }
1553
1554        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1555        vs_tpg = kzalloc(len, GFP_KERNEL);
1556        if (!vs_tpg) {
1557                ret = -ENOMEM;
1558                goto out;
1559        }
1560        if (vs->vs_tpg)
1561                memcpy(vs_tpg, vs->vs_tpg, len);
1562
1563        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1564                mutex_lock(&tpg->tv_tpg_mutex);
1565                if (!tpg->tpg_nexus) {
1566                        mutex_unlock(&tpg->tv_tpg_mutex);
1567                        continue;
1568                }
1569                if (tpg->tv_tpg_vhost_count != 0) {
1570                        mutex_unlock(&tpg->tv_tpg_mutex);
1571                        continue;
1572                }
1573                tv_tport = tpg->tport;
1574
1575                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1576                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1577                                mutex_unlock(&tpg->tv_tpg_mutex);
1578                                ret = -EEXIST;
1579                                goto undepend;
1580                        }
1581                        /*
1582                         * Take an explicit se_tpg->tpg_group.cg_item
1583                         * dependency now so that the vhost-scsi configfs
1584                         * group cannot be removed while this vhost ioctl
1585                         * still has it in use.
1586                         */
1587                        se_tpg = &tpg->se_tpg;
1588                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1589                        if (ret) {
1590                                pr_warn("target_depend_item() failed: %d\n", ret);
1591                                mutex_unlock(&tpg->tv_tpg_mutex);
1592                                goto undepend;
1593                        }
1594                        tpg->tv_tpg_vhost_count++;
1595                        tpg->vhost_scsi = vs;
1596                        vs_tpg[tpg->tport_tpgt] = tpg;
1597                        match = true;
1598                }
1599                mutex_unlock(&tpg->tv_tpg_mutex);
1600        }
1601
1602        if (match) {
1603                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1604                       sizeof(vs->vs_vhost_wwpn));
1605
1606                for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1607                        vq = &vs->vqs[i].vq;
1608                        if (!vhost_vq_is_setup(vq))
1609                                continue;
1610
1611                        ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1612                        if (ret)
1613                                goto destroy_vq_cmds;
1614                }
1615
1616                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1617                        vq = &vs->vqs[i].vq;
1618                        mutex_lock(&vq->mutex);
1619                        vhost_vq_set_backend(vq, vs_tpg);
1620                        vhost_vq_init_access(vq);
1621                        mutex_unlock(&vq->mutex);
1622                }
1623                ret = 0;
1624        } else {
1625                ret = -EEXIST;
1626        }
1627
1628        /*
1629         * Act as synchronize_rcu to make sure access to
1630         * old vs->vs_tpg is finished.
1631         */
1632        vhost_scsi_flush(vs);
1633        kfree(vs->vs_tpg);
1634        vs->vs_tpg = vs_tpg;
1635        goto out;
1636
1637destroy_vq_cmds:
1638        for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1639                if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1640                        vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1641        }
1642undepend:
1643        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1644                tpg = vs_tpg[i];
1645                if (tpg) {
1646                        tpg->tv_tpg_vhost_count--;
1647                        target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1648                }
1649        }
1650        kfree(vs_tpg);
1651out:
1652        mutex_unlock(&vs->dev.mutex);
1653        mutex_unlock(&vhost_scsi_mutex);
1654        return ret;
1655}
1656
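    /*
     * Undo vhost_scsi_set_endpoint() for the given WWPN: release the
     * configfs dependencies, clear the virtqueue backends, and destroy the
     * preallocated commands once outstanding requests have drained.
     */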
1657static int
1658vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1659                          struct vhost_scsi_target *t)
1660{
1661        struct se_portal_group *se_tpg;
1662        struct vhost_scsi_tport *tv_tport;
1663        struct vhost_scsi_tpg *tpg;
1664        struct vhost_virtqueue *vq;
1665        bool match = false;
1666        int index, ret, i;
1667        u8 target;
1668
1669        mutex_lock(&vhost_scsi_mutex);
1670        mutex_lock(&vs->dev.mutex);
1671        /* Verify that ring has been setup correctly. */
1672        for (index = 0; index < vs->dev.nvqs; ++index) {
1673                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1674                        ret = -EFAULT;
1675                        goto err_dev;
1676                }
1677        }
1678
1679        if (!vs->vs_tpg) {
1680                ret = 0;
1681                goto err_dev;
1682        }
1683
1684        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1685                target = i;
1686                tpg = vs->vs_tpg[target];
1687                if (!tpg)
1688                        continue;
1689
1690                mutex_lock(&tpg->tv_tpg_mutex);
1691                tv_tport = tpg->tport;
1692                if (!tv_tport) {
1693                        ret = -ENODEV;
1694                        goto err_tpg;
1695                }
1696
1697                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1698                        pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1699                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1700                                tv_tport->tport_name, tpg->tport_tpgt,
1701                                t->vhost_wwpn, t->vhost_tpgt);
1702                        ret = -EINVAL;
1703                        goto err_tpg;
1704                }
1705                tpg->tv_tpg_vhost_count--;
1706                tpg->vhost_scsi = NULL;
1707                vs->vs_tpg[target] = NULL;
1708                match = true;
1709                mutex_unlock(&tpg->tv_tpg_mutex);
1710                /*
1711                 * Release se_tpg->tpg_group.cg_item configfs dependency now
1712                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1713                 */
1714                se_tpg = &tpg->se_tpg;
1715                target_undepend_item(&se_tpg->tpg_group.cg_item);
1716        }
1717        if (match) {
1718                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1719                        vq = &vs->vqs[i].vq;
1720                        mutex_lock(&vq->mutex);
1721                        vhost_vq_set_backend(vq, NULL);
1722                        mutex_unlock(&vq->mutex);
1723                }
1724                /* Make sure cmds are not running before tearing them down. */
1725                vhost_scsi_flush(vs);
1726
1727                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1728                        vq = &vs->vqs[i].vq;
1729                        vhost_scsi_destroy_vq_cmds(vq);
1730                }
1731        }
1732        /*
1733         * Act as synchronize_rcu to make sure access to
1734         * old vs->vs_tpg is finished.
1735         */
1736        vhost_scsi_flush(vs);
1737        kfree(vs->vs_tpg);
1738        vs->vs_tpg = NULL;
1739        WARN_ON(vs->vs_events_nr);
1740        mutex_unlock(&vs->dev.mutex);
1741        mutex_unlock(&vhost_scsi_mutex);
1742        return 0;
1743
1744err_tpg:
1745        mutex_unlock(&tpg->tv_tpg_mutex);
1746err_dev:
1747        mutex_unlock(&vs->dev.mutex);
1748        mutex_unlock(&vhost_scsi_mutex);
1749        return ret;
1750}
1751
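    /* Validate and apply the feature bits negotiated via VHOST_SET_FEATURES. */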
1752static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1753{
1754        struct vhost_virtqueue *vq;
1755        int i;
1756
1757        if (features & ~VHOST_SCSI_FEATURES)
1758                return -EOPNOTSUPP;
1759
1760        mutex_lock(&vs->dev.mutex);
1761        if ((features & (1 << VHOST_F_LOG_ALL)) &&
1762            !vhost_log_access_ok(&vs->dev)) {
1763                mutex_unlock(&vs->dev.mutex);
1764                return -EFAULT;
1765        }
1766
1767        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1768                vq = &vs->vqs[i].vq;
1769                mutex_lock(&vq->mutex);
1770                vq->acked_features = features;
1771                mutex_unlock(&vq->mutex);
1772        }
1773        mutex_unlock(&vs->dev.mutex);
1774        return 0;
1775}
1776
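    /*
     * Create a new vhost_scsi device: wire up the control, event, and I/O
     * virtqueue kick handlers and initialize the inflight counters.
     */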
1777static int vhost_scsi_open(struct inode *inode, struct file *f)
1778{
1779        struct vhost_scsi *vs;
1780        struct vhost_virtqueue **vqs;
1781        int r = -ENOMEM, i;
1782
1783        vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1784        if (!vs)
1785                goto err_vs;
1786
1787        vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1788        if (!vqs)
1789                goto err_vqs;
1790
1791        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1792        vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1793
1794        vs->vs_events_nr = 0;
1795        vs->vs_events_missed = false;
1796
1797        vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1798        vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1799        vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1800        vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1801        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1802                vqs[i] = &vs->vqs[i].vq;
1803                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1804        }
1805        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1806                       VHOST_SCSI_WEIGHT, 0, true, NULL);
1807
1808        vhost_scsi_init_inflight(vs, NULL);
1809
1810        f->private_data = vs;
1811        return 0;
1812
1813err_vqs:
1814        kvfree(vs);
1815err_vs:
1816        return r;
1817}
1818
1819static int vhost_scsi_release(struct inode *inode, struct file *f)
1820{
1821        struct vhost_scsi *vs = f->private_data;
1822        struct vhost_scsi_target t;
1823
1824        mutex_lock(&vs->dev.mutex);
1825        memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1826        mutex_unlock(&vs->dev.mutex);
1827        vhost_scsi_clear_endpoint(vs, &t);
1828        vhost_dev_stop(&vs->dev);
1829        vhost_dev_cleanup(&vs->dev);
1830        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1831        vhost_scsi_flush(vs);
1832        kfree(vs->dev.vqs);
1833        kvfree(vs);
1834        return 0;
1835}
1836
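    /*
     * vhost-scsi specific ioctls (endpoint setup/teardown, ABI version,
     * missed-event flag) are handled here; everything else falls through to
     * the generic vhost device and vring ioctls.
     */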
1837static long
1838vhost_scsi_ioctl(struct file *f,
1839                 unsigned int ioctl,
1840                 unsigned long arg)
1841{
1842        struct vhost_scsi *vs = f->private_data;
1843        struct vhost_scsi_target backend;
1844        void __user *argp = (void __user *)arg;
1845        u64 __user *featurep = argp;
1846        u32 __user *eventsp = argp;
1847        u32 events_missed;
1848        u64 features;
1849        int r, abi_version = VHOST_SCSI_ABI_VERSION;
1850        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1851
1852        switch (ioctl) {
1853        case VHOST_SCSI_SET_ENDPOINT:
1854                if (copy_from_user(&backend, argp, sizeof backend))
1855                        return -EFAULT;
1856                if (backend.reserved != 0)
1857                        return -EOPNOTSUPP;
1858
1859                return vhost_scsi_set_endpoint(vs, &backend);
1860        case VHOST_SCSI_CLEAR_ENDPOINT:
1861                if (copy_from_user(&backend, argp, sizeof backend))
1862                        return -EFAULT;
1863                if (backend.reserved != 0)
1864                        return -EOPNOTSUPP;
1865
1866                return vhost_scsi_clear_endpoint(vs, &backend);
1867        case VHOST_SCSI_GET_ABI_VERSION:
1868                if (copy_to_user(argp, &abi_version, sizeof abi_version))
1869                        return -EFAULT;
1870                return 0;
1871        case VHOST_SCSI_SET_EVENTS_MISSED:
1872                if (get_user(events_missed, eventsp))
1873                        return -EFAULT;
1874                mutex_lock(&vq->mutex);
1875                vs->vs_events_missed = events_missed;
1876                mutex_unlock(&vq->mutex);
1877                return 0;
1878        case VHOST_SCSI_GET_EVENTS_MISSED:
1879                mutex_lock(&vq->mutex);
1880                events_missed = vs->vs_events_missed;
1881                mutex_unlock(&vq->mutex);
1882                if (put_user(events_missed, eventsp))
1883                        return -EFAULT;
1884                return 0;
1885        case VHOST_GET_FEATURES:
1886                features = VHOST_SCSI_FEATURES;
1887                if (copy_to_user(featurep, &features, sizeof features))
1888                        return -EFAULT;
1889                return 0;
1890        case VHOST_SET_FEATURES:
1891                if (copy_from_user(&features, featurep, sizeof features))
1892                        return -EFAULT;
1893                return vhost_scsi_set_features(vs, features);
1894        default:
1895                mutex_lock(&vs->dev.mutex);
1896                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1897                /* TODO: flush backend after dev ioctl. */
1898                if (r == -ENOIOCTLCMD)
1899                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1900                mutex_unlock(&vs->dev.mutex);
1901                return r;
1902        }
1903}
1904
1905static const struct file_operations vhost_scsi_fops = {
1906        .owner          = THIS_MODULE,
1907        .release        = vhost_scsi_release,
1908        .unlocked_ioctl = vhost_scsi_ioctl,
1909        .compat_ioctl   = compat_ptr_ioctl,
1910        .open           = vhost_scsi_open,
1911        .llseek         = noop_llseek,
1912};
1913
1914static struct miscdevice vhost_scsi_misc = {
1915        .minor  = MISC_DYNAMIC_MINOR,
1916        .name   = "vhost-scsi",
1917        .fops   = &vhost_scsi_fops,
1918};
1919
1920static int __init vhost_scsi_register(void)
1921{
1922        return misc_register(&vhost_scsi_misc);
1923}
1924
1925static void vhost_scsi_deregister(void)
1926{
1927        misc_deregister(&vhost_scsi_misc);
1928}
1929
1930static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1931{
1932        switch (tport->tport_proto_id) {
1933        case SCSI_PROTOCOL_SAS:
1934                return "SAS";
1935        case SCSI_PROTOCOL_FCP:
1936                return "FCP";
1937        case SCSI_PROTOCOL_ISCSI:
1938                return "iSCSI";
1939        default:
1940                break;
1941        }
1942
1943        return "Unknown";
1944}
1945
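    /*
     * Report a LUN (re)scan or removal to the guest with a
     * VIRTIO_SCSI_T_TRANSPORT_RESET event, provided the guest negotiated
     * VIRTIO_SCSI_F_HOTPLUG.
     */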
1946static void
1947vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1948                  struct se_lun *lun, bool plug)
1949{
1951        struct vhost_scsi *vs = tpg->vhost_scsi;
1952        struct vhost_virtqueue *vq;
1953        u32 reason;
1954
1955        if (!vs)
1956                return;
1957
1958        mutex_lock(&vs->dev.mutex);
1959
1960        if (plug)
1961                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1962        else
1963                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1964
1965        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1966        mutex_lock(&vq->mutex);
1967        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1968                vhost_scsi_send_evt(vs, tpg, lun,
1969                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1970        mutex_unlock(&vq->mutex);
1971        mutex_unlock(&vs->dev.mutex);
1972}
1973
1974static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1975{
1976        vhost_scsi_do_plug(tpg, lun, true);
1977}
1978
1979static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1980{
1981        vhost_scsi_do_plug(tpg, lun, false);
1982}
1983
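    /*
     * Preallocate a TMF descriptor and park it on tpg->tmf_queue before
     * announcing the new LUN to the guest.
     */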
1984static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1985                               struct se_lun *lun)
1986{
1987        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1988                                struct vhost_scsi_tpg, se_tpg);
1989        struct vhost_scsi_tmf *tmf;
1990
1991        tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
1992        if (!tmf)
1993                return -ENOMEM;
1994        INIT_LIST_HEAD(&tmf->queue_entry);
1995        vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
1996
1997        mutex_lock(&vhost_scsi_mutex);
1998
1999        mutex_lock(&tpg->tv_tpg_mutex);
2000        tpg->tv_tpg_port_count++;
2001        list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
2002        mutex_unlock(&tpg->tv_tpg_mutex);
2003
2004        vhost_scsi_hotplug(tpg, lun);
2005
2006        mutex_unlock(&vhost_scsi_mutex);
2007
2008        return 0;
2009}
2010
2011static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2012                                  struct se_lun *lun)
2013{
2014        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2015                                struct vhost_scsi_tpg, se_tpg);
2016        struct vhost_scsi_tmf *tmf;
2017
2018        mutex_lock(&vhost_scsi_mutex);
2019
2020        mutex_lock(&tpg->tv_tpg_mutex);
2021        tpg->tv_tpg_port_count--;
2022        tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
2023                               queue_entry);
2024        list_del(&tmf->queue_entry);
2025        kfree(tmf);
2026        mutex_unlock(&tpg->tv_tpg_mutex);
2027
2028        vhost_scsi_hotunplug(tpg, lun);
2029
2030        mutex_unlock(&vhost_scsi_mutex);
2031}
2032
2033static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2034                struct config_item *item, const char *page, size_t count)
2035{
2036        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2037        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2038                                struct vhost_scsi_tpg, se_tpg);
2039        unsigned long val;
2040        int ret = kstrtoul(page, 0, &val);
2041
2042        if (ret) {
2043                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2044                return ret;
2045        }
2046        if (val != 0 && val != 1 && val != 3) {
2047                pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2048                return -EINVAL;
2049        }
2050        tpg->tv_fabric_prot_type = val;
2051
2052        return count;
2053}
2054
2055static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2056                struct config_item *item, char *page)
2057{
2058        struct se_portal_group *se_tpg = attrib_to_tpg(item);
2059        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2060                                struct vhost_scsi_tpg, se_tpg);
2061
2062        return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
2063}
2064
2065CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2066
2067static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2068        &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2069        NULL,
2070};
2071
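    /*
     * Create the I_T nexus for this TPG: allocate the nexus and establish a
     * TCM session, with TARGET_PROT_DIN_PASS/DOUT_PASS protection, for the
     * given initiator port name.
     */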
2072static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2073                                const char *name)
2074{
2075        struct vhost_scsi_nexus *tv_nexus;
2076
2077        mutex_lock(&tpg->tv_tpg_mutex);
2078        if (tpg->tpg_nexus) {
2079                mutex_unlock(&tpg->tv_tpg_mutex);
2080                pr_debug("tpg->tpg_nexus already exists\n");
2081                return -EEXIST;
2082        }
2083
2084        tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2085        if (!tv_nexus) {
2086                mutex_unlock(&tpg->tv_tpg_mutex);
2087                pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2088                return -ENOMEM;
2089        }
2090        /*
2091         * Since we are running in 'demo mode' this call will generate a
2092         * struct se_node_acl for the vhost_scsi struct se_portal_group with
2093         * the SCSI Initiator port name of the passed configfs group 'name'.
2094         */
2095        tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2096                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2097                                        (unsigned char *)name, tv_nexus, NULL);
2098        if (IS_ERR(tv_nexus->tvn_se_sess)) {
2099                mutex_unlock(&tpg->tv_tpg_mutex);
2100                kfree(tv_nexus);
2101                return -ENOMEM;
2102        }
2103        tpg->tpg_nexus = tv_nexus;
2104
2105        mutex_unlock(&tpg->tv_tpg_mutex);
2106        return 0;
2107}
2108
2109static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2110{
2111        struct se_session *se_sess;
2112        struct vhost_scsi_nexus *tv_nexus;
2113
2114        mutex_lock(&tpg->tv_tpg_mutex);
2115        tv_nexus = tpg->tpg_nexus;
2116        if (!tv_nexus) {
2117                mutex_unlock(&tpg->tv_tpg_mutex);
2118                return -ENODEV;
2119        }
2120
2121        se_sess = tv_nexus->tvn_se_sess;
2122        if (!se_sess) {
2123                mutex_unlock(&tpg->tv_tpg_mutex);
2124                return -ENODEV;
2125        }
2126
2127        if (tpg->tv_tpg_port_count != 0) {
2128                mutex_unlock(&tpg->tv_tpg_mutex);
2129                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2130                        " active TPG port count: %d\n",
2131                        tpg->tv_tpg_port_count);
2132                return -EBUSY;
2133        }
2134
2135        if (tpg->tv_tpg_vhost_count != 0) {
2136                mutex_unlock(&tpg->tv_tpg_mutex);
2137                pr_err("Unable to remove TCM_vhost I_T Nexus with"
2138                        " active TPG vhost count: %d\n",
2139                        tpg->tv_tpg_vhost_count);
2140                return -EBUSY;
2141        }
2142
2143        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2144                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2145                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2146
2147        /*
2148         * Release the SCSI I_T Nexus to the emulated vhost Target Port
2149         */
2150        target_remove_session(se_sess);
2151        tpg->tpg_nexus = NULL;
2152        mutex_unlock(&tpg->tv_tpg_mutex);
2153
2154        kfree(tv_nexus);
2155        return 0;
2156}
2157
2158static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2159{
2160        struct se_portal_group *se_tpg = to_tpg(item);
2161        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2162                                struct vhost_scsi_tpg, se_tpg);
2163        struct vhost_scsi_nexus *tv_nexus;
2164        ssize_t ret;
2165
2166        mutex_lock(&tpg->tv_tpg_mutex);
2167        tv_nexus = tpg->tpg_nexus;
2168        if (!tv_nexus) {
2169                mutex_unlock(&tpg->tv_tpg_mutex);
2170                return -ENODEV;
2171        }
2172        ret = snprintf(page, PAGE_SIZE, "%s\n",
2173                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2174        mutex_unlock(&tpg->tv_tpg_mutex);
2175
2176        return ret;
2177}
2178
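    /*
     * configfs store for the nexus attribute: writing "NULL" drops the
     * active nexus; any other value must be a naa., fc., or iqn. prefixed
     * initiator WWN matching the tport's protocol.
     */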
2179static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2180                const char *page, size_t count)
2181{
2182        struct se_portal_group *se_tpg = to_tpg(item);
2183        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2184                                struct vhost_scsi_tpg, se_tpg);
2185        struct vhost_scsi_tport *tport_wwn = tpg->tport;
2186        unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2187        int ret;
2188        /*
2189         * Shutdown the active I_T nexus if 'NULL' is passed.
2190         */
2191        if (!strncmp(page, "NULL", 4)) {
2192                ret = vhost_scsi_drop_nexus(tpg);
2193                return (!ret) ? count : ret;
2194        }
2195        /*
2196         * Otherwise make sure the passed virtual Initiator port WWN matches
2197         * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2198         * vhost_scsi_make_nexus().
2199         */
2200        if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2201                pr_err("Emulated NAA SAS Address: %s, exceeds"
2202                                " max: %d\n", page, VHOST_SCSI_NAMELEN);
2203                return -EINVAL;
2204        }
2205        snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2206
2207        ptr = strstr(i_port, "naa.");
2208        if (ptr) {
2209                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2210                        pr_err("Passed SAS Initiator Port %s does not"
2211                                " match target port protoid: %s\n", i_port,
2212                                vhost_scsi_dump_proto_id(tport_wwn));
2213                        return -EINVAL;
2214                }
2215                port_ptr = &i_port[0];
2216                goto check_newline;
2217        }
2218        ptr = strstr(i_port, "fc.");
2219        if (ptr) {
2220                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2221                        pr_err("Passed FCP Initiator Port %s does not"
2222                                " match target port protoid: %s\n", i_port,
2223                                vhost_scsi_dump_proto_id(tport_wwn));
2224                        return -EINVAL;
2225                }
2226                port_ptr = &i_port[3]; /* Skip over "fc." */
2227                goto check_newline;
2228        }
2229        ptr = strstr(i_port, "iqn.");
2230        if (ptr) {
2231                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2232                        pr_err("Passed iSCSI Initiator Port %s does not"
2233                                " match target port protoid: %s\n", i_port,
2234                                vhost_scsi_dump_proto_id(tport_wwn));
2235                        return -EINVAL;
2236                }
2237                port_ptr = &i_port[0];
2238                goto check_newline;
2239        }
2240        pr_err("Unable to locate prefix for emulated Initiator Port:"
2241                        " %s\n", i_port);
2242        return -EINVAL;
2243        /*
2244         * Clear any trailing newline for the NAA WWN
2245         */
2246check_newline:
2247        if (i_port[strlen(i_port)-1] == '\n')
2248                i_port[strlen(i_port)-1] = '\0';
2249
2250        ret = vhost_scsi_make_nexus(tpg, port_ptr);
2251        if (ret < 0)
2252                return ret;
2253
2254        return count;
2255}
2256
2257CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2258
2259static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2260        &vhost_scsi_tpg_attr_nexus,
2261        NULL,
2262};
2263
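    /*
     * Parse a "tpgt_<n>" configfs directory name and register the new
     * target portal group with TCM.
     */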
2264static struct se_portal_group *
2265vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2266{
2267        struct vhost_scsi_tport *tport = container_of(wwn,
2268                        struct vhost_scsi_tport, tport_wwn);
2269
2270        struct vhost_scsi_tpg *tpg;
2271        u16 tpgt;
2272        int ret;
2273
2274        if (strstr(name, "tpgt_") != name)
2275                return ERR_PTR(-EINVAL);
2276        if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2277                return ERR_PTR(-EINVAL);
2278
2279        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2280        if (!tpg) {
2281                pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2282                return ERR_PTR(-ENOMEM);
2283        }
2284        mutex_init(&tpg->tv_tpg_mutex);
2285        INIT_LIST_HEAD(&tpg->tv_tpg_list);
2286        INIT_LIST_HEAD(&tpg->tmf_queue);
2287        tpg->tport = tport;
2288        tpg->tport_tpgt = tpgt;
2289
2290        ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2291        if (ret < 0) {
2292                kfree(tpg);
2293                return ERR_PTR(ret);
2294        }
2295        mutex_lock(&vhost_scsi_mutex);
2296        list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2297        mutex_unlock(&vhost_scsi_mutex);
2298
2299        return &tpg->se_tpg;
2300}
2301
2302static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2303{
2304        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2305                                struct vhost_scsi_tpg, se_tpg);
2306
2307        mutex_lock(&vhost_scsi_mutex);
2308        list_del(&tpg->tv_tpg_list);
2309        mutex_unlock(&vhost_scsi_mutex);
2310        /*
2311         * Release the virtual I_T Nexus for this vhost TPG
2312         */
2313        vhost_scsi_drop_nexus(tpg);
2314        /*
2315         * Deregister the se_tpg from TCM.
2316         */
2317        core_tpg_deregister(se_tpg);
2318        kfree(tpg);
2319}
2320
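    /*
     * Allocate a target port, deriving the emulated protocol (SAS, FCP, or
     * iSCSI) from the naa., fc., or iqn. prefix of the configfs directory
     * name.
     */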
2321static struct se_wwn *
2322vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2323                     struct config_group *group,
2324                     const char *name)
2325{
2326        struct vhost_scsi_tport *tport;
2327        char *ptr;
2328        u64 wwpn = 0;
2329        int off = 0;
2330
2331        /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2332                return ERR_PTR(-EINVAL); */
2333
2334        tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2335        if (!tport) {
2336                pr_err("Unable to allocate struct vhost_scsi_tport\n");
2337                return ERR_PTR(-ENOMEM);
2338        }
2339        tport->tport_wwpn = wwpn;
2340        /*
2341         * Determine the emulated Protocol Identifier and Target Port Name
2342         * based on the incoming configfs directory name.
2343         */
2344        ptr = strstr(name, "naa.");
2345        if (ptr) {
2346                tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2347                goto check_len;
2348        }
2349        ptr = strstr(name, "fc.");
2350        if (ptr) {
2351                tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2352                off = 3; /* Skip over "fc." */
2353                goto check_len;
2354        }
2355        ptr = strstr(name, "iqn.");
2356        if (ptr) {
2357                tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2358                goto check_len;
2359        }
2360
2361        pr_err("Unable to locate prefix for emulated Target Port:"
2362                        " %s\n", name);
2363        kfree(tport);
2364        return ERR_PTR(-EINVAL);
2365
2366check_len:
2367        if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2368                pr_err("Emulated %s Address: %s, exceeds"
2369                        " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2370                        VHOST_SCSI_NAMELEN);
2371                kfree(tport);
2372                return ERR_PTR(-EINVAL);
2373        }
2374        snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2375
2376        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2377                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2378
2379        return &tport->tport_wwn;
2380}
2381
2382static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2383{
2384        struct vhost_scsi_tport *tport = container_of(wwn,
2385                                struct vhost_scsi_tport, tport_wwn);
2386
2387        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2388                " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2389                tport->tport_name);
2390
2391        kfree(tport);
2392}
2393
2394static ssize_t
2395vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2396{
2397        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2398                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2399                utsname()->machine);
2400}
2401
2402CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2403
2404static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2405        &vhost_scsi_wwn_attr_version,
2406        NULL,
2407};
2408
2409static const struct target_core_fabric_ops vhost_scsi_ops = {
2410        .module                         = THIS_MODULE,
2411        .fabric_name                    = "vhost",
2412        .max_data_sg_nents              = VHOST_SCSI_PREALLOC_SGLS,
2413        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2414        .tpg_get_tag                    = vhost_scsi_get_tpgt,
2415        .tpg_check_demo_mode            = vhost_scsi_check_true,
2416        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2417        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2418        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2419        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2420        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2421        .release_cmd                    = vhost_scsi_release_cmd,
2422        .check_stop_free                = vhost_scsi_check_stop_free,
2423        .sess_get_index                 = vhost_scsi_sess_get_index,
2424        .sess_get_initiator_sid         = NULL,
2425        .write_pending                  = vhost_scsi_write_pending,
2426        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2427        .get_cmd_state                  = vhost_scsi_get_cmd_state,
2428        .queue_data_in                  = vhost_scsi_queue_data_in,
2429        .queue_status                   = vhost_scsi_queue_status,
2430        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2431        .aborted_task                   = vhost_scsi_aborted_task,
2432        /*
2433         * Setup callers for generic logic in target_core_fabric_configfs.c
2434         */
2435        .fabric_make_wwn                = vhost_scsi_make_tport,
2436        .fabric_drop_wwn                = vhost_scsi_drop_tport,
2437        .fabric_make_tpg                = vhost_scsi_make_tpg,
2438        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2439        .fabric_post_link               = vhost_scsi_port_link,
2440        .fabric_pre_unlink              = vhost_scsi_port_unlink,
2441
2442        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2443        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2444        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2445};
2446
2447static int __init vhost_scsi_init(void)
2448{
2449        int ret = -ENOMEM;
2450
2451        pr_debug("TCM_VHOST fabric module %s on %s/%s"
2452                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2453                utsname()->machine);
2454
2455        ret = vhost_scsi_register();
2456        if (ret < 0)
2457                goto out;
2458
2459        ret = target_register_template(&vhost_scsi_ops);
2460        if (ret < 0)
2461                goto out_vhost_scsi_deregister;
2462
2463        return 0;
2464
2465out_vhost_scsi_deregister:
2466        vhost_scsi_deregister();
2467out:
2468        return ret;
2469}
2470
2471static void vhost_scsi_exit(void)
2472{
2473        target_unregister_template(&vhost_scsi_ops);
2474        vhost_scsi_deregister();
2475}
2476
2477MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2478MODULE_ALIAS("tcm_vhost");
2479MODULE_LICENSE("GPL");
2480module_init(vhost_scsi_init);
2481module_exit(vhost_scsi_exit);
2482