linux/drivers/scsi/scsi_tgt_if.c
/*
 * SCSI target kernel/user interface functions
 *
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/file.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/scsi_tgt_if.h>

#include <asm/cacheflush.h>

#include "scsi_tgt_priv.h"

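/*
 * TGT_RING_SIZE may be supplied by an earlier header; if it is not
 * defined at all, the preprocessor evaluates the unknown identifier as
 * 0 below and the ring falls back to a single page.
 */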
#if TGT_RING_SIZE < PAGE_SIZE
#  define TGT_RING_SIZE PAGE_SIZE
#endif

#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
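/*
 * Illustrative sizing only: with 4 KiB pages, a one-page ring and a
 * 64-byte struct tgt_event (the real size comes from scsi_tgt_if.h),
 * each ring would hold 4096 / 64 = 64 event slots.
 */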

struct tgt_ring {
        u32 tr_idx;
        unsigned long tr_pages[TGT_RING_PAGES];
        spinlock_t tr_lock;
};

/* tx_ring : kernel->user, rx_ring : user->kernel */
static struct tgt_ring tx_ring, rx_ring;
static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
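
/*
 * Expected userspace flow (a rough sketch, assuming the misc device
 * shows up as /dev/tgt and that userspace shares struct tgt_event via
 * scsi_tgt_if.h; the variable names are illustrative, not taken from
 * the tgt userspace code):
 *
 *     fd  = open("/dev/tgt", O_RDWR);
 *     map = mmap(NULL, TGT_RING_SIZE * 2, PROT_READ | PROT_WRITE,
 *                MAP_SHARED, fd, 0);
 *     tx  = map;                     (kernel->user events)
 *     rx  = map + TGT_RING_SIZE;     (user->kernel events)
 *
 * poll() on fd until POLLIN, walk tx slots whose hdr.status is set and
 * clear the status once handled; fill rx slots with responses, set
 * their hdr.status, then write() to the fd so tgt_write() drains the
 * rx ring.
 */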

static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
{
        if (ring->tr_idx == TGT_MAX_EVENTS - 1)
                ring->tr_idx = 0;
        else
                ring->tr_idx++;
}

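/* Translate a ring index into its tgt_event slot in the backing pages. */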
static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
{
        u32 pidx, off;

        pidx = idx / TGT_EVENT_PER_PAGE;
        off = idx % TGT_EVENT_PER_PAGE;

        return (struct tgt_event *)
                (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
}

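/*
 * Queue one event on the kernel->user ring.  A slot is free when its
 * hdr.status is zero; the payload and type are written first and
 * hdr.status is set last, after the mb(), so userspace never observes
 * a half-written event.  Returns -EBUSY if the next slot is still
 * owned by userspace.
 */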
static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &tx_ring;
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&ring->tr_lock, flags);

        ev = tgt_head_event(ring, ring->tr_idx);
        if (!ev->hdr.status)
                tgt_ring_idx_inc(ring);
        else
                err = -EBUSY;

        spin_unlock_irqrestore(&ring->tr_lock, flags);

        if (err)
                return err;

        memcpy(ev, p, sizeof(*ev));
        ev->hdr.type = type;
        mb();
        ev->hdr.status = 1;

        flush_dcache_page(virt_to_page(ev));

        wake_up_interruptible(&tgt_poll_wait);

        return 0;
}

int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id,
                             struct scsi_lun *lun, u64 tag)
{
        struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.cmd_req.host_no = shost->host_no;
        ev.p.cmd_req.itn_id = itn_id;
        ev.p.cmd_req.data_len = scsi_bufflen(cmd);
        memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
        memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
        ev.p.cmd_req.attribute = cmd->tag;
        ev.p.cmd_req.tag = tag;

        dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
                ev.p.cmd_req.data_len, cmd->tag,
                (unsigned long long) ev.p.cmd_req.tag);

        err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}

int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 itn_id, u64 tag)
{
        struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.cmd_done.host_no = shost->host_no;
        ev.p.cmd_done.itn_id = itn_id;
        ev.p.cmd_done.tag = tag;
        ev.p.cmd_done.result = cmd->result;

        dprintk("%p %d %llu %x %x\n", cmd, shost->host_no,
                (unsigned long long) ev.p.cmd_done.tag,
                ev.p.cmd_done.result, cmd->tag);

        err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}

int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 itn_id, int function,
                                  u64 tag, struct scsi_lun *scsilun, void *data)
{
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.tsk_mgmt_req.host_no = host_no;
        ev.p.tsk_mgmt_req.itn_id = itn_id;
        ev.p.tsk_mgmt_req.function = function;
        ev.p.tsk_mgmt_req.tag = tag;
        memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
        ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;

        dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
                (unsigned long long) ev.p.tsk_mgmt_req.mid);

        err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}

int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 itn_id,
                                          int function, char *initiator_id)
{
        struct tgt_event ev;
        int err;

        memset(&ev, 0, sizeof(ev));
        ev.p.it_nexus_req.host_no = host_no;
        ev.p.it_nexus_req.function = function;
        ev.p.it_nexus_req.itn_id = itn_id;
        if (initiator_id)
                strncpy(ev.p.it_nexus_req.initiator_id, initiator_id,
                        sizeof(ev.p.it_nexus_req.initiator_id));

        dprintk("%d %x %llx\n", host_no, function, (unsigned long long)itn_id);

        err = tgt_uspace_send_event(TGT_KEVENT_IT_NEXUS_REQ, &ev);
        if (err)
                eprintk("tx buf is full, could not send\n");

        return err;
}

static int event_recv_msg(struct tgt_event *ev)
{
        int err = 0;

        switch (ev->hdr.type) {
        case TGT_UEVENT_CMD_RSP:
                err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
                                           ev->p.cmd_rsp.itn_id,
                                           ev->p.cmd_rsp.result,
                                           ev->p.cmd_rsp.tag,
                                           ev->p.cmd_rsp.uaddr,
                                           ev->p.cmd_rsp.len,
                                           ev->p.cmd_rsp.sense_uaddr,
                                           ev->p.cmd_rsp.sense_len,
                                           ev->p.cmd_rsp.rw);
                break;
        case TGT_UEVENT_TSK_MGMT_RSP:
                err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
                                               ev->p.tsk_mgmt_rsp.itn_id,
                                               ev->p.tsk_mgmt_rsp.mid,
                                               ev->p.tsk_mgmt_rsp.result);
                break;
        case TGT_UEVENT_IT_NEXUS_RSP:
                err = scsi_tgt_kspace_it_nexus_rsp(ev->p.it_nexus_rsp.host_no,
                                                   ev->p.it_nexus_rsp.itn_id,
                                                   ev->p.it_nexus_rsp.result);
                break;
        default:
                eprintk("unknown type %d\n", ev->hdr.type);
                err = -EINVAL;
        }

        return err;
}

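/*
 * write() transfers no data; it acts as a doorbell that makes the
 * kernel walk the user->kernel ring and handle every slot whose
 * hdr.status userspace has set.
 */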
static ssize_t tgt_write(struct file *file, const char __user *buffer,
                         size_t count, loff_t *ppos)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &rx_ring;

        while (1) {
                ev = tgt_head_event(ring, ring->tr_idx);
                /* do we need this? */
                flush_dcache_page(virt_to_page(ev));

                if (!ev->hdr.status)
                        break;

                tgt_ring_idx_inc(ring);
                event_recv_msg(ev);
                ev->hdr.status = 0;
        }

        return count;
}

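/*
 * Report POLLIN when the most recently produced kernel->user slot
 * (the one just behind tr_idx) has not been consumed by userspace yet.
 */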
static unsigned int tgt_poll(struct file *file, struct poll_table_struct *wait)
{
        struct tgt_event *ev;
        struct tgt_ring *ring = &tx_ring;
        unsigned long flags;
        unsigned int mask = 0;
        u32 idx;

        poll_wait(file, &tgt_poll_wait, wait);

        spin_lock_irqsave(&ring->tr_lock, flags);

        idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
        ev = tgt_head_event(ring, idx);
        if (ev->hdr.status)
                mask |= POLLIN | POLLRDNORM;

        spin_unlock_irqrestore(&ring->tr_lock, flags);

        return mask;
}

static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
                           struct tgt_ring *ring)
{
        int i, err;

        for (i = 0; i < TGT_RING_PAGES; i++) {
                struct page *page = virt_to_page(ring->tr_pages[i]);
                err = vm_insert_page(vma, addr, page);
                if (err)
                        return err;
                addr += PAGE_SIZE;
        }

        return 0;
}

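/*
 * Both rings are mapped into a single VMA: the tx (kernel->user) ring
 * occupies the first TGT_RING_SIZE bytes and the rx (user->kernel)
 * ring the second, so the mapping must be exactly TGT_RING_SIZE * 2
 * bytes with no page offset.
 */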
static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long addr;
        int err;

        if (vma->vm_pgoff)
                return -EINVAL;

        if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
                eprintk("mmap size must be %lu, not %lu\n",
                        TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
                return -EINVAL;
        }

        addr = vma->vm_start;
        err = uspace_ring_map(vma, addr, &tx_ring);
        if (err)
                return err;
        err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);

        return err;
}

static int tgt_open(struct inode *inode, struct file *file)
{
        tx_ring.tr_idx = rx_ring.tr_idx = 0;

        return 0;
}

static const struct file_operations tgt_fops = {
        .owner          = THIS_MODULE,
        .open           = tgt_open,
        .poll           = tgt_poll,
        .write          = tgt_write,
        .mmap           = tgt_mmap,
        .llseek         = noop_llseek,
};

static struct miscdevice tgt_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "tgt",
        .fops = &tgt_fops,
};

static void tgt_ring_exit(struct tgt_ring *ring)
{
        int i;

        for (i = 0; i < TGT_RING_PAGES; i++)
                free_page(ring->tr_pages[i]);
}

static int tgt_ring_init(struct tgt_ring *ring)
{
        int i;

        spin_lock_init(&ring->tr_lock);

        for (i = 0; i < TGT_RING_PAGES; i++) {
                ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
                if (!ring->tr_pages[i]) {
                        eprintk("out of memory\n");
                        /* release the pages allocated so far */
                        while (i--)
                                free_page(ring->tr_pages[i]);
                        return -ENOMEM;
                }
        }

        return 0;
}

void scsi_tgt_if_exit(void)
{
        tgt_ring_exit(&tx_ring);
        tgt_ring_exit(&rx_ring);
        misc_deregister(&tgt_miscdev);
}

int scsi_tgt_if_init(void)
{
        int err;

        err = tgt_ring_init(&tx_ring);
        if (err)
                return err;

        err = tgt_ring_init(&rx_ring);
        if (err)
                goto free_tx_ring;

        err = misc_register(&tgt_miscdev);
        if (err)
                goto free_rx_ring;

        return 0;
free_rx_ring:
        tgt_ring_exit(&rx_ring);
free_tx_ring:
        tgt_ring_exit(&tx_ring);

        return err;
}