linux/drivers/block/nbd.c
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define NBD_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
        if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */
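/*
 * Debug output is compiled in only when NDEBUG is not defined and is gated
 * on debugflags, which is exported as a module parameter at the bottom of
 * this file.  For example, loading the module with
 *
 *      modprobe nbd debugflags=0x0600
 *
 * should enable the DBG_RX (0x0200) and DBG_TX (0x0400) messages.
 */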

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_SET_SOCK: return "set-sock";
        case NBD_SET_BLKSIZE: return "set-blksize";
        case NBD_SET_SIZE: return "set-size";
        case NBD_SET_TIMEOUT: return "set-timeout";
        case NBD_SET_FLAGS: return "set-flags";
        case NBD_DO_IT: return "do-it";
        case NBD_CLEAR_SOCK: return "clear-sock";
        case NBD_CLEAR_QUE: return "clear-que";
        case NBD_PRINT_DEBUG: return "print-debug";
        case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
        case NBD_DISCONNECT: return "disconnect";
        case BLKROSET: return "set-read-only";
        case BLKFLSBUF: return "flush-buffer-cache";
        }
        return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case  NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case  NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case  NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}
#endif /* NDEBUG */

static void nbd_end_request(struct request *req)
{
        int error = req->errors ? -EIO : 0;
        struct request_queue *q = req->q;
        unsigned long flags;

        dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
                        req, error ? "failed" : "done");

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void sock_shutdown(struct nbd_device *nbd, int lock)
{
        /* Forcibly shut down the socket, causing all listeners
         * to error out.
         *
         * FIXME: This code is duplicated from sys_shutdown, but
         * there should be a more generic interface rather than
         * calling socket ops directly here */
        if (lock)
                mutex_lock(&nbd->tx_lock);
        if (nbd->sock) {
                dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
                kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
                nbd->sock = NULL;
        }
        if (lock)
                mutex_unlock(&nbd->tx_lock);
}

static void nbd_xmit_timeout(unsigned long arg)
{
        struct task_struct *task = (struct task_struct *)arg;

        printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
                task->comm, task->pid);
        force_sig(SIGKILL, task);
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
                int msg_flags)
{
        struct socket *sock = nbd->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        /* Allow interception of SIGKILL only
         * Don't allow other signals to interrupt the transmission */
        siginitsetinv(&blocked, sigmask(SIGKILL));
        sigprocmask(SIG_SETMASK, &blocked, &oldset);

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send) {
                        struct timer_list ti;

                        if (nbd->xmit_timeout) {
                                init_timer(&ti);
                                ti.function = nbd_xmit_timeout;
                                ti.data = (unsigned long)current;
                                ti.expires = jiffies + nbd->xmit_timeout;
                                add_timer(&ti);
                        }
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                        if (nbd->xmit_timeout)
                                del_timer_sync(&ti);
                } else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (signal_pending(current)) {
                        siginfo_t info;
                        printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
                                task_pid_nr(current), current->comm,
                                dequeue_signal_lock(current, &current->blocked, &info));
                        result = -EINTR;
                        sock_shutdown(nbd, !send);
                        break;
                }

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        sigprocmask(SIG_SETMASK, &oldset, NULL);
        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
                int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

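/*
 * On the wire, nbd_send_req() below emits the 28-byte struct nbd_request
 * header from <linux/nbd.h>: magic (NBD_REQUEST_MAGIC), command type, an
 * 8-byte opaque handle (here the struct request pointer), the byte offset
 * and the length, with the integer fields in network byte order, optionally
 * followed by the write payload.  The server echoes the handle back in its
 * reply, which is how nbd_find_request() matches replies to outstanding
 * requests.
 */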
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));

        if (nbd_cmd(req) == NBD_CMD_FLUSH) {
                /* Other values are reserved for FLUSH requests.  */
                request.from = 0;
                request.len = 0;
        } else {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &req, sizeof(req));

        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                        nbd->disk->disk_name, req,
                        nbdcmd_to_ascii(nbd_cmd(req)),
                        (unsigned long long)blk_rq_pos(req) << 9,
                        blk_rq_bytes(req));
        result = sock_xmit(nbd, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                goto error_out;
        }

        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
                struct bio_vec *bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
                        if (!rq_iter_last(req, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
                                        nbd->disk->disk_name, req, bvec->bv_len);
                        result = sock_send_bvec(nbd, bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                goto error_out;
                        }
                }
        }
        return 0;

error_out:
        return -EIO;
}

static struct request *nbd_find_request(struct nbd_device *nbd,
                                        struct request *xreq)
{
        struct request *req, *tmp;
        int err;

        err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
        if (unlikely(err))
                goto out;

        spin_lock(&nbd->queue_lock);
        list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
                spin_unlock(&nbd->queue_lock);
                return req;
        }
        spin_unlock(&nbd->queue_lock);

        err = -ENOENT;

out:
        return ERR_PTR(err);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

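/*
 * Replies arrive as struct nbd_reply: magic (NBD_REPLY_MAGIC), an error code
 * and the 8-byte handle copied from the originating request, followed by the
 * payload data when the request was a read.
 */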
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
        int result;
        struct nbd_reply reply;
        struct request *req;

        reply.magic = 0;
        result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Receive control failed (result %d)\n", result);
                goto harderror;
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                result = -EPROTO;
                goto harderror;
        }

        req = nbd_find_request(nbd, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;

                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
                        reply.handle);
                result = -EBADR;
                goto harderror;
        }

        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return req;
        }

        dprintk(DBG_RX, "%s: request %p: got reply\n",
                        nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
                struct bio_vec *bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
                                nbd->disk->disk_name, req, bvec->bv_len);
                }
        }
        return req;
harderror:
        nbd->harderror = result;
        return NULL;
}

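/*
 * While NBD_DO_IT is active, the pid of the owning userspace process is
 * exposed read-only via sysfs (typically /sys/block/nbd<N>/pid) so tools
 * can tell whether a device is already in use.
 */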
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);

        return sprintf(buf, "%ld\n",
                (long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

static int nbd_do_it(struct nbd_device *nbd)
{
        struct request *req;
        int ret;

        BUG_ON(nbd->magic != NBD_MAGIC);

        sk_set_memalloc(nbd->sock->sk);
        nbd->pid = task_pid_nr(current);
        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                nbd->pid = 0;
                return ret;
        }

        while ((req = nbd_read_stat(nbd)) != NULL)
                nbd_end_request(req);

        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
        nbd->pid = 0;
        return 0;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        struct request *req;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /*
         * Because we have set nbd->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now.  For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
        BUG_ON(nbd->sock);
        BUG_ON(nbd->active_req);

        while (!list_empty(&nbd->queue_head)) {
                req = list_entry(nbd->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(req);
        }

        while (!list_empty(&nbd->waiting_queue)) {
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(req);
        }
}

static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
        if (req->cmd_type != REQ_TYPE_FS)
                goto error_out;

        nbd_cmd(req) = NBD_CMD_READ;
        if (rq_data_dir(req) == WRITE) {
                if ((req->cmd_flags & REQ_DISCARD)) {
                        WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
                        nbd_cmd(req) = NBD_CMD_TRIM;
                } else
                        nbd_cmd(req) = NBD_CMD_WRITE;
                if (nbd->flags & NBD_FLAG_READ_ONLY) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Write on read-only\n");
                        goto error_out;
                }
        }

        if (req->cmd_flags & REQ_FLUSH) {
                BUG_ON(unlikely(blk_rq_sectors(req)));
                nbd_cmd(req) = NBD_CMD_FLUSH;
        }

        req->errors = 0;

        mutex_lock(&nbd->tx_lock);
        if (unlikely(!nbd->sock)) {
                mutex_unlock(&nbd->tx_lock);
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted send on closed socket\n");
                goto error_out;
        }

        nbd->active_req = req;

        if (nbd_send_req(nbd, req) != 0) {
                dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
                req->errors++;
                nbd_end_request(req);
        } else {
                spin_lock(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }

        nbd->active_req = NULL;
        mutex_unlock(&nbd->tx_lock);
        wake_up_all(&nbd->active_wq);

        return;

error_out:
        req->errors++;
        nbd_end_request(req);
}

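/*
 * Request flow: do_nbd_request() below takes requests off the block layer
 * queue and parks them on nbd->waiting_queue; nbd_thread() (one kernel
 * thread per device, started from NBD_DO_IT) pulls them off and transmits
 * them via nbd_handle_req(); the NBD_DO_IT caller itself sits in nbd_do_it()
 * receiving replies and completing the matching requests.
 */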
static int nbd_thread(void *data)
{
        struct nbd_device *nbd = data;
        struct request *req;

        set_user_nice(current, -20);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&nbd->waiting_queue));

                /* extract request */
                if (list_empty(&nbd->waiting_queue))
                        continue;

                spin_lock_irq(&nbd->queue_lock);
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                spin_unlock_irq(&nbd->queue_lock);

                /* handle request */
                nbd_handle_req(nbd, req);
        }
        return 0;
}

/*
 * We always wait for the result of a write, for now. It would be nice to
 * make it optional in the future, e.g.:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void do_nbd_request(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *nbd;

                spin_unlock_irq(q->queue_lock);

                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
                                req->rq_disk->disk_name, req, req->cmd_type);

                nbd = req->rq_disk->private_data;

                BUG_ON(nbd->magic != NBD_MAGIC);

                if (unlikely(!nbd->sock)) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Attempted send on closed socket\n");
                        req->errors++;
                        nbd_end_request(req);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                spin_lock_irq(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->waiting_queue);
                spin_unlock_irq(&nbd->queue_lock);

                wake_up(&nbd->waiting_wq);

                spin_lock_irq(q->queue_lock);
        }
}

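/*
 * Sketch of the ioctl sequence a userspace client (nbd-client, for example)
 * issues once it holds a connected socket to the server; the device node and
 * sizes are illustrative:
 *
 *      fd = open("/dev/nbd0", O_RDWR);
 *      ioctl(fd, NBD_SET_SOCK, sockfd);
 *      ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *      ioctl(fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *      ioctl(fd, NBD_DO_IT);           (blocks until disconnect)
 */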
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;

                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->tx_lock);
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;

                /* Check again after getting mutex back.  */
                if (!nbd->sock)
                        return -EINVAL;

                nbd->disconnect = 1;

                nbd_send_req(nbd, &sreq);
                return 0;
        }

        case NBD_CLEAR_SOCK: {
                struct file *file;

                nbd->sock = NULL;
                file = nbd->file;
                nbd->file = NULL;
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
                if (file)
                        fput(file);
                return 0;
        }

        case NBD_SET_SOCK: {
                struct file *file;
                if (nbd->file)
                        return -EBUSY;
                file = fget(arg);
                if (file) {
                        struct inode *inode = file_inode(file);
                        if (S_ISSOCK(inode->i_mode)) {
                                nbd->file = file;
                                nbd->sock = SOCKET_I(inode);
                                if (max_part > 0)
                                        bdev->bd_invalidated = 1;
                                nbd->disconnect = 0; /* we're connected now */
                                return 0;
                        } else {
                                fput(file);
                        }
                }
                return -EINVAL;
        }

        case NBD_SET_BLKSIZE:
                nbd->blksize = arg;
                nbd->bytesize &= ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_SIZE:
                nbd->bytesize = arg & ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_TIMEOUT:
                nbd->xmit_timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_SET_SIZE_BLOCKS:
                nbd->bytesize = ((u64) arg) * nbd->blksize;
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_DO_IT: {
                struct task_struct *thread;
                struct file *file;
                int error;

                if (nbd->pid)
                        return -EBUSY;
                if (!nbd->file)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);

                if (nbd->flags & NBD_FLAG_READ_ONLY)
                        set_device_ro(bdev, true);
                if (nbd->flags & NBD_FLAG_SEND_TRIM)
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                nbd->disk->queue);
                if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                        blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
                else
                        blk_queue_flush(nbd->disk->queue, 0);

                thread = kthread_create(nbd_thread, nbd, "%s",
                                        nbd->disk->disk_name);
                if (IS_ERR(thread)) {
                        mutex_lock(&nbd->tx_lock);
                        return PTR_ERR(thread);
                }
                wake_up_process(thread);
                error = nbd_do_it(nbd);
                kthread_stop(thread);

                mutex_lock(&nbd->tx_lock);
                if (error)
                        return error;
                sock_shutdown(nbd, 0);
                file = nbd->file;
                nbd->file = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
                set_device_ro(bdev, false);
                if (file)
                        fput(file);
                nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
                set_capacity(nbd->disk, 0);
                if (max_part > 0)
                        ioctl_by_bdev(bdev, BLKRRPART, 0);
                if (nbd->disconnect) /* user requested, ignore socket errors */
                        return 0;
                return nbd->harderror;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only.  The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                dev_info(disk_to_dev(nbd->disk),
                        "next = %p, prev = %p, head = %p\n",
                        nbd->queue_head.next, nbd->queue_head.prev,
                        &nbd->queue_head);
                return 0;
        }
        return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /* Anyone capable of this syscall can do *real bad* things */
        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
                nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

        mutex_lock(&nbd->tx_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->tx_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that users can know the maximum number of
                 * partitions the kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }
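        /*
         * For example, max_part=15 gives part_shift = fls(15) = 4, i.e. 16
         * minors per device: minor 0 for the whole disk plus up to 15
         * partitions.
         */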

        /* Don't leak the device array if the configuration is invalid. */
        if ((1UL << part_shift) > DISK_MAX_PARTS) {
                kfree(nbd_dev);
                return -EINVAL;
        }

        if (nbds_max > 1UL << (MINORBITS - part_shift)) {
                kfree(nbd_dev);
                return -EINVAL;
        }

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
                if (!disk->queue) {
                        put_disk(disk);
                        goto out;
                }
                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                disk->queue->limits.max_discard_sectors = UINT_MAX;
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
        dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].file = NULL;
                nbd_dev[i].magic = NBD_MAGIC;
                nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
                init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                set_capacity(disk, 0);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}


static void __exit nbd_cleanup(void)
{
        int i;
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

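/*
 * Example invocation (values are illustrative): create four devices, each
 * supporting up to 15 partitions:
 *
 *      modprobe nbd nbds_max=4 max_part=15
 */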
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif