linux/drivers/block/xen-blkback/blkback.c
   1/******************************************************************************
   2 *
   3 * Back-end of the driver for virtual block devices. This portion of the
   4 * driver exports a 'unified' block-device interface that can be accessed
   5 * by any operating system that implements a compatible front end. A
   6 * reference front-end implementation can be found in:
   7 *  drivers/block/xen-blkfront.c
   8 *
   9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
  10 * Copyright (c) 2005, Christopher Clark
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License version 2
  14 * as published by the Free Software Foundation; or, when distributed
  15 * separately from the Linux kernel or incorporated into other
  16 * software packages, subject to the following license:
  17 *
  18 * Permission is hereby granted, free of charge, to any person obtaining a copy
  19 * of this source file (the "Software"), to deal in the Software without
  20 * restriction, including without limitation the rights to use, copy, modify,
  21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  22 * and to permit persons to whom the Software is furnished to do so, subject to
  23 * the following conditions:
  24 *
  25 * The above copyright notice and this permission notice shall be included in
  26 * all copies or substantial portions of the Software.
  27 *
  28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  34 * IN THE SOFTWARE.
  35 */
  36
  37#include <linux/spinlock.h>
  38#include <linux/kthread.h>
  39#include <linux/list.h>
  40#include <linux/delay.h>
  41#include <linux/freezer.h>
  42#include <linux/bitmap.h>
  43
  44#include <xen/events.h>
  45#include <xen/page.h>
  46#include <xen/xen.h>
  47#include <asm/xen/hypervisor.h>
  48#include <asm/xen/hypercall.h>
  49#include <xen/balloon.h>
  50#include "common.h"
  51
  52/*
  53 * Maximum number of unused free pages to keep in the internal buffer.
   54 * Setting this too low reduces the memory used by each backend, but can
   55 * incur a performance penalty.
   56 *
   57 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
   58 * be set to a lower value to save memory, at the cost of degraded
   59 * performance on some I/O-intensive workloads.
  60 */
  61
  62static int xen_blkif_max_buffer_pages = 1024;
  63module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
  64MODULE_PARM_DESC(max_buffer_pages,
  65"Maximum number of free pages to keep in each block backend buffer");
  66
  67/*
  68 * Maximum number of grants to map persistently in blkback. For maximum
   69 * performance this should be the total number of grants that can be used
   70 * to fill the ring, but since this might become too high, especially with
  71 * the use of indirect descriptors, we set it to a value that provides good
  72 * performance without using too much memory.
  73 *
   74 * When the list of persistent grants is full we clean it up using an LRU
  75 * algorithm.
  76 */
  77
  78static int xen_blkif_max_pgrants = 1056;
  79module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
  80MODULE_PARM_DESC(max_persistent_grants,
  81                 "Maximum number of grants to map persistently");
  82
  83/*
  84 * The LRU mechanism to clean the lists of persistent grants needs to
  85 * be executed periodically. The time interval between consecutive executions
  86 * of the purge mechanism is set in ms.
  87 */
  88#define LRU_INTERVAL 100
  89
  90/*
  91 * When the persistent grants list is full we will remove unused grants
   92 * from the list. This is the percentage of grants to remove on each
   93 * LRU pass.
  94 */
  95#define LRU_PERCENT_CLEAN 5
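/*
 * Worked example with the defaults above: purge_persistent_gnt() starts
 * from (1056 / 100) * 5 = 50 grants, adds however many grants currently
 * exceed xen_blkif_max_pgrants, and caps the result at the number of
 * persistent grants actually in the tree.
 */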
  96
   97/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
   98static int log_stats;
   99module_param(log_stats, int, 0644);
 100
 101#define BLKBACK_INVALID_HANDLE (~0)
 102
 103/* Number of free pages to remove on each call to free_xenballooned_pages */
 104#define NUM_BATCH_FREE_PAGES 10
 105
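/*
 * Free-page pool helpers: get_free_page() hands out a page from the
 * per-backend free list (falling back to alloc_xenballooned_pages() when
 * the list is empty), put_free_pages() returns pages to the list, and
 * shrink_free_pagepool() releases pages back to the balloon in batches of
 * NUM_BATCH_FREE_PAGES until at most 'num' pages remain cached.
 */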
 106static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
 107{
 108        unsigned long flags;
 109
 110        spin_lock_irqsave(&blkif->free_pages_lock, flags);
 111        if (list_empty(&blkif->free_pages)) {
 112                BUG_ON(blkif->free_pages_num != 0);
 113                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
 114                return alloc_xenballooned_pages(1, page, false);
 115        }
 116        BUG_ON(blkif->free_pages_num == 0);
 117        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
 118        list_del(&page[0]->lru);
 119        blkif->free_pages_num--;
 120        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
 121
 122        return 0;
 123}
 124
 125static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
 126                                  int num)
 127{
 128        unsigned long flags;
 129        int i;
 130
 131        spin_lock_irqsave(&blkif->free_pages_lock, flags);
 132        for (i = 0; i < num; i++)
 133                list_add(&page[i]->lru, &blkif->free_pages);
 134        blkif->free_pages_num += num;
 135        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
 136}
 137
 138static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
 139{
 140        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
 141        struct page *page[NUM_BATCH_FREE_PAGES];
 142        unsigned int num_pages = 0;
 143        unsigned long flags;
 144
 145        spin_lock_irqsave(&blkif->free_pages_lock, flags);
 146        while (blkif->free_pages_num > num) {
 147                BUG_ON(list_empty(&blkif->free_pages));
 148                page[num_pages] = list_first_entry(&blkif->free_pages,
 149                                                   struct page, lru);
 150                list_del(&page[num_pages]->lru);
 151                blkif->free_pages_num--;
 152                if (++num_pages == NUM_BATCH_FREE_PAGES) {
 153                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
 154                        free_xenballooned_pages(num_pages, page);
 155                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
 156                        num_pages = 0;
 157                }
 158        }
 159        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
 160        if (num_pages != 0)
 161                free_xenballooned_pages(num_pages, page);
 162}
 163
 164#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 165
 166static int do_block_io_op(struct xen_blkif *blkif);
 167static int dispatch_rw_block_io(struct xen_blkif *blkif,
 168                                struct blkif_request *req,
 169                                struct pending_req *pending_req);
 170static void make_response(struct xen_blkif *blkif, u64 id,
 171                          unsigned short op, int st);
 172
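/*
 * Walk an rb tree of persistent grants in key order. 'n' caches the next
 * node before the loop body runs, so the body may rb_erase() and free
 * 'pos' without breaking the iteration (free_persistent_gnts() and
 * purge_persistent_gnt() below rely on this).
 */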
 173#define foreach_grant_safe(pos, n, rbtree, node) \
 174        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
 175             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
 176             &(pos)->node != NULL; \
 177             (pos) = container_of(n, typeof(*(pos)), node), \
 178             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 179
 180
 181/*
 182 * We don't need locking around the persistent grant helpers
  183 * because blkback uses a single thread for each backend, so we
  184 * can be sure that these functions will never be called recursively.
  185 *
  186 * The only exception is put_persistent_gnt, which can be called
 187 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 188 * bit operations to modify the flags of a persistent grant and to count
 189 * the number of used grants.
 190 */
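/*
 * Insert a freshly mapped grant into the red-black tree of persistent
 * grants, keyed by grant reference. Returns -EBUSY when the backend is
 * already caching xen_blkif_max_pgrants grants, -EINVAL when the gref is
 * already in the tree, and 0 on success (the grant is marked ACTIVE and
 * counted as in use).
 */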
 191static int add_persistent_gnt(struct xen_blkif *blkif,
 192                               struct persistent_gnt *persistent_gnt)
 193{
 194        struct rb_node **new = NULL, *parent = NULL;
 195        struct persistent_gnt *this;
 196
 197        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
 198                if (!blkif->vbd.overflow_max_grants)
 199                        blkif->vbd.overflow_max_grants = 1;
 200                return -EBUSY;
 201        }
 202        /* Figure out where to put new node */
 203        new = &blkif->persistent_gnts.rb_node;
 204        while (*new) {
 205                this = container_of(*new, struct persistent_gnt, node);
 206
 207                parent = *new;
 208                if (persistent_gnt->gnt < this->gnt)
 209                        new = &((*new)->rb_left);
 210                else if (persistent_gnt->gnt > this->gnt)
 211                        new = &((*new)->rb_right);
 212                else {
 213                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
 214                        return -EINVAL;
 215                }
 216        }
 217
 218        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
 219        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 220        /* Add new node and rebalance tree. */
 221        rb_link_node(&(persistent_gnt->node), parent, new);
 222        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
 223        blkif->persistent_gnt_c++;
 224        atomic_inc(&blkif->persistent_gnt_in_use);
 225        return 0;
 226}
 227
 228static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
 229                                                 grant_ref_t gref)
 230{
 231        struct persistent_gnt *data;
 232        struct rb_node *node = NULL;
 233
 234        node = blkif->persistent_gnts.rb_node;
 235        while (node) {
 236                data = container_of(node, struct persistent_gnt, node);
 237
 238                if (gref < data->gnt)
 239                        node = node->rb_left;
 240                else if (gref > data->gnt)
 241                        node = node->rb_right;
 242                else {
  243                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
 244                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
 245                                return NULL;
 246                        }
 247                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
 248                        atomic_inc(&blkif->persistent_gnt_in_use);
 249                        return data;
 250                }
 251        }
 252        return NULL;
 253}
 254
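/*
 * Drop the in-use reference on a persistent grant: clear ACTIVE, remember
 * via WAS_ACTIVE that the grant was recently used (so the LRU purge skips
 * it on its first pass), and decrement the in-use counter. May be called
 * from interrupt context, hence the atomic bit operations.
 */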
 255static void put_persistent_gnt(struct xen_blkif *blkif,
 256                               struct persistent_gnt *persistent_gnt)
 257{
  258        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
  259                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
 260        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
 261        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 262        atomic_dec(&blkif->persistent_gnt_in_use);
 263}
 264
 265static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 266                                 unsigned int num)
 267{
 268        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 269        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 270        struct persistent_gnt *persistent_gnt;
 271        struct rb_node *n;
 272        int ret = 0;
 273        int segs_to_unmap = 0;
 274
 275        foreach_grant_safe(persistent_gnt, n, root, node) {
 276                BUG_ON(persistent_gnt->handle ==
 277                        BLKBACK_INVALID_HANDLE);
 278                gnttab_set_unmap_op(&unmap[segs_to_unmap],
 279                        (unsigned long) pfn_to_kaddr(page_to_pfn(
 280                                persistent_gnt->page)),
 281                        GNTMAP_host_map,
 282                        persistent_gnt->handle);
 283
 284                pages[segs_to_unmap] = persistent_gnt->page;
 285
 286                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 287                        !rb_next(&persistent_gnt->node)) {
 288                        ret = gnttab_unmap_refs(unmap, NULL, pages,
 289                                segs_to_unmap);
 290                        BUG_ON(ret);
 291                        put_free_pages(blkif, pages, segs_to_unmap);
 292                        segs_to_unmap = 0;
 293                }
 294
 295                rb_erase(&persistent_gnt->node, root);
 296                kfree(persistent_gnt);
 297                num--;
 298        }
 299        BUG_ON(num != 0);
 300}
 301
 302static void unmap_purged_grants(struct work_struct *work)
 303{
 304        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 305        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 306        struct persistent_gnt *persistent_gnt;
 307        int ret, segs_to_unmap = 0;
 308        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
 309
  310        while (!list_empty(&blkif->persistent_purge_list)) {
 311                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
 312                                                  struct persistent_gnt,
 313                                                  remove_node);
 314                list_del(&persistent_gnt->remove_node);
 315
 316                gnttab_set_unmap_op(&unmap[segs_to_unmap],
 317                        vaddr(persistent_gnt->page),
 318                        GNTMAP_host_map,
 319                        persistent_gnt->handle);
 320
 321                pages[segs_to_unmap] = persistent_gnt->page;
 322
 323                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 324                        ret = gnttab_unmap_refs(unmap, NULL, pages,
 325                                segs_to_unmap);
 326                        BUG_ON(ret);
 327                        put_free_pages(blkif, pages, segs_to_unmap);
 328                        segs_to_unmap = 0;
 329                }
 330                kfree(persistent_gnt);
 331        }
 332        if (segs_to_unmap > 0) {
 333                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
 334                BUG_ON(ret);
 335                put_free_pages(blkif, pages, segs_to_unmap);
 336        }
 337}
 338
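/*
 * LRU purge of the persistent grant tree. It only runs once the tree is
 * full and a request has overflowed the limit. The tree is scanned in up
 * to three passes:
 *   1) remove grants that are neither ACTIVE nor WAS_ACTIVE,
 *   2) if that was not enough, also remove WAS_ACTIVE (recently used) ones,
 *   3) finally clear WAS_ACTIVE on the survivors for the next interval.
 * Removed grants are queued on persistent_purge_list and unmapped from a
 * workqueue by unmap_purged_grants().
 */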
 339static void purge_persistent_gnt(struct xen_blkif *blkif)
 340{
 341        struct persistent_gnt *persistent_gnt;
 342        struct rb_node *n;
 343        unsigned int num_clean, total;
 344        bool scan_used = false, clean_used = false;
 345        struct rb_root *root;
 346
 347        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
 348            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
 349            !blkif->vbd.overflow_max_grants)) {
 350                return;
 351        }
 352
 353        if (work_pending(&blkif->persistent_purge_work)) {
 354                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
 355                return;
 356        }
 357
 358        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
 359        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
 360        num_clean = min(blkif->persistent_gnt_c, num_clean);
 361        if ((num_clean == 0) ||
 362            (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
 363                return;
 364
 365        /*
  366         * At this point, we can be sure that there will be no calls
  367         * to get_persistent_gnt (because we are executing this code from
 368         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
 369         * which means that the number of currently used grants will go down,
 370         * but never up, so we will always be able to remove the requested
 371         * number of grants.
 372         */
 373
 374        total = num_clean;
 375
 376        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 377
 378        INIT_LIST_HEAD(&blkif->persistent_purge_list);
 379        root = &blkif->persistent_gnts;
 380purge_list:
 381        foreach_grant_safe(persistent_gnt, n, root, node) {
 382                BUG_ON(persistent_gnt->handle ==
 383                        BLKBACK_INVALID_HANDLE);
 384
 385                if (clean_used) {
 386                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
 387                        continue;
 388                }
 389
 390                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
 391                        continue;
 392                if (!scan_used &&
 393                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
 394                        continue;
 395
 396                rb_erase(&persistent_gnt->node, root);
 397                list_add(&persistent_gnt->remove_node,
 398                         &blkif->persistent_purge_list);
 399                if (--num_clean == 0)
 400                        goto finished;
 401        }
 402        /*
 403         * If we get here it means we also need to start cleaning
 404         * grants that were used since last purge in order to cope
 405         * with the requested num
 406         */
 407        if (!scan_used && !clean_used) {
 408                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
 409                scan_used = true;
 410                goto purge_list;
 411        }
 412finished:
 413        if (!clean_used) {
 414                pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
 415                clean_used = true;
 416                goto purge_list;
 417        }
 418
 419        blkif->persistent_gnt_c -= (total - num_clean);
 420        blkif->vbd.overflow_max_grants = 0;
 421
 422        /* We can defer this work */
 423        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
 424        schedule_work(&blkif->persistent_purge_work);
 425        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
 426        return;
 427}
 428
 429/*
  430 * Retrieve a free pending_req structure from the 'pending_free' list.
 431 */
 432static struct pending_req *alloc_req(struct xen_blkif *blkif)
 433{
 434        struct pending_req *req = NULL;
 435        unsigned long flags;
 436
 437        spin_lock_irqsave(&blkif->pending_free_lock, flags);
 438        if (!list_empty(&blkif->pending_free)) {
 439                req = list_entry(blkif->pending_free.next, struct pending_req,
 440                                 free_list);
 441                list_del(&req->free_list);
 442        }
 443        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
 444        return req;
 445}
 446
 447/*
 448 * Return the 'pending_req' structure back to the freepool. We also
  449 * wake up the thread if it was waiting for a free pending_req.
 450 */
 451static void free_req(struct xen_blkif *blkif, struct pending_req *req)
 452{
 453        unsigned long flags;
 454        int was_empty;
 455
 456        spin_lock_irqsave(&blkif->pending_free_lock, flags);
 457        was_empty = list_empty(&blkif->pending_free);
 458        list_add(&req->free_list, &blkif->pending_free);
 459        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
 460        if (was_empty)
 461                wake_up(&blkif->pending_free_wq);
 462}
 463
 464/*
 465 * Routines for managing virtual block devices (vbds).
 466 */
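/*
 * Check that the request is permitted on this vbd (read-only devices only
 * accept reads) and that the sector range neither wraps nor extends past
 * the end of the device; on success fill in the physical device and bdev.
 */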
 467static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
 468                             int operation)
 469{
 470        struct xen_vbd *vbd = &blkif->vbd;
 471        int rc = -EACCES;
 472
 473        if ((operation != READ) && vbd->readonly)
 474                goto out;
 475
 476        if (likely(req->nr_sects)) {
 477                blkif_sector_t end = req->sector_number + req->nr_sects;
 478
 479                if (unlikely(end < req->sector_number))
 480                        goto out;
 481                if (unlikely(end > vbd_sz(vbd)))
 482                        goto out;
 483        }
 484
 485        req->dev  = vbd->pdevice;
 486        req->bdev = vbd->bdev;
 487        rc = 0;
 488
 489 out:
 490        return rc;
 491}
 492
 493static void xen_vbd_resize(struct xen_blkif *blkif)
 494{
 495        struct xen_vbd *vbd = &blkif->vbd;
 496        struct xenbus_transaction xbt;
 497        int err;
 498        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
 499        unsigned long long new_size = vbd_sz(vbd);
 500
 501        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
 502                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
 503        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
 504        vbd->size = new_size;
 505again:
 506        err = xenbus_transaction_start(&xbt);
 507        if (err) {
  508                pr_warn(DRV_PFX "Error starting transaction\n");
 509                return;
 510        }
 511        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 512                            (unsigned long long)vbd_sz(vbd));
 513        if (err) {
  514                pr_warn(DRV_PFX "Error writing new size\n");
 515                goto abort;
 516        }
 517        /*
 518         * Write the current state; we will use this to synchronize
 519         * the front-end. If the current state is "connected" the
 520         * front-end will get the new size information online.
 521         */
 522        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
 523        if (err) {
  524                pr_warn(DRV_PFX "Error writing the state\n");
 525                goto abort;
 526        }
 527
 528        err = xenbus_transaction_end(xbt, 0);
 529        if (err == -EAGAIN)
 530                goto again;
 531        if (err)
  532                pr_warn(DRV_PFX "Error ending transaction\n");
 533        return;
 534abort:
 535        xenbus_transaction_end(xbt, 1);
 536}
 537
 538/*
 539 * Notification from the guest OS.
 540 */
 541static void blkif_notify_work(struct xen_blkif *blkif)
 542{
 543        blkif->waiting_reqs = 1;
 544        wake_up(&blkif->wq);
 545}
 546
 547irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 548{
 549        blkif_notify_work(dev_id);
 550        return IRQ_HANDLED;
 551}
 552
 553/*
 554 * SCHEDULER FUNCTIONS
 555 */
 556
 557static void print_stats(struct xen_blkif *blkif)
 558{
 559        pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
 560                 "  |  ds %4llu | pg: %4u/%4d\n",
 561                 current->comm, blkif->st_oo_req,
 562                 blkif->st_rd_req, blkif->st_wr_req,
 563                 blkif->st_f_req, blkif->st_ds_req,
 564                 blkif->persistent_gnt_c,
 565                 xen_blkif_max_pgrants);
 566        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 567        blkif->st_rd_req = 0;
 568        blkif->st_wr_req = 0;
 569        blkif->st_oo_req = 0;
 570        blkif->st_ds_req = 0;
 571}
 572
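/*
 * Per-backend kernel thread: wait for the frontend to post requests (and
 * for a free pending_req), dispatch them via do_block_io_op(), and every
 * LRU_INTERVAL ms purge the persistent grant tree and shrink the free page
 * pool. On shutdown, drop all cached pages and persistent grants.
 */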
 573int xen_blkif_schedule(void *arg)
 574{
 575        struct xen_blkif *blkif = arg;
 576        struct xen_vbd *vbd = &blkif->vbd;
 577        unsigned long timeout;
 578        int ret;
 579
 580        xen_blkif_get(blkif);
 581
 582        while (!kthread_should_stop()) {
 583                if (try_to_freeze())
 584                        continue;
 585                if (unlikely(vbd->size != vbd_sz(vbd)))
 586                        xen_vbd_resize(blkif);
 587
 588                timeout = msecs_to_jiffies(LRU_INTERVAL);
 589
 590                timeout = wait_event_interruptible_timeout(
 591                        blkif->wq,
 592                        blkif->waiting_reqs || kthread_should_stop(),
 593                        timeout);
 594                if (timeout == 0)
 595                        goto purge_gnt_list;
 596                timeout = wait_event_interruptible_timeout(
 597                        blkif->pending_free_wq,
 598                        !list_empty(&blkif->pending_free) ||
 599                        kthread_should_stop(),
 600                        timeout);
 601                if (timeout == 0)
 602                        goto purge_gnt_list;
 603
 604                blkif->waiting_reqs = 0;
 605                smp_mb(); /* clear flag *before* checking for work */
 606
 607                ret = do_block_io_op(blkif);
 608                if (ret > 0)
 609                        blkif->waiting_reqs = 1;
 610                if (ret == -EACCES)
 611                        wait_event_interruptible(blkif->shutdown_wq,
 612                                                 kthread_should_stop());
 613
 614purge_gnt_list:
 615                if (blkif->vbd.feature_gnt_persistent &&
 616                    time_after(jiffies, blkif->next_lru)) {
 617                        purge_persistent_gnt(blkif);
 618                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
 619                }
 620
 621                /* Shrink if we have more than xen_blkif_max_buffer_pages */
 622                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
 623
 624                if (log_stats && time_after(jiffies, blkif->st_print))
 625                        print_stats(blkif);
 626        }
 627
 628        /* Since we are shutting down remove all pages from the buffer */
 629        shrink_free_pagepool(blkif, 0 /* All */);
 630
 631        /* Free all persistent grant pages */
 632        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
 633                free_persistent_gnts(blkif, &blkif->persistent_gnts,
 634                        blkif->persistent_gnt_c);
 635
 636        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 637        blkif->persistent_gnt_c = 0;
 638
 639        if (log_stats)
 640                print_stats(blkif);
 641
 642        blkif->xenblkd = NULL;
 643        xen_blkif_put(blkif);
 644
 645        return 0;
 646}
 647
 648/*
  649 * Unmap the grant references, and also remove the M2P overrides
 650 * used in the 'pending_req'.
 651 */
 652static void xen_blkbk_unmap(struct xen_blkif *blkif,
 653                            struct grant_page *pages[],
 654                            int num)
 655{
 656        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 657        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 658        unsigned int i, invcount = 0;
 659        int ret;
 660
 661        for (i = 0; i < num; i++) {
 662                if (pages[i]->persistent_gnt != NULL) {
 663                        put_persistent_gnt(blkif, pages[i]->persistent_gnt);
 664                        continue;
 665                }
 666                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
 667                        continue;
 668                unmap_pages[invcount] = pages[i]->page;
 669                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
 670                                    GNTMAP_host_map, pages[i]->handle);
 671                pages[i]->handle = BLKBACK_INVALID_HANDLE;
 672                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 673                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
 674                                                invcount);
 675                        BUG_ON(ret);
 676                        put_free_pages(blkif, unmap_pages, invcount);
 677                        invcount = 0;
 678                }
 679        }
 680        if (invcount) {
 681                ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
 682                BUG_ON(ret);
 683                put_free_pages(blkif, unmap_pages, invcount);
 684        }
 685}
 686
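/*
 * Map the frontend's grant references into this domain. Grants that are
 * already persistently mapped are simply reused; the rest are mapped with
 * gnttab_map_refs() in batches of up to BLKIF_MAX_SEGMENTS_PER_REQUEST
 * (the 'again' loop) and, when there is still room, promoted to persistent
 * grants.
 */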
 687static int xen_blkbk_map(struct xen_blkif *blkif,
 688                         struct grant_page *pages[],
 689                         int num, bool ro)
 690{
 691        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 692        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 693        struct persistent_gnt *persistent_gnt = NULL;
 694        phys_addr_t addr = 0;
 695        int i, seg_idx, new_map_idx;
 696        int segs_to_map = 0;
 697        int ret = 0;
 698        int last_map = 0, map_until = 0;
 699        int use_persistent_gnts;
 700
 701        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
 702
 703        /*
  704         * Set up map[..] with the PFN of a page in our domain and the
  705         * corresponding grant reference for every page that is not
  706         * already mapped persistently.
 707         */
 708again:
 709        for (i = map_until; i < num; i++) {
 710                uint32_t flags;
 711
 712                if (use_persistent_gnts)
 713                        persistent_gnt = get_persistent_gnt(
 714                                blkif,
 715                                pages[i]->gref);
 716
 717                if (persistent_gnt) {
 718                        /*
 719                         * We are using persistent grants and
 720                         * the grant is already mapped
 721                         */
 722                        pages[i]->page = persistent_gnt->page;
 723                        pages[i]->persistent_gnt = persistent_gnt;
 724                } else {
 725                        if (get_free_page(blkif, &pages[i]->page))
 726                                goto out_of_memory;
 727                        addr = vaddr(pages[i]->page);
 728                        pages_to_gnt[segs_to_map] = pages[i]->page;
 729                        pages[i]->persistent_gnt = NULL;
 730                        flags = GNTMAP_host_map;
 731                        if (!use_persistent_gnts && ro)
 732                                flags |= GNTMAP_readonly;
 733                        gnttab_set_map_op(&map[segs_to_map++], addr,
 734                                          flags, pages[i]->gref,
 735                                          blkif->domid);
 736                }
 737                map_until = i + 1;
 738                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
 739                        break;
 740        }
 741
 742        if (segs_to_map) {
 743                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
 744                BUG_ON(ret);
 745        }
 746
 747        /*
 748         * Now swizzle the MFN in our domain with the MFN from the other domain
 749         * so that when we access vaddr(pending_req,i) it has the contents of
 750         * the page from the other domain.
 751         */
 752        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
 753                if (!pages[seg_idx]->persistent_gnt) {
 754                        /* This is a newly mapped grant */
 755                        BUG_ON(new_map_idx >= segs_to_map);
 756                        if (unlikely(map[new_map_idx].status != 0)) {
 757                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
 758                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 759                                ret |= 1;
 760                                goto next;
 761                        }
 762                        pages[seg_idx]->handle = map[new_map_idx].handle;
 763                } else {
 764                        continue;
 765                }
 766                if (use_persistent_gnts &&
 767                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
 768                        /*
 769                         * We are using persistent grants, the grant is
 770                         * not mapped but we might have room for it.
 771                         */
 772                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
 773                                                 GFP_KERNEL);
 774                        if (!persistent_gnt) {
 775                                /*
 776                                 * If we don't have enough memory to
 777                                 * allocate the persistent_gnt struct
  778                                 * map this grant non-persistently
 779                                 */
 780                                goto next;
 781                        }
 782                        persistent_gnt->gnt = map[new_map_idx].ref;
 783                        persistent_gnt->handle = map[new_map_idx].handle;
 784                        persistent_gnt->page = pages[seg_idx]->page;
 785                        if (add_persistent_gnt(blkif,
 786                                               persistent_gnt)) {
 787                                kfree(persistent_gnt);
 788                                persistent_gnt = NULL;
 789                                goto next;
 790                        }
 791                        pages[seg_idx]->persistent_gnt = persistent_gnt;
 792                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
 793                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
 794                                 xen_blkif_max_pgrants);
 795                        goto next;
 796                }
 797                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
 798                        blkif->vbd.overflow_max_grants = 1;
 799                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
 800                                 blkif->domid, blkif->vbd.handle);
 801                }
 802                /*
 803                 * We could not map this grant persistently, so use it as
 804                 * a non-persistent grant.
 805                 */
 806next:
 807                new_map_idx++;
 808        }
 809        segs_to_map = 0;
 810        last_map = map_until;
 811        if (map_until != num)
 812                goto again;
 813
 814        return ret;
 815
 816out_of_memory:
 817        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 818        put_free_pages(blkif, pages_to_gnt, segs_to_map);
 819        return -ENOMEM;
 820}
 821
 822static int xen_blkbk_map_seg(struct pending_req *pending_req)
 823{
 824        int rc;
 825
 826        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
 827                           pending_req->nr_pages,
 828                           (pending_req->operation != BLKIF_OP_READ));
 829
 830        return rc;
 831}
 832
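/*
 * For BLKIF_OP_INDIRECT requests the segment descriptors live in extra
 * grant-mapped pages instead of in the request itself. Map those indirect
 * pages, copy out the gref, offset and length of every segment (validating
 * the sector range as we go), and unmap the indirect pages again.
 */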
 833static int xen_blkbk_parse_indirect(struct blkif_request *req,
 834                                    struct pending_req *pending_req,
 835                                    struct seg_buf seg[],
 836                                    struct phys_req *preq)
 837{
 838        struct grant_page **pages = pending_req->indirect_pages;
 839        struct xen_blkif *blkif = pending_req->blkif;
 840        int indirect_grefs, rc, n, nseg, i;
 841        struct blkif_request_segment_aligned *segments = NULL;
 842
 843        nseg = pending_req->nr_pages;
 844        indirect_grefs = INDIRECT_PAGES(nseg);
 845        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 846
 847        for (i = 0; i < indirect_grefs; i++)
 848                pages[i]->gref = req->u.indirect.indirect_grefs[i];
 849
 850        rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
 851        if (rc)
 852                goto unmap;
 853
 854        for (n = 0, i = 0; n < nseg; n++) {
 855                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
 856                        /* Map indirect segments */
 857                        if (segments)
 858                                kunmap_atomic(segments);
 859                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
 860                }
 861                i = n % SEGS_PER_INDIRECT_FRAME;
 862                pending_req->segments[n]->gref = segments[i].gref;
 863                seg[n].nsec = segments[i].last_sect -
 864                        segments[i].first_sect + 1;
 865                seg[n].offset = (segments[i].first_sect << 9);
 866                if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
 867                    (segments[i].last_sect < segments[i].first_sect)) {
 868                        rc = -EINVAL;
 869                        goto unmap;
 870                }
 871                preq->nr_sects += seg[n].nsec;
 872        }
 873
 874unmap:
 875        if (segments)
 876                kunmap_atomic(segments);
 877        xen_blkbk_unmap(blkif, pages, indirect_grefs);
 878        return rc;
 879}
 880
 881static int dispatch_discard_io(struct xen_blkif *blkif,
 882                                struct blkif_request *req)
 883{
 884        int err = 0;
 885        int status = BLKIF_RSP_OKAY;
 886        struct block_device *bdev = blkif->vbd.bdev;
 887        unsigned long secure;
 888        struct phys_req preq;
 889
 890        preq.sector_number = req->u.discard.sector_number;
 891        preq.nr_sects      = req->u.discard.nr_sectors;
 892
 893        err = xen_vbd_translate(&preq, blkif, WRITE);
 894        if (err) {
 895                pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
 896                        preq.sector_number,
 897                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
 898                goto fail_response;
 899        }
 900        blkif->st_ds_req++;
 901
 902        xen_blkif_get(blkif);
 903        secure = (blkif->vbd.discard_secure &&
 904                 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
 905                 BLKDEV_DISCARD_SECURE : 0;
 906
 907        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
 908                                   req->u.discard.nr_sectors,
 909                                   GFP_KERNEL, secure);
 910fail_response:
 911        if (err == -EOPNOTSUPP) {
 912                pr_debug(DRV_PFX "discard op failed, not supported\n");
 913                status = BLKIF_RSP_EOPNOTSUPP;
 914        } else if (err)
 915                status = BLKIF_RSP_ERROR;
 916
 917        make_response(blkif, req->u.discard.id, req->operation, status);
 918        xen_blkif_put(blkif);
 919        return err;
 920}
 921
 922static int dispatch_other_io(struct xen_blkif *blkif,
 923                             struct blkif_request *req,
 924                             struct pending_req *pending_req)
 925{
 926        free_req(blkif, pending_req);
 927        make_response(blkif, req->u.other.id, req->operation,
 928                      BLKIF_RSP_EOPNOTSUPP);
 929        return -EIO;
 930}
 931
 932static void xen_blk_drain_io(struct xen_blkif *blkif)
 933{
 934        atomic_set(&blkif->drain, 1);
 935        do {
  936                /* The initial value is one, and one refcnt is taken at
  937                 * the start of the xen_blkif_schedule thread. */
 938                if (atomic_read(&blkif->refcnt) <= 2)
 939                        break;
 940                wait_for_completion_interruptible_timeout(
 941                                &blkif->drain_complete, HZ);
 942
 943                if (!atomic_read(&blkif->drain))
 944                        break;
 945        } while (!kthread_should_stop());
 946        atomic_set(&blkif->drain, 0);
 947}
 948
 949/*
  950 * Completion callback on the bios. Called as bio->bi_end_io().
 951 */
 952
 953static void __end_block_io_op(struct pending_req *pending_req, int error)
 954{
 955        /* An error fails the entire request. */
 956        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
 957            (error == -EOPNOTSUPP)) {
 958                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
 959                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 960                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 961        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
 962                    (error == -EOPNOTSUPP)) {
 963                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
 964                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
 965                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 966        } else if (error) {
 967                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
 968                         " error=%d\n", error);
 969                pending_req->status = BLKIF_RSP_ERROR;
 970        }
 971
 972        /*
 973         * If all of the bio's have completed it is time to unmap
 974         * the grant references associated with 'request' and provide
 975         * the proper response on the ring.
 976         */
 977        if (atomic_dec_and_test(&pending_req->pendcnt)) {
 978                xen_blkbk_unmap(pending_req->blkif,
 979                                pending_req->segments,
 980                                pending_req->nr_pages);
 981                make_response(pending_req->blkif, pending_req->id,
 982                              pending_req->operation, pending_req->status);
 983                xen_blkif_put(pending_req->blkif);
 984                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
 985                        if (atomic_read(&pending_req->blkif->drain))
 986                                complete(&pending_req->blkif->drain_complete);
 987                }
 988                free_req(pending_req->blkif, pending_req);
 989        }
 990}
 991
 992/*
 993 * bio callback.
 994 */
 995static void end_block_io_op(struct bio *bio, int error)
 996{
 997        __end_block_io_op(bio->bi_private, error);
 998        bio_put(bio);
 999}
1000
1001
1002
1003/*
 1004 * Function to copy a 'struct blkif_request' from the ring buffer
 1005 * (which has the sectors we want, the number of them, grant references, etc.)
 1006 * and transmute it to the block API to hand it over to the proper block disk.
1007 */
1008static int
1009__do_block_io_op(struct xen_blkif *blkif)
1010{
1011        union blkif_back_rings *blk_rings = &blkif->blk_rings;
1012        struct blkif_request req;
1013        struct pending_req *pending_req;
1014        RING_IDX rc, rp;
1015        int more_to_do = 0;
1016
1017        rc = blk_rings->common.req_cons;
1018        rp = blk_rings->common.sring->req_prod;
1019        rmb(); /* Ensure we see queued requests up to 'rp'. */
1020
1021        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1022                rc = blk_rings->common.rsp_prod_pvt;
1023                pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1024                        rp, rc, rp - rc, blkif->vbd.pdevice);
1025                return -EACCES;
1026        }
1027        while (rc != rp) {
1028
1029                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1030                        break;
1031
1032                if (kthread_should_stop()) {
1033                        more_to_do = 1;
1034                        break;
1035                }
1036
1037                pending_req = alloc_req(blkif);
1038                if (NULL == pending_req) {
1039                        blkif->st_oo_req++;
1040                        more_to_do = 1;
1041                        break;
1042                }
1043
1044                switch (blkif->blk_protocol) {
1045                case BLKIF_PROTOCOL_NATIVE:
1046                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1047                        break;
1048                case BLKIF_PROTOCOL_X86_32:
1049                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1050                        break;
1051                case BLKIF_PROTOCOL_X86_64:
1052                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1053                        break;
1054                default:
1055                        BUG();
1056                }
1057                blk_rings->common.req_cons = ++rc; /* before make_response() */
1058
1059                /* Apply all sanity checks to /private copy/ of request. */
1060                barrier();
1061
1062                switch (req.operation) {
1063                case BLKIF_OP_READ:
1064                case BLKIF_OP_WRITE:
1065                case BLKIF_OP_WRITE_BARRIER:
1066                case BLKIF_OP_FLUSH_DISKCACHE:
1067                case BLKIF_OP_INDIRECT:
1068                        if (dispatch_rw_block_io(blkif, &req, pending_req))
1069                                goto done;
1070                        break;
1071                case BLKIF_OP_DISCARD:
1072                        free_req(blkif, pending_req);
1073                        if (dispatch_discard_io(blkif, &req))
1074                                goto done;
1075                        break;
1076                default:
1077                        if (dispatch_other_io(blkif, &req, pending_req))
1078                                goto done;
1079                        break;
1080                }
1081
1082                /* Yield point for this unbounded loop. */
1083                cond_resched();
1084        }
1085done:
1086        return more_to_do;
1087}
1088
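/*
 * Drain the ring: keep calling __do_block_io_op() until the final check
 * reports no unconsumed requests. RING_FINAL_CHECK_FOR_REQUESTS() also
 * re-arms the event pointer, so the frontend will notify us about the
 * next request it posts.
 */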
1089static int
1090do_block_io_op(struct xen_blkif *blkif)
1091{
1092        union blkif_back_rings *blk_rings = &blkif->blk_rings;
1093        int more_to_do;
1094
1095        do {
1096                more_to_do = __do_block_io_op(blkif);
1097                if (more_to_do)
1098                        break;
1099
1100                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1101        } while (more_to_do);
1102
1103        return more_to_do;
1104}
1105/*
 1106 * Transmute the 'struct blkif_request' into one or more 'struct bio's
 1107 * and call 'submit_bio' to pass them to the underlying storage.
1108 */
1109static int dispatch_rw_block_io(struct xen_blkif *blkif,
1110                                struct blkif_request *req,
1111                                struct pending_req *pending_req)
1112{
1113        struct phys_req preq;
1114        struct seg_buf *seg = pending_req->seg;
1115        unsigned int nseg;
1116        struct bio *bio = NULL;
1117        struct bio **biolist = pending_req->biolist;
1118        int i, nbio = 0;
1119        int operation;
1120        struct blk_plug plug;
1121        bool drain = false;
1122        struct grant_page **pages = pending_req->segments;
1123        unsigned short req_operation;
1124
1125        req_operation = req->operation == BLKIF_OP_INDIRECT ?
1126                        req->u.indirect.indirect_op : req->operation;
1127        if ((req->operation == BLKIF_OP_INDIRECT) &&
1128            (req_operation != BLKIF_OP_READ) &&
1129            (req_operation != BLKIF_OP_WRITE)) {
1130                pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1131                         req_operation);
1132                goto fail_response;
1133        }
1134
1135        switch (req_operation) {
1136        case BLKIF_OP_READ:
1137                blkif->st_rd_req++;
1138                operation = READ;
1139                break;
1140        case BLKIF_OP_WRITE:
1141                blkif->st_wr_req++;
1142                operation = WRITE_ODIRECT;
1143                break;
1144        case BLKIF_OP_WRITE_BARRIER:
1145                drain = true;
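                /* fall through: a barrier is a flush plus a drain of in-flight I/O */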
1146        case BLKIF_OP_FLUSH_DISKCACHE:
1147                blkif->st_f_req++;
1148                operation = WRITE_FLUSH;
1149                break;
1150        default:
1151                operation = 0; /* make gcc happy */
1152                goto fail_response;
1153                break;
1154        }
1155
1156        /* Check that the number of segments is sane. */
1157        nseg = req->operation == BLKIF_OP_INDIRECT ?
1158               req->u.indirect.nr_segments : req->u.rw.nr_segments;
1159
1160        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
1161            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1162                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1163            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1164                     (nseg > MAX_INDIRECT_SEGMENTS))) {
1165                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
1166                         nseg);
1167                /* Haven't submitted any bio's yet. */
1168                goto fail_response;
1169        }
1170
1171        preq.nr_sects      = 0;
1172
1173        pending_req->blkif     = blkif;
1174        pending_req->id        = req->u.rw.id;
1175        pending_req->operation = req_operation;
1176        pending_req->status    = BLKIF_RSP_OKAY;
1177        pending_req->nr_pages  = nseg;
1178
1179        if (req->operation != BLKIF_OP_INDIRECT) {
1180                preq.dev               = req->u.rw.handle;
1181                preq.sector_number     = req->u.rw.sector_number;
1182                for (i = 0; i < nseg; i++) {
1183                        pages[i]->gref = req->u.rw.seg[i].gref;
1184                        seg[i].nsec = req->u.rw.seg[i].last_sect -
1185                                req->u.rw.seg[i].first_sect + 1;
1186                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1187                        if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1188                            (req->u.rw.seg[i].last_sect <
1189                             req->u.rw.seg[i].first_sect))
1190                                goto fail_response;
1191                        preq.nr_sects += seg[i].nsec;
1192                }
1193        } else {
1194                preq.dev               = req->u.indirect.handle;
1195                preq.sector_number     = req->u.indirect.sector_number;
1196                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1197                        goto fail_response;
1198        }
1199
1200        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1201                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
1202                         operation == READ ? "read" : "write",
1203                         preq.sector_number,
1204                         preq.sector_number + preq.nr_sects,
1205                         blkif->vbd.pdevice);
1206                goto fail_response;
1207        }
1208
1209        /*
1210         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1211         * is set there.
1212         */
1213        for (i = 0; i < nseg; i++) {
1214                if (((int)preq.sector_number|(int)seg[i].nsec) &
1215                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
 1216                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
1217                                 blkif->domid);
1218                        goto fail_response;
1219                }
1220        }
1221
 1222        /* Wait for all outstanding I/O and, once that has completed,
 1223         * issue the WRITE_FLUSH.
1224         */
1225        if (drain)
1226                xen_blk_drain_io(pending_req->blkif);
1227
1228        /*
1229         * If we have failed at this point, we need to undo the M2P override,
1230         * set gnttab_set_unmap_op on all of the grant references and perform
1231         * the hypercall to unmap the grants - that is all done in
1232         * xen_blkbk_unmap.
1233         */
1234        if (xen_blkbk_map_seg(pending_req))
1235                goto fail_flush;
1236
1237        /*
 1238         * The corresponding xen_blkif_put is done in __end_block_io_op,
 1239         * once all bios for this request have completed.
1240         */
1241        xen_blkif_get(blkif);
1242
1243        for (i = 0; i < nseg; i++) {
1244                while ((bio == NULL) ||
1245                       (bio_add_page(bio,
1246                                     pages[i]->page,
1247                                     seg[i].nsec << 9,
1248                                     seg[i].offset) == 0)) {
1249
1250                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1251                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1252                        if (unlikely(bio == NULL))
1253                                goto fail_put_bio;
1254
1255                        biolist[nbio++] = bio;
1256                        bio->bi_bdev    = preq.bdev;
1257                        bio->bi_private = pending_req;
1258                        bio->bi_end_io  = end_block_io_op;
1259                        bio->bi_sector  = preq.sector_number;
1260                }
1261
1262                preq.sector_number += seg[i].nsec;
1263        }
1264
 1265        /* This will be hit if the operation was a flush (no data segments). */
1266        if (!bio) {
1267                BUG_ON(operation != WRITE_FLUSH);
1268
1269                bio = bio_alloc(GFP_KERNEL, 0);
1270                if (unlikely(bio == NULL))
1271                        goto fail_put_bio;
1272
1273                biolist[nbio++] = bio;
1274                bio->bi_bdev    = preq.bdev;
1275                bio->bi_private = pending_req;
1276                bio->bi_end_io  = end_block_io_op;
1277        }
1278
1279        atomic_set(&pending_req->pendcnt, nbio);
1280        blk_start_plug(&plug);
1281
1282        for (i = 0; i < nbio; i++)
1283                submit_bio(operation, biolist[i]);
1284
1285        /* Let the I/Os go.. */
1286        blk_finish_plug(&plug);
1287
1288        if (operation == READ)
1289                blkif->st_rd_sect += preq.nr_sects;
1290        else if (operation & WRITE)
1291                blkif->st_wr_sect += preq.nr_sects;
1292
1293        return 0;
1294
1295 fail_flush:
1296        xen_blkbk_unmap(blkif, pending_req->segments,
1297                        pending_req->nr_pages);
1298 fail_response:
1299        /* Haven't submitted any bio's yet. */
1300        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1301        free_req(blkif, pending_req);
1302        msleep(1); /* back off a bit */
1303        return -EIO;
1304
1305 fail_put_bio:
1306        for (i = 0; i < nbio; i++)
1307                bio_put(biolist[i]);
1308        atomic_set(&pending_req->pendcnt, 1);
1309        __end_block_io_op(pending_req, -EINVAL);
1310        msleep(1); /* back off a bit */
1311        return -EIO;
1312}
1313
1314
1315
1316/*
 1317 * Put a response on the ring about how the operation fared.
1318 */
1319static void make_response(struct xen_blkif *blkif, u64 id,
1320                          unsigned short op, int st)
1321{
1322        struct blkif_response  resp;
1323        unsigned long     flags;
1324        union blkif_back_rings *blk_rings = &blkif->blk_rings;
1325        int notify;
1326
1327        resp.id        = id;
1328        resp.operation = op;
1329        resp.status    = st;
1330
1331        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1332        /* Place on the response ring for the relevant domain. */
1333        switch (blkif->blk_protocol) {
1334        case BLKIF_PROTOCOL_NATIVE:
1335                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1336                       &resp, sizeof(resp));
1337                break;
1338        case BLKIF_PROTOCOL_X86_32:
1339                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1340                       &resp, sizeof(resp));
1341                break;
1342        case BLKIF_PROTOCOL_X86_64:
1343                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1344                       &resp, sizeof(resp));
1345                break;
1346        default:
1347                BUG();
1348        }
1349        blk_rings->common.rsp_prod_pvt++;
1350        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1351        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1352        if (notify)
1353                notify_remote_via_irq(blkif->irq);
1354}
1355
1356static int __init xen_blkif_init(void)
1357{
1358        int rc = 0;
1359
1360        if (!xen_domain())
1361                return -ENODEV;
1362
1363        rc = xen_blkif_interface_init();
1364        if (rc)
1365                goto failed_init;
1366
1367        rc = xen_blkif_xenbus_init();
1368        if (rc)
1369                goto failed_init;
1370
1371 failed_init:
1372        return rc;
1373}
1374
1375module_init(xen_blkif_init);
1376
1377MODULE_LICENSE("Dual BSD/GPL");
1378MODULE_ALIAS("xen-backend:vbd");
1379