linux/drivers/block/xen-blkback/common.h
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...)                           \
        pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",          \
                 __func__, __LINE__, ##args)

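/*
 * Example (hypothetical call site): the macro expands to a pr_debug()
 * line tagged with the driver prefix, function name and line number, so
 *
 *         DPRINTK("ring full, %d reqs pending", nr);
 *
 * would log something like
 * "xen-blkback:(__do_block_io_op:123) ring full, 3 reqs pending."
 */
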
/*
 * The maximum number of segments allowed in an indirect request. This
 * value is also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

#define SEGS_PER_INDIRECT_FRAME \
        (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
        (((_segs) + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

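/*
 * Worked example (assuming 4 KiB pages and an 8-byte
 * blkif_request_segment_aligned): SEGS_PER_INDIRECT_FRAME = 4096/8 = 512,
 * so the 256 segments of MAX_INDIRECT_SEGMENTS fit in a single indirect
 * page: MAX_INDIRECT_PAGES = (256 + 511)/512 = 1, and likewise
 * INDIRECT_PAGES(256) = 1.
 */
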
/*
 * Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.
 */
struct blkif_common_request {
        char dummy;
};
struct blkif_common_response {
        char dummy;
};

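/*
 * Illustrative consequence (a sketch, not a quote of the backend code):
 * paths that only touch producer/consumer indices can go through the
 * common view of the ring union defined below, whatever protocol is
 * active, e.g.:
 *
 *         rp = blkif->blk_rings.common.sring->req_prod;
 *         blkif->blk_rings.common.req_cons = ++rc;
 */
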
struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk             */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)

/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk             */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);

union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};

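/*
 * Sketch of how the enum selects a view of the union when consuming a
 * request (illustrative, with blk_rings = &blkif->blk_rings and rc the
 * consumer index):
 *
 *         switch (blkif->blk_protocol) {
 *         case BLKIF_PROTOCOL_NATIVE:
 *                 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *                        sizeof(req));
 *                 break;
 *         case BLKIF_PROTOCOL_X86_32:
 *                 blkif_get_x86_32_req(&req,
 *                                 RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *                 break;
 *         case BLKIF_PROTOCOL_X86_64:
 *                 blkif_get_x86_64_req(&req,
 *                                 RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *                 break;
 *         }
 */
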
struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* Physical device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used recently. The flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, to record that the grant was used
 * since the last purge scan.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1

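/*
 * Illustrative flag handling (a sketch of the intended protocol, not a
 * quote of the backend code):
 *
 *         if (test_and_set_bit(PERSISTENT_GNT_ACTIVE, grant->flags))
 *                 return NULL;    // grant already claimed by a request
 *         ...
 *         set_bit(PERSISTENT_GNT_WAS_ACTIVE, grant->flags);
 *         clear_bit(PERSISTENT_GNT_ACTIVE, grant->flags);
 */
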
/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS                  32

struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};

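/*
 * The rb_node links each grant into the per-blkif persistent_gnts tree,
 * keyed by grant reference. A minimal lookup sketch (the helper name is
 * hypothetical):
 *
 *         static struct persistent_gnt *find_gnt(struct rb_root *root,
 *                                                grant_ref_t gref)
 *         {
 *                 struct rb_node *n = root->rb_node;
 *
 *                 while (n) {
 *                         struct persistent_gnt *p =
 *                                 container_of(n, struct persistent_gnt, node);
 *
 *                         if (gref < p->gnt)
 *                                 n = n->rb_left;
 *                         else if (gref > p->gnt)
 *                                 n = n->rb_right;
 *                         else
 *                                 return p;
 *                 }
 *                 return NULL;
 *         }
 */
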
struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        /* Private fields. */
        spinlock_t              blk_ring_lock;
        atomic_t                refcnt;

        wait_queue_head_t       wq;
        /* for barrier (drain) requests */
        struct completion       drain_complete;
        atomic_t                drain;
        /* One thread per blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* tree to store persistent grants */
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;

        /* used by the kworker that offloads work from the persistent purge */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* buffer of free pages to map grant refs */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* statistics */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        wait_queue_head_t       waiting_to_free;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
        int                     nr_pages;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
};

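/*
 * Completion sketch for the comment above (the responder name is
 * illustrative; only fields declared in this header are used):
 *
 *         if (atomic_dec_and_test(&pending_req->pendcnt)) {
 *                 make_response(pending_req->blkif, pending_req->id,
 *                               pending_req->operation, pending_req->status);
 *                 ... recycle pending_req onto blkif->pending_free ...
 *         }
 */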

#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                          get_capacity((_v)->bdev->bd_disk))
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        wake_up(&(_b)->waiting_to_free);\
        } while (0)

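/*
 * The final xen_blkif_put() wakes waiting_to_free; a hedged sketch of
 * the matching teardown wait (the real wait site lives in the
 * implementation files):
 *
 *         xen_blkif_put(blkif);
 *         wait_event(blkif->waiting_to_free,
 *                    atomic_read(&blkif->refcnt) == 0);
 */
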
struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                /*
                 * Keep the compiler from re-reading nr_segments from the
                 * shared ring after this point; the bounds check below
                 * must use the copied value, which the frontend cannot
                 * change under our feet.
                 */
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                /* As above: use only the copied nr_segments from here on. */
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */