linux/drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned int i;

        /*
         * We require lowmem mappings for the descriptors because
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
        gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

        desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
        if (!desc)
                return NULL;

        for (i = 0; i < total_sg; i++)
                desc[i].next = i+1;
        return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct scatterlist *sg;
        struct vring_desc *desc;
        unsigned int i, n, avail, descs_used, uninitialized_var(prev);
        int head;
        bool indirect;

        START_USE(vq);

        BUG_ON(data == NULL);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return -EIO;
        }

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with 0.1 seconds between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                            > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        BUG_ON(total_sg > vq->vring.num);
        BUG_ON(total_sg == 0);

        head = vq->free_head;

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && total_sg > 1 && vq->vq.num_free)
                desc = alloc_indirect(total_sg, gfp);
        else
                desc = NULL;

        if (desc) {
                /* Use a single buffer which doesn't continue */
                vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
                vq->vring.desc[head].addr = virt_to_phys(desc);
                /* avoid kmemleak false positive (hidden by virt_to_phys) */
                kmemleak_ignore(desc);
                vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc);

                /* Set up rest to use this indirect table. */
                i = 0;
                descs_used = 1;
                indirect = true;
        } else {
                desc = vq->vring.desc;
                i = head;
                descs_used = total_sg;
                indirect = false;
        }

        if (vq->vq.num_free < descs_used) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         descs_used, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->vq.num_free -= descs_used;

        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        desc[i].flags = VRING_DESC_F_NEXT;
                        desc[i].addr = sg_phys(sg);
                        desc[i].len = sg->length;
                        prev = i;
                        i = desc[i].next;
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                        desc[i].addr = sg_phys(sg);
                        desc[i].len = sg->length;
                        prev = i;
                        i = desc[i].next;
                }
        }
        /* Last one doesn't continue. */
        desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        if (indirect)
                vq->free_head = vq->vring.desc[head].next;
        else
                vq->free_head = i;

        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = (vq->vring.avail->idx & (vq->vring.num-1));
        vq->vring.avail->ring[avail] = head;

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq->weak_barriers);
        vq->vring.avail->idx++;
        vq->num_added++;

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
                      struct scatterlist *sgs[],
                      unsigned int out_sgs,
                      unsigned int in_sgs,
                      void *data,
                      gfp_t gfp)
{
        unsigned int i, total_sg = 0;

        /* Count them first. */
        for (i = 0; i < out_sgs + in_sgs; i++) {
                struct scatterlist *sg;
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_sg++;
        }
        return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
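
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * queuing a request with one device-readable header and one
 * device-writable status byte, roughly as virtio-blk does.  "req" and
 * its fields are hypothetical.
 *
 *      struct scatterlist hdr, status, *sgs[2];
 *      int err;
 *
 *      sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *      sg_init_one(&status, &req->status, sizeof(req->status));
 *      sgs[0] = &hdr;
 *      sgs[1] = &status;
 *      err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *      if (err)
 *              return err;     (typically -ENOSPC when the ring is full)
 *      virtqueue_kick(vq);
 */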

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
                         struct scatterlist *sg, unsigned int num,
                         void *data,
                         gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
                        struct scatterlist *sg, unsigned int num,
                        void *data,
                        gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
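
/*
 * Sketch (illustrative): receive buffers are typically posted with
 * virtqueue_add_inbuf, using the buffer itself as the token that
 * virtqueue_get_buf later hands back.  "page" is hypothetical.
 *
 *      struct scatterlist sg;
 *
 *      sg_init_one(&sg, page_address(page), PAGE_SIZE);
 *      if (virtqueue_add_inbuf(vq, &sg, 1, page, GFP_ATOMIC) < 0)
 *              ring is full; retry after reclaiming used buffers
 */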

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *      if (virtqueue_kick_prepare(vq))
 *              virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq->weak_barriers);

        old = vq->vring.avail->idx - vq->num_added;
        new = vq->vring.avail->idx;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(vring_avail_event(&vq->vring),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
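
/*
 * Sketch of the split-kick pattern this enables (illustrative; the lock
 * is hypothetical): the serialized half runs under the driver's vq lock,
 * while the notify, often an expensive trap to the host, runs outside it.
 *
 *      bool kick;
 *
 *      spin_lock_irqsave(&vq_lock, flags);
 *      err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *      kick = virtqueue_kick_prepare(vq);
 *      spin_unlock_irqrestore(&vq_lock, flags);
 *      if (kick)
 *              virtqueue_notify(vq);
 */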

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        /* Prod other side to tell it about changes. */
        if (!vq->notify(_vq)) {
                vq->broken = true;
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                return virtqueue_notify(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->vq.num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = vq->vring.used->ring[last_used].id;
        *len = vq->vring.used->ring[last_used].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = vq->last_used_idx;
                virtio_mb(vq->weak_barriers);
        }

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
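
/*
 * Typical consumption sketch (illustrative): a completion handler drains
 * every used buffer, then re-enables callbacks and re-checks to close the
 * race with buffers that were used in the meantime.  process() is a
 * hypothetical per-buffer handler.
 *
 *      unsigned int len;
 *      void *token;
 *
 *      do {
 *              virtqueue_disable_cb(vq);
 *              while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *                      process(token, len);
 *      } while (!virtqueue_enable_cb(vq));
 */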

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used_idx;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
        END_USE(vq);
        return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        virtio_mb(vq->weak_barriers);
        return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
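
/*
 * Sketch of the prepare/poll pair (illustrative), for drivers that want
 * to re-check for work after re-enabling callbacks without holding the
 * virtqueue serialized across the check:
 *
 *      unsigned idx = virtqueue_enable_cb_prepare(vq);
 *
 *      if (virtqueue_poll(vq, idx)) {
 *              virtqueue_disable_cb(vq);
 *              keep processing: buffers were used before callbacks came on
 *      }
 */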

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
        return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        /* TODO: tune this threshold */
        bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
        virtio_mb(vq->weak_barriers);
        if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
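
/*
 * Sketch (illustrative): transmit reclaim paths often prefer the delayed
 * form so the device interrupts once per batch instead of per buffer;
 * free_old_buffers() is a hypothetical reclaim helper.
 *
 *      free_old_buffers(vq);
 *      if (!virtqueue_enable_cb_delayed(vq))
 *              free_old_buffers(vq);   (more completed while re-enabling)
 */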

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                vq->vring.avail->idx--;
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = num;
        vq->vq.index = index;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
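
/*
 * Creation sketch (illustrative, roughly what a PCI-style transport does;
 * "info" and my_notify() are hypothetical): the transport allocates the
 * ring pages and supplies its own notify hook.
 *
 *      size = vring_size(num, VIRTIO_PCI_VRING_ALIGN);
 *      info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *      vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *                               true, info->queue, my_notify, callback,
 *                               name);
 *      if (!vq)
 *              free_pages_exact(info->queue, size);
 */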

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
        struct virtqueue *_vq;

        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
                vq->broken = true;
        }
}
EXPORT_SYMBOL_GPL(virtio_break_device);

MODULE_LICENSE("GPL");