uboot/drivers/virtio/virtio_ring.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * virtio ring implementation
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/bug.h>
#include <linux/compat.h>

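/*
 * virtqueue_add() - expose buffers to the device
 *
 * Chain @out_sgs device-readable followed by @in_sgs device-writable
 * scatterlists into free descriptors, put the chain head into the
 * available ring and publish the new avail index after a write barrier.
 *
 * Return: 0 on success, -ENOSPC if not enough free descriptors are left.
 */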
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
                  unsigned int out_sgs, unsigned int in_sgs)
{
        struct vring_desc *desc;
        unsigned int total_sg = out_sgs + in_sgs;
        unsigned int i, n, avail, descs_used, uninitialized_var(prev);
        int head;

        WARN_ON(total_sg == 0);

        head = vq->free_head;

        desc = vq->vring.desc;
        i = head;
        descs_used = total_sg;

        if (vq->num_free < descs_used) {
                debug("Can't add buf len %i - avail = %i\n",
                      descs_used, vq->num_free);
                /*
                 * FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP.
                 */
                if (out_sgs)
                        virtio_notify(vq->vdev, vq);
                return -ENOSPC;
        }

        /* Chain the device-readable (driver to device) buffers first */
        for (n = 0; n < out_sgs; n++) {
                struct virtio_sg *sg = sgs[n];

                desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
                desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
                desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

                prev = i;
                i = virtio16_to_cpu(vq->vdev, desc[i].next);
        }
        /* Then the device-writable (device to driver) buffers */
        for (; n < (out_sgs + in_sgs); n++) {
                struct virtio_sg *sg = sgs[n];

                desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
                                                VRING_DESC_F_WRITE);
                desc[i].addr = cpu_to_virtio64(vq->vdev,
                                               (u64)(uintptr_t)sg->addr);
                desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

                prev = i;
                i = virtio16_to_cpu(vq->vdev, desc[i].next);
        }
        /* Last one doesn't continue */
        desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);

        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;

        /* Update free pointer */
        vq->free_head = i;

        /*
         * Put entry in available array (but don't update avail->idx
         * until they do sync).
         */
        avail = vq->avail_idx_shadow & (vq->vring.num - 1);
        vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

        /*
         * Descriptors and available array need to be set before we expose the
         * new available array entries.
         */
        virtio_wmb();
        vq->avail_idx_shadow++;
        vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
        vq->num_added++;

        /*
         * This is very unlikely, but theoretically possible.
         * Kick just in case.
         */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(vq);

        return 0;
}

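/*
 * virtqueue_kick_prepare() - check whether the device must be notified
 *
 * With VIRTIO_RING_F_EVENT_IDX negotiated, compare the device's avail event
 * index against the range of entries added since the last kick; otherwise
 * honour the VRING_USED_F_NO_NOTIFY flag in the used ring.
 */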
static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
        u16 new, old;
        bool needs_kick;

        /*
         * We need to expose available array entries before checking
         * avail event.
         */
        virtio_mb();

        old = vq->avail_idx_shadow - vq->num_added;
        new = vq->avail_idx_shadow;
        vq->num_added = 0;

        if (vq->event) {
                needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
                                vring_avail_event(&vq->vring)), new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
                                VRING_USED_F_NO_NOTIFY));
        }

        return needs_kick;
}

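/* virtqueue_kick() - notify the device if required after adding buffers */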
void virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                virtio_notify(vq->vdev, vq);
}

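/*
 * detach_buf() - give a used descriptor chain back to the free list
 *
 * Walk the VRING_DESC_F_NEXT links starting at @head, count the freed
 * descriptors and splice the chain onto the head of the free list.
 */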
static void detach_buf(struct virtqueue *vq, unsigned int head)
{
        unsigned int i;
        __virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);

        /* Put back on free list: unmap first-level descriptors and find end */
        i = head;

        while (vq->vring.desc[i].flags & nextflag) {
                i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
                vq->num_free++;
        }

        vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
        vq->free_head = head;

        /* Plus final descriptor */
        vq->num_free++;
}

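/* more_used() - check whether the device has added entries to the used ring */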
static inline bool more_used(const struct virtqueue *vq)
{
        return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
                        vq->vring.used->idx);
}

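/*
 * virtqueue_get_buf() - get the next completed buffer, if any
 *
 * Read the next entry from the used ring, recycle its descriptor chain and
 * return the buffer address recorded in the head descriptor; the number of
 * bytes written by the device is stored in @len when it is non-NULL.
 *
 * Return: the buffer address, or NULL if no used buffer is pending or the
 * reported id is out of range.
 */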
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
        unsigned int i;
        u16 last_used;

        if (!more_used(vq)) {
                debug("(%s.%d): No more buffers in queue\n",
                      vq->vdev->name, vq->index);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host */
        virtio_rmb();

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
        if (len) {
                *len = virtio32_to_cpu(vq->vdev,
                                       vq->vring.used->ring[last_used].len);
                debug("(%s.%d): last used idx %u with len %u\n",
                      vq->vdev->name, vq->index, i, *len);
        }

        if (unlikely(i >= vq->vring.num)) {
                printf("(%s.%d): id %u out of range\n",
                       vq->vdev->name, vq->index, i);
                return NULL;
        }

        detach_buf(vq, i);
        vq->last_used_idx++;
        /*
         * If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call.
         */
        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
                virtio_store_mb(&vring_used_event(&vq->vring),
                                cpu_to_virtio16(vq->vdev, vq->last_used_idx));

        return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
                                                  vq->vring.desc[i].addr);
}

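/*
 * __vring_new_virtqueue() - wrap an initialized vring in a new virtqueue
 *
 * Allocate and initialize the virtqueue state, add it to the uclass's queue
 * list, suppress used-buffer notifications by default and link all
 * descriptors into the free list.
 */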
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                               struct vring vring,
                                               struct udevice *udev)
{
        unsigned int i;
        struct virtqueue *vq;
        struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
        struct udevice *vdev = uc_priv->vdev;

        vq = malloc(sizeof(*vq));
        if (!vq)
                return NULL;

        vq->vdev = vdev;
        vq->index = index;
        vq->num_free = vring.num;
        vq->vring = vring;
        vq->last_used_idx = 0;
        vq->avail_flags_shadow = 0;
        vq->avail_idx_shadow = 0;
        vq->num_added = 0;
        list_add_tail(&vq->list, &uc_priv->vqs);

        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* Tell other side not to bother us */
        vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
        if (!vq->event)
                vq->vring.avail->flags = cpu_to_virtio16(vdev,
                                vq->avail_flags_shadow);

        /* Put everything in free lists */
        vq->free_head = 0;
        for (i = 0; i < vring.num - 1; i++)
                vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);

        return vq;
}

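/*
 * vring_create_virtqueue() - allocate ring memory and create a virtqueue
 *
 * @num must be a power of two; it is halved as needed when a page-aligned
 * allocation of the full ring does not succeed. The ring memory is zeroed,
 * laid out with vring_init() and handed to __vring_new_virtqueue().
 *
 * Return: the new virtqueue, or NULL on failure.
 */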
struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
                                         unsigned int vring_align,
                                         struct udevice *udev)
{
        struct virtqueue *vq;
        void *queue = NULL;
        struct vring vring;

        /* We assume num is a power of 2 */
        if (num & (num - 1)) {
                printf("Bad virtqueue length %u\n", num);
                return NULL;
        }

        /* TODO: allocate each queue chunk individually */
        for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
                queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
                if (queue)
                        break;
        }

        if (!num)
                return NULL;

        if (!queue) {
                /* Try to get a single page. You are my only hope! */
                queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
        }
        if (!queue)
                return NULL;

        memset(queue, 0, vring_size(num, vring_align));
        vring_init(&vring, num, queue, vring_align);

        vq = __vring_new_virtqueue(index, vring, udev);
        if (!vq) {
                free(queue);
                return NULL;
        }
        debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
              queue, vq, num);

        return vq;
}

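/* vring_del_virtqueue() - unlink a virtqueue and free its ring and state */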
void vring_del_virtqueue(struct virtqueue *vq)
{
        free(vq->vring.desc);
        list_del(&vq->list);
        free(vq);
}

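/* virtqueue_get_vring_size() - return the number of descriptors in the ring */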
unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
{
        return vq->vring.num;
}

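/*
 * The following helpers return the addresses of the descriptor table,
 * available ring and used ring; the avail and used addresses are computed
 * as offsets from the descriptor table base.
 */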
ulong virtqueue_get_desc_addr(struct virtqueue *vq)
{
        return (ulong)vq->vring.desc;
}

ulong virtqueue_get_avail_addr(struct virtqueue *vq)
{
        return (ulong)vq->vring.desc +
               ((char *)vq->vring.avail - (char *)vq->vring.desc);
}

ulong virtqueue_get_used_addr(struct virtqueue *vq)
{
        return (ulong)vq->vring.desc +
               ((char *)vq->vring.used - (char *)vq->vring.desc);
}

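/*
 * virtqueue_poll() - check whether the used index has changed since
 * @last_used_idx was sampled, with a full barrier before the read
 */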
bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
{
        virtio_mb();

        return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
}

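/* virtqueue_dump() - print the queue state and all three rings for debugging */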
void virtqueue_dump(struct virtqueue *vq)
{
        unsigned int i;

        printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
        printf("\tindex %u, phys addr %p num %u\n",
               vq->index, vq->vring.desc, vq->vring.num);
        printf("\tfree_head %u, num_added %u, num_free %u\n",
               vq->free_head, vq->num_added, vq->num_free);
        printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
               vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);

        printf("Descriptor dump:\n");
        for (i = 0; i < vq->vring.num; i++) {
                printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
                       i, vq->vring.desc[i].addr, vq->vring.desc[i].len,
                       vq->vring.desc[i].flags, vq->vring.desc[i].next);
        }

        printf("Avail ring dump:\n");
        printf("\tflags %u, idx %u\n",
               vq->vring.avail->flags, vq->vring.avail->idx);
        for (i = 0; i < vq->vring.num; i++) {
                printf("\tavail[%u] = %u\n",
                       i, vq->vring.avail->ring[i]);
        }

        printf("Used ring dump:\n");
        printf("\tflags %u, idx %u\n",
               vq->vring.used->flags, vq->vring.used->idx);
        for (i = 0; i < vq->vring.num; i++) {
                printf("\tused[%u] = { %u, %u }\n", i,
                       vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
        }
}