linux/kernel/bpf/ringbuf.c
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
        (offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and take
 * into account a few extra pages for consumer/producer pages and
 * non-mmap()'able parts. This gives a 64GB limit, which seems plenty for a
 * single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
        (((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

struct bpf_ringbuf {
        wait_queue_head_t waitq;
        struct irq_work work;
        u64 mask;
        struct page **pages;
        int nr_pages;
        spinlock_t spinlock ____cacheline_aligned_in_smp;
        /* Consumer and producer counters are put into separate pages to allow
         * mapping consumer page as r/w, but restrict producer page to r/o.
         * This protects producer position from being modified by user-space
         * application and ruining in-kernel position tracking.
         */
        unsigned long consumer_pos __aligned(PAGE_SIZE);
        unsigned long producer_pos __aligned(PAGE_SIZE);
        char data[] __aligned(PAGE_SIZE);
};

struct bpf_ringbuf_map {
        struct bpf_map map;
        struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
        u32 len;
        u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
        const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
                            __GFP_NOWARN | __GFP_ZERO;
        int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
        int nr_data_pages = data_sz >> PAGE_SHIFT;
        int nr_pages = nr_meta_pages + nr_data_pages;
        struct page **pages, *page;
        struct bpf_ringbuf *rb;
        size_t array_size;
        int i;

        /* Each data page is mapped twice to allow "virtual"
         * continuous read of samples wrapping around the end of ring
         * buffer area:
         * ------------------------------------------------------
         * | meta pages |  real data pages  |  same data pages  |
         * ------------------------------------------------------
         * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
         * ------------------------------------------------------
         * |            | TA             DA | TA             DA |
         * ------------------------------------------------------
         *                               ^^^^^^^
         *                                  |
         * Here, no need to worry about special handling of wrapped-around
         * data due to double-mapped data pages. This works both in kernel and
         * when mmap()'ed in user-space, simplifying both kernel and
         * user-space implementations significantly.
         */
        array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
        pages = bpf_map_area_alloc(array_size, numa_node);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++) {
                page = alloc_pages_node(numa_node, flags, 0);
                if (!page) {
                        nr_pages = i;
                        goto err_free_pages;
                }
                pages[i] = page;
                if (i >= nr_meta_pages)
                        pages[nr_data_pages + i] = page;
        }

        rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
                  VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
        if (rb) {
                kmemleak_not_leak(pages);
                rb->pages = pages;
                rb->nr_pages = nr_pages;
                return rb;
        }

err_free_pages:
        for (i = 0; i < nr_pages; i++)
                __free_page(pages[i]);
        kvfree(pages);
        return NULL;
}

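/* irq_work callback: wake up all consumers sleeping in poll()/epoll on this
 * ring buffer. The wakeup is deferred through irq_work so that producers
 * running in any context (including NMI) can request it safely.
 */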
static void bpf_ringbuf_notify(struct irq_work *work)
{
        struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

        wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
        struct bpf_ringbuf *rb;

        rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
        if (!rb)
                return NULL;

        spin_lock_init(&rb->spinlock);
        init_waitqueue_head(&rb->waitq);
        init_irq_work(&rb->work, bpf_ringbuf_notify);

        rb->mask = data_sz - 1;
        rb->consumer_pos = 0;
        rb->producer_pos = 0;

        return rb;
}

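/* Create a BPF_MAP_TYPE_RINGBUF map. There are no keys or values;
 * attr->max_entries is the size of the data area in bytes and must be a
 * page-aligned power of 2 so that (pos & rb->mask) indexing works.
 */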
static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
        struct bpf_ringbuf_map *rb_map;

        if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        if (attr->key_size || attr->value_size ||
            !is_power_of_2(attr->max_entries) ||
            !PAGE_ALIGNED(attr->max_entries))
                return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
        /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
        if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
                return ERR_PTR(-E2BIG);
#endif

        rb_map = kzalloc(sizeof(*rb_map), GFP_USER | __GFP_ACCOUNT);
        if (!rb_map)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&rb_map->map, attr);

        rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
        if (!rb_map->rb) {
                kfree(rb_map);
                return ERR_PTR(-ENOMEM);
        }

        return &rb_map->map;
}

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
        /* copy pages pointer and nr_pages to local variable, as we are going
         * to unmap rb itself with vunmap() below
         */
        struct page **pages = rb->pages;
        int i, nr_pages = rb->nr_pages;

        vunmap(rb);
        for (i = 0; i < nr_pages; i++)
                __free_page(pages[i]);
        kvfree(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        bpf_ringbuf_free(rb_map->rb);
        kfree(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
                                   u64 flags)
{
        return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
        return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
                                    void *next_key)
{
        return -ENOTSUPP;
}

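/* mmap() the ring buffer into user-space. Only the consumer position page may
 * be mapped writable; the producer position page and the (double-mapped) data
 * pages are exposed read-only, so user-space cannot corrupt in-kernel
 * bookkeeping.
 */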
static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);

        if (vma->vm_flags & VM_WRITE) {
                /* allow writable mapping for the consumer_pos only */
                if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EPERM;
        } else {
                vma->vm_flags &= ~VM_MAYWRITE;
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb,
                                   vma->vm_pgoff + RINGBUF_PGOFF);
}

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
        unsigned long cons_pos, prod_pos;

        cons_pos = smp_load_acquire(&rb->consumer_pos);
        prod_pos = smp_load_acquire(&rb->producer_pos);
        return prod_pos - cons_pos;
}

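/* poll()/epoll support: readiness is reported whenever there is unconsumed
 * data; wakeups are delivered through bpf_ringbuf_notify() once producers
 * commit records.
 */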
static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
                                 struct poll_table_struct *pts)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        poll_wait(filp, &rb_map->rb->waitq, pts);

        if (ringbuf_avail_data_sz(rb_map->rb))
                return EPOLLIN | EPOLLRDNORM;
        return 0;
}

static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = ringbuf_map_alloc,
        .map_free = ringbuf_map_free,
        .map_mmap = ringbuf_map_mmap,
        .map_poll = ringbuf_map_poll,
        .map_lookup_elem = ringbuf_map_lookup_elem,
        .map_update_elem = ringbuf_map_update_elem,
        .map_delete_elem = ringbuf_map_delete_elem,
        .map_get_next_key = ringbuf_map_get_next_key,
        .map_btf_name = "bpf_ringbuf_map",
        .map_btf_id = &ringbuf_map_btf_id,
};

/* Given a pointer to ring buffer record metadata and struct bpf_ringbuf itself,
 * calculate the offset from the record metadata to the ring buffer, in pages,
 * rounded down. This page offset is stored as part of the record metadata and
 * makes it possible to restore the struct bpf_ringbuf * from a record pointer.
 * It is stored at offset 4 of the record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
                                     struct bpf_ringbuf_hdr *hdr)
{
        return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given a pointer to a ring buffer record header, restore the pointer to
 * struct bpf_ringbuf itself by using the page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
        unsigned long addr = (unsigned long)(void *)hdr;
        unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

        return (void *)((addr & PAGE_MASK) - off);
}

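/* Reserve @size bytes of payload (plus an 8-byte header) in the ring buffer.
 * Producers are serialized with rb->spinlock; in NMI context only a trylock
 * is attempted, so reservation may fail even if space is available. The
 * record header is published with the busy bit set (the consumer stops at a
 * busy record until it is committed) and producer_pos is advanced with a
 * store-release that pairs with the consumer's load-acquire.
 */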
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
        unsigned long cons_pos, prod_pos, new_prod_pos, flags;
        u32 len, pg_off;
        struct bpf_ringbuf_hdr *hdr;

        if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
                return NULL;

        len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
        if (len > rb->mask + 1)
                return NULL;

        cons_pos = smp_load_acquire(&rb->consumer_pos);

        if (in_nmi()) {
                if (!spin_trylock_irqsave(&rb->spinlock, flags))
                        return NULL;
        } else {
                spin_lock_irqsave(&rb->spinlock, flags);
        }

        prod_pos = rb->producer_pos;
        new_prod_pos = prod_pos + len;

        /* check for out of ringbuf space by ensuring producer position
         * doesn't advance more than (ringbuf_size - 1) ahead
         */
        if (new_prod_pos - cons_pos > rb->mask) {
                spin_unlock_irqrestore(&rb->spinlock, flags);
                return NULL;
        }

        hdr = (void *)rb->data + (prod_pos & rb->mask);
        pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
        hdr->len = size | BPF_RINGBUF_BUSY_BIT;
        hdr->pg_off = pg_off;

        /* pairs with consumer's smp_load_acquire() */
        smp_store_release(&rb->producer_pos, new_prod_pos);

        spin_unlock_irqrestore(&rb->spinlock, flags);

        return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
        struct bpf_ringbuf_map *rb_map;

        if (unlikely(flags))
                return 0;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
        .func           = bpf_ringbuf_reserve,
        .ret_type       = RET_PTR_TO_ALLOC_MEM_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_CONST_ALLOC_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};

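/* Common path for bpf_ringbuf_submit() and bpf_ringbuf_discard(): clear the
 * busy bit in the record header (and set the discard bit, if discarding)
 * with an atomic xchg() so the consumer observes the final length in one go,
 * then queue the wakeup irq_work: always if BPF_RB_FORCE_WAKEUP is set, never
 * if BPF_RB_NO_WAKEUP is set, and otherwise only if the consumer has caught
 * up to exactly this record.
 */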
static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
        unsigned long rec_pos, cons_pos;
        struct bpf_ringbuf_hdr *hdr;
        struct bpf_ringbuf *rb;
        u32 new_len;

        hdr = sample - BPF_RINGBUF_HDR_SZ;
        rb = bpf_ringbuf_restore_from_rec(hdr);
        new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
        if (discard)
                new_len |= BPF_RINGBUF_DISCARD_BIT;

        /* update record header with correct final size prefix */
        xchg(&hdr->len, new_len);

        /* if consumer caught up and is waiting for our record, notify about
         * new data availability
         */
        rec_pos = (void *)hdr - (void *)rb->data;
        cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

        if (flags & BPF_RB_FORCE_WAKEUP)
                irq_work_queue(&rb->work);
        else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
                irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
        bpf_ringbuf_commit(sample, flags, false /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
        .func           = bpf_ringbuf_submit,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_ALLOC_MEM,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
        bpf_ringbuf_commit(sample, flags, true /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
        .func           = bpf_ringbuf_discard,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_ALLOC_MEM,
        .arg2_type      = ARG_ANYTHING,
};

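/* Copy-based counterpart to reserve/submit: reserve space, memcpy() the
 * sample in and commit it in a single helper call. Simpler to use, but the
 * extra copy makes it slower than the zero-copy reserve/commit API for large
 * samples.
 */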
BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
           u64, flags)
{
        struct bpf_ringbuf_map *rb_map;
        void *rec;

        if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
                return -EINVAL;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        rec = __bpf_ringbuf_reserve(rb_map->rb, size);
        if (!rec)
                return -EAGAIN;

        memcpy(rec, data, size);
        bpf_ringbuf_commit(rec, flags, false /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
        .func           = bpf_ringbuf_output,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

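/* Query ring buffer state. Depending on flags, return the amount of
 * unconsumed data, the total ring size, or the current logical (monotonically
 * increasing) consumer/producer positions.
 */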
BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
        struct bpf_ringbuf *rb;

        rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

        switch (flags) {
        case BPF_RB_AVAIL_DATA:
                return ringbuf_avail_data_sz(rb);
        case BPF_RB_RING_SIZE:
                return rb->mask + 1;
        case BPF_RB_CONS_POS:
                return smp_load_acquire(&rb->consumer_pos);
        case BPF_RB_PROD_POS:
                return smp_load_acquire(&rb->producer_pos);
        default:
                return 0;
        }
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
        .func           = bpf_ringbuf_query,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
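
/* Illustrative sketch only (not part of this file): roughly how a BPF program
 * would use the helpers above from the BPF side, assuming libbpf's
 * bpf_helpers.h. The map definition, section name and "struct event" layout
 * are hypothetical.
 *
 *	struct event {
 *		int pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 256 * 1024); // page-aligned power of 2
 *	} rb SEC(".maps");
 *
 *	SEC("tp/sched/sched_process_exec")
 *	int handle_exec(void *ctx)
 *	{
 *		struct event *e;
 *
 *		// zero-copy reservation; NULL means no space is available
 *		e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *		if (!e)
 *			return 0;
 *
 *		e->pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e->comm, sizeof(e->comm));
 *
 *		// make the record visible (or bpf_ringbuf_discard(e, 0) to drop it)
 *		bpf_ringbuf_submit(e, 0);
 *		return 0;
 *	}
 */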