// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage.  This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
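/* For orientation, a minimal sketch of the BPF side that redirects
 * into this map type.  This is not part of this file; the map name,
 * destination CPU index, and SEC() placement are illustrative
 * assumptions:
 *
 *      struct {
 *              __uint(type, BPF_MAP_TYPE_CPUMAP);
 *              __uint(key_size, sizeof(__u32));
 *              __uint(value_size, sizeof(struct bpf_cpumap_val));
 *              __uint(max_entries, 64);
 *      } cpu_map SEC(".maps");
 *
 *      SEC("xdp")
 *      int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *      {
 *              __u32 dest_cpu = 2; // hypothetical destination CPU
 *              return bpf_redirect_map(&cpu_map, dest_cpu, 0);
 *      }
 */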
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_core */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU
 * will at most be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */
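/* Sketch of the driver-side flow this relies on (illustrative
 * pseudocode, not a real driver; for_each_rx_desc() stands in for the
 * driver's RX descriptor loop):
 *
 *      // inside the driver's napi ->poll(), all on one CPU:
 *      for_each_rx_desc(...) {
 *              act = bpf_prog_run_xdp(prog, &xdp);
 *              if (act == XDP_REDIRECT)
 *                      xdp_do_redirect(dev, &xdp, prog);
 *                      // may end up in cpu_map_enqueue()
 *      }
 *      xdp_do_flush_map(); // drains cpu_map_flush_list via __cpu_map_flush()
 */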

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
        void *q[CPU_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct bpf_cpu_map_entry *obj;
        unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
        u32 cpu;    /* kthread CPU and map index */
        int map_id; /* Back reference to map */

        /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
        struct xdp_bulk_queue __percpu *bulkq;

        struct bpf_cpu_map *cmap;

        /* Queue with potential multi-producers, and single-consumer kthread */
        struct ptr_ring *queue;
        struct task_struct *kthread;

        struct bpf_cpumap_val value;
        struct bpf_prog *prog;

        atomic_t refcnt; /* Control when this struct can be freed */
        struct rcu_head rcu;

        struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
        struct bpf_map map;
        /* Below members specific for map type */
        struct bpf_cpu_map_entry **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;
        u64 cost;
        int ret;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
             value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
            attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);

        cmap = kzalloc(sizeof(*cmap), GFP_USER);
        if (!cmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&cmap->map, attr);

        /* Pre-limit array size based on NR_CPUS, not final CPU check */
        if (cmap->map.max_entries > NR_CPUS) {
                err = -E2BIG;
                goto free_cmap;
        }

        /* make sure page count doesn't overflow */
        cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);

        /* Notice, returns -EPERM if map size is larger than memlock limit */
        ret = bpf_map_charge_init(&cmap->map.memory, cost);
        if (ret) {
                err = ret;
                goto free_cmap;
        }

        /* Alloc array for possible remote "destination" CPUs */
        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map)
                goto free_charge;

        return &cmap->map;
free_charge:
        bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
        kfree(cmap);
        return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to work around syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
        struct bpf_cpu_map_entry *rcpu;

        rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

        /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
         * as it waits until all in-flight call_rcu() callbacks complete.
         */
        rcu_barrier();

        /* kthread_stop will wake_up_process and wait for it to complete */
        kthread_stop(rcpu->kthread);
}

static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
                                         struct sk_buff *skb)
{
        unsigned int hard_start_headroom;
        unsigned int frame_size;
        void *pkt_data_start;

        /* Part of headroom was reserved to xdpf */
        hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;

        /* Memory size backing xdp_frame data already has reserved
         * room for build_skb to place skb_shared_info in tailroom.
         */
        frame_size = xdpf->frame_sz;

        pkt_data_start = xdpf->data - hard_start_headroom;
        skb = build_skb_around(skb, pkt_data_start, frame_size);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, hard_start_headroom);
        __skb_put(skb, xdpf->len);
        if (xdpf->metasize)
                skb_metadata_set(skb, xdpf->metasize);

        /* Essential SKB info: protocol and skb->dev */
        skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

        /* Optional SKB info, currently missing:
         * - HW checksum info           (skb->ip_summed)
         * - HW RX hash                 (skb_set_hash)
         * - RX ring dev queue index    (skb_record_rx_queue)
         */

        /* Until page_pool gets an SKB return path, release DMA here */
        xdp_release_frame(xdpf);

        /* Allow SKB to reuse area used by xdp_frame */
        xdp_scrub_frame(xdpf);

        return skb;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
        /* The tear-down procedure should have made sure that the queue is
         * empty.  See __cpu_map_entry_replace() and the work-queue
         * invoked cpu_map_kthread_stop(). Catch any broken behaviour
         * gracefully and warn once.
         */
        struct xdp_frame *xdpf;

        while ((xdpf = ptr_ring_consume(ring)))
                if (WARN_ON_ONCE(xdpf))
                        xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        if (atomic_dec_and_test(&rcpu->refcnt)) {
                if (rcpu->prog)
                        bpf_prog_put(rcpu->prog);
                /* The queue should be empty at this point */
                __cpu_map_ring_cleanup(rcpu->queue);
                ptr_ring_cleanup(rcpu->queue, NULL);
                kfree(rcpu->queue);
                kfree(rcpu);
        }
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
                                    void **frames, int n,
                                    struct xdp_cpumap_stats *stats)
{
        struct xdp_rxq_info rxq;
        struct xdp_buff xdp;
        int i, nframes = 0;

        if (!rcpu->prog)
                return n;

        rcu_read_lock_bh();

        xdp_set_return_frame_no_direct();
        xdp.rxq = &rxq;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                u32 act;
                int err;

                rxq.dev = xdpf->dev_rx;
                rxq.mem = xdpf->mem;
                /* TODO: report queue_index to xdp_rxq_info */

                xdp_convert_frame_to_buff(xdpf, &xdp);

                act = bpf_prog_run_xdp(rcpu->prog, &xdp);
                switch (act) {
                case XDP_PASS:
                        err = xdp_update_frame_from_buff(&xdp, xdpf);
                        if (err < 0) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                frames[nframes++] = xdpf;
                                stats->pass++;
                        }
                        break;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(xdpf->dev_rx, &xdp,
                                              rcpu->prog);
                        if (unlikely(err)) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                stats->redirect++;
                        }
                        break;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_DROP:
                        xdp_return_frame(xdpf);
                        stats->drop++;
                        break;
                }
        }

        if (stats->redirect)
                xdp_do_flush_map();

        xdp_clear_return_frame_no_direct();

        rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

        return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
        struct bpf_cpu_map_entry *rcpu = data;

        set_current_state(TASK_INTERRUPTIBLE);

        /* When the kthread is given a stop order, the rcpu has been
         * disconnected from the map, thus no new packets can enter.
         * Remaining in-flight per-CPU stored packets are flushed to this
         * queue.  Wait honoring the kthread_stop signal until the queue
         * is empty.
         */
        while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
                struct xdp_cpumap_stats stats = {}; /* zero stats */
                gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
                unsigned int drops = 0, sched = 0;
                void *frames[CPUMAP_BATCH];
                void *skbs[CPUMAP_BATCH];
                int i, n, m, nframes;

                /* Release CPU reschedule checks */
                if (__ptr_ring_empty(rcpu->queue)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        /* Recheck to avoid lost wake-up */
                        if (__ptr_ring_empty(rcpu->queue)) {
                                schedule();
                                sched = 1;
                        } else {
                                __set_current_state(TASK_RUNNING);
                        }
                } else {
                        sched = cond_resched();
                }

                /*
                 * The bpf_cpu_map_entry is a single consumer, with this
                 * kthread pinned to one CPU. Lockless access to the
                 * ptr_ring consume side is valid, as no resizing of the
                 * queue is allowed.
                 */
                n = __ptr_ring_consume_batched(rcpu->queue, frames,
                                               CPUMAP_BATCH);
                for (i = 0; i < n; i++) {
                        void *f = frames[i];
                        struct page *page = virt_to_page(f);

                        /* Bring struct page memory area to curr CPU. Read by
                         * build_skb_around via page_is_pfmemalloc(), and when
                         * freed written by page_frag_free call.
                         */
                        prefetchw(page);
                }

                /* Support running another XDP prog on this CPU */
                nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
                if (nframes) {
                        m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
                        if (unlikely(m == 0)) {
                                for (i = 0; i < nframes; i++)
                                        skbs[i] = NULL; /* effect: xdp_return_frame */
                                drops += nframes;
                        }
                }

                local_bh_disable();
                for (i = 0; i < nframes; i++) {
                        struct xdp_frame *xdpf = frames[i];
                        struct sk_buff *skb = skbs[i];
                        int ret;

                        skb = cpu_map_build_skb(xdpf, skb);
                        if (!skb) {
                                xdp_return_frame(xdpf);
                                continue;
                        }

                        /* Inject into network stack */
                        ret = netif_receive_skb_core(skb);
                        if (ret == NET_RX_DROP)
                                drops++;
                }
                /* Feedback loop via tracepoint */
                trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);

                local_bh_enable(); /* resched point, may call do_softirq() */
        }
        __set_current_state(TASK_RUNNING);

        put_cpu_map_entry(rcpu);
        return 0;
}

bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_CPUMAP &&
               map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
        struct bpf_prog *prog;

        prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
                bpf_prog_put(prog);
                return -EINVAL;
        }

        rcpu->value.bpf_prog.id = prog->aux->id;
        rcpu->prog = prog;

        return 0;
}
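/* Sketch of a program this function accepts (assumption: the libbpf
 * section name that maps to BPF_XDP_CPUMAP, which has varied across
 * libbpf versions):
 *
 *      SEC("xdp_cpumap")
 *      int xdp_on_remote_cpu(struct xdp_md *ctx)
 *      {
 *              // Runs on the remote kthread's CPU, after frames are
 *              // dequeued but before SKB allocation; may PASS, DROP,
 *              // or REDIRECT again.
 *              return XDP_PASS;
 *      }
 */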

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
{
        int numa, err, i, fd = value->bpf_prog.fd;
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct bpf_cpu_map_entry *rcpu;
        struct xdp_bulk_queue *bq;

        /* Have map->numa_node, but choose node of redirect target CPU */
        numa = cpu_to_node(cpu);

        rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
        if (!rcpu)
                return NULL;

        /* Alloc percpu bulkq */
        rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
                                         sizeof(void *), gfp);
        if (!rcpu->bulkq)
                goto free_rcu;

        for_each_possible_cpu(i) {
                bq = per_cpu_ptr(rcpu->bulkq, i);
                bq->obj = rcpu;
        }

        /* Alloc queue */
        rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
        if (!rcpu->queue)
                goto free_bulkq;

        err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
        if (err)
                goto free_queue;

        rcpu->cpu    = cpu;
        rcpu->map_id = map_id;
        rcpu->value.qsize  = value->qsize;

        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
                goto free_ptr_ring;

        /* Setup kthread */
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               "cpumap/%d/map:%d", cpu, map_id);
        if (IS_ERR(rcpu->kthread))
                goto free_prog;

        get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
        get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

        /* Make sure kthread runs on a single CPU */
        kthread_bind(rcpu->kthread, cpu);
        wake_up_process(rcpu->kthread);

        return rcpu;

free_prog:
        if (rcpu->prog)
                bpf_prog_put(rcpu->prog);
free_ptr_ring:
        ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
        kfree(rcpu->queue);
free_bulkq:
        free_percpu(rcpu->bulkq);
free_rcu:
        kfree(rcpu);
        return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_cpu_map_entry *rcpu;

        /* This cpu_map_entry has been disconnected from the map and one
         * RCU grace-period has elapsed.  Thus, XDP cannot queue any
         * new packets and cannot change/set flush_needed that can
         * find this entry.
         */
        rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here, last put frees rcpu resources */
        put_cpu_map_entry(rcpu);
}

/* After xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because driver-side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq).  A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the
 * memory resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * the percpu bulkq to the queue.  Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() here to make sure
 * the queue is empty.  Instead a work_queue is started for stopping
 * the kthread, cpu_map_kthread_stop, which waits for an RCU grace
 * period before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
                                    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
        struct bpf_cpu_map_entry *old_rcpu;

        old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
        if (old_rcpu) {
                call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
                INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
                schedule_work(&old_rcpu->kthread_stop_wq);
        }
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 key_cpu = *(u32 *)key;

        if (key_cpu >= map->max_entries)
                return -EINVAL;

        /* notice caller map_delete_elem() uses preempt_disable() */
        __cpu_map_entry_replace(cmap, key_cpu, NULL);
        return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpumap_val cpumap_value = {};
        struct bpf_cpu_map_entry *rcpu;
        /* Array index key corresponds to CPU number */
        u32 key_cpu = *(u32 *)key;

        memcpy(&cpumap_value, value, map->value_size);

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(key_cpu >= cmap->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;
        if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
                return -EOVERFLOW;

        /* Make sure CPU is a valid possible cpu */
        if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
                return -ENODEV;

        if (cpumap_value.qsize == 0) {
                rcpu = NULL; /* Same as deleting */
        } else {
                /* Updating qsize causes re-allocation of bpf_cpu_map_entry */
                rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
                if (!rcpu)
                        return -ENOMEM;
                rcpu->cmap = cmap;
        }
        rcu_read_lock();
        __cpu_map_entry_replace(cmap, key_cpu, rcpu);
        rcu_read_unlock();
        return 0;
}
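/* Userspace sketch of driving this update path (illustrative; assumes
 * libbpf and an already-created cpumap fd in map_fd):
 *
 *      struct bpf_cpumap_val val = {
 *              .qsize = 192,           // ptr_ring entries, capped at 16384
 *              .bpf_prog.fd = prog_fd, // optional BPF_XDP_CPUMAP prog, or 0
 *      };
 *      __u32 cpu = 2;                  // hypothetical destination CPU
 *
 *      if (bpf_map_update_elem(map_fd, &cpu, &val, 0))
 *              // e.g. -ENODEV if cpu is not possible, -EOVERFLOW if
 *              // qsize exceeds the sanity limit
 *              handle_error();
 */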

static void cpu_map_free(struct bpf_map *map)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 i;

        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the bpf programs (there can be more than one that used this map)
         * were disconnected from events. Wait for outstanding critical
         * sections in these programs to complete. The rcu critical section
         * only guarantees no further "XDP/bpf-side" reads against
         * bpf_cpu_map->cpu_map.  It does __not__ ensure pending flush
         * operations (if any) are complete.
         */

        bpf_clear_redirect_map(map);
        synchronize_rcu();

        /* For cpu_map the remote CPUs can still be using the entries
         * (struct bpf_cpu_map_entry).
         */
        for (i = 0; i < cmap->map.max_entries; i++) {
                struct bpf_cpu_map_entry *rcpu;

                rcpu = READ_ONCE(cmap->cpu_map[i]);
                if (!rcpu)
                        continue;

                /* bq flush and cleanup happens after RCU grace-period */
                __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
        }
        bpf_map_area_free(cmap->cpu_map);
        kfree(cmap);
}

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpu_map_entry *rcpu;

        if (key >= map->max_entries)
                return NULL;

        rcpu = READ_ONCE(cmap->cpu_map[key]);
        return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map_entry *rcpu =
                __cpu_map_lookup_elem(map, *(u32 *)key);

        return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= cmap->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == cmap->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
        .map_meta_equal         = bpf_map_meta_equal,
        .map_alloc              = cpu_map_alloc,
        .map_free               = cpu_map_free,
        .map_delete_elem        = cpu_map_delete_elem,
        .map_update_elem        = cpu_map_update_elem,
        .map_lookup_elem        = cpu_map_lookup_elem,
        .map_get_next_key       = cpu_map_get_next_key,
        .map_check_btf          = map_check_no_btf,
        .map_btf_name           = "bpf_cpu_map",
        .map_btf_id             = &cpu_map_btf_id,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
        struct bpf_cpu_map_entry *rcpu = bq->obj;
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
        struct ptr_ring *q;
        int i;

        if (unlikely(!bq->count))
                return;

        q = rcpu->queue;
        spin_lock(&q->producer_lock);

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];
                int err;

                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
                        xdp_return_frame_rx_napi(xdpf);
                }
                processed++;
        }
        bq->count = 0;
        spin_unlock(&q->producer_lock);

        __list_del_clearprev(&bq->flush_node);

        /* Feedback loop via tracepoints */
        trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
                bq_flush_to_queue(bq);

        /* Notice, xdp_buff/page MUST be queued here, long enough for the
         * driver code invoking us to finish, due to driver
         * (e.g. ixgbe) recycle tricks based on page-refcnt.
         *
         * Thus, incoming xdp_frame is always queued here (else we race
         * with another CPU on page-refcnt and remaining driver code).
         * Queue time is very short, as the driver will invoke the flush
         * operation when completing the napi->poll call.
         */
        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev)
                list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct xdp_frame *xdpf;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        /* Info needed when constructing SKB on remote CPU */
        xdpf->dev_rx = dev_rx;

        bq_enqueue(rcpu, xdpf);
        return 0;
}

void __cpu_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                bq_flush_to_queue(bq);

                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(bq->obj->kthread);
        }
}

static int __init cpu_map_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
        return 0;
}

subsys_initcall(cpu_map_init);