linux/samples/bpf/xdp_monitor_kern.c
/* SPDX-License-Identifier: GPL-2.0
 *  Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") redirect_err_cnt = {
        .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size       = sizeof(u32),
        .value_size     = sizeof(u64),
        .max_entries    = 2,
        /* TODO: have entries for all possible errno's */
};

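/* Userspace reads of a BPF_MAP_TYPE_PERCPU_ARRAY return one value per
 * possible CPU, which the reader must sum itself.  A minimal sketch,
 * assuming libbpf's bpf_num_possible_cpus() and the bpf_map_lookup_elem()
 * syscall wrapper:
 *
 *      unsigned int nr_cpus = bpf_num_possible_cpus();
 *      __u64 values[nr_cpus], sum = 0;
 *      __u32 key = 0;
 *
 *      if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
 *              for (int i = 0; i < nr_cpus; i++)
 *                      sum += values[i];
 */
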
#define XDP_UNKNOWN     (XDP_REDIRECT + 1)
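/* XDP_REDIRECT is the highest action code (4) in enum xdp_action from
 * uapi/linux/bpf.h, so XDP_UNKNOWN is 5 and the map below holds one
 * counter per action plus a catch-all slot for unknown codes.
 */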
struct bpf_map_def SEC("maps") exception_cnt = {
        .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size       = sizeof(u32),
        .value_size     = sizeof(u64),
        .max_entries    = XDP_UNKNOWN + 1,
};

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:                kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int prog_id;            //      offset:8;  size:4; signed:1;
        u32 act;                //      offset:12; size:4; signed:0;
        int ifindex;            //      offset:16; size:4; signed:1;
        int err;                //      offset:20; size:4; signed:1;
        int to_ifindex;         //      offset:24; size:4; signed:1;
        u32 map_id;             //      offset:28; size:4; signed:0;
        int map_index;          //      offset:32; size:4; signed:1;
};                              //      offset:36

enum {
        XDP_REDIRECT_SUCCESS = 0,
        XDP_REDIRECT_ERROR = 1
};

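/* Shared collector for the four redirect tracepoints below.  The map is
 * per-CPU and a BPF program does not migrate between CPUs while running,
 * so the non-atomic increment is safe.
 */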
static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
        u32 key = XDP_REDIRECT_ERROR;
        int err = ctx->err;
        u64 *cnt;

        if (!err)
                key = XDP_REDIRECT_SUCCESS;

        cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
        if (!cnt)
                return 1;
        *cnt += 1;

        return 0; /* Indicate event was filtered (no further processing) */
        /*
         * Returning 1 here would allow e.g. a perf-record tracepoint
         * to see and record these events, but that doesn't work well
         * in practice, as stopping perf-record also unloads this
         * bpf_prog.  Plus, there is the additional overhead of doing so.
         */
}

SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

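/* The two success-case tracepoints above fire for every redirected
 * packet, so attaching them costs performance.  The userspace loader is
 * expected to leave them detached unless detailed stats are requested,
 * hence the "Likely unloaded" notes.
 */
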
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in:                kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
        u64 __pad;      // First 8 bytes are not accessible by bpf code
        int prog_id;    //      offset:8;  size:4; signed:1;
        u32 act;        //      offset:12; size:4; signed:0;
        int ifindex;    //      offset:16; size:4; signed:1;
};

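/* Drivers hit this tracepoint when they cannot complete the requested
 * XDP action, e.g. on a failed XDP_TX or an unknown action code, so the
 * counters are indexed by the action that raised the exception.
 */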
SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
        u64 *cnt;
        u32 key;

        key = ctx->act;
        if (key > XDP_REDIRECT)
                key = XDP_UNKNOWN;

        cnt = bpf_map_lookup_elem(&exception_cnt, &key);
        if (!cnt)
                return 1;
        *cnt += 1;

        return 0;
}

/* Common stats data record shared with _user.c */
struct datarec {
        u64 processed;
        u64 dropped;
        u64 info;
};
#define MAX_CPUS 64

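/* MAX_CPUS bounds the per-destination-CPU enqueue stats; enqueue events
 * targeting a CPU id >= MAX_CPUS are skipped by the handler below, so
 * raise this when monitoring systems with more CPUs.
 */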
struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
        .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size       = sizeof(u32),
        .value_size     = sizeof(struct datarec),
        .max_entries    = MAX_CPUS,
};

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
        .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size       = sizeof(u32),
        .value_size     = sizeof(struct datarec),
        .max_entries    = 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in:         kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int map_id;             //      offset:8;  size:4; signed:1;
        u32 act;                //      offset:12; size:4; signed:0;
        int cpu;                //      offset:16; size:4; signed:1;
        unsigned int drops;     //      offset:20; size:4; signed:0;
        unsigned int processed; //      offset:24; size:4; signed:0;
        int to_cpu;             //      offset:28; size:4; signed:1;
};

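/* An XDP_REDIRECT into a BPF_MAP_TYPE_CPUMAP enqueues frames to a remote
 * CPU, where a kernel thread finishes processing; the two tracepoints
 * below expose the drop counts and bulking behavior of that path.
 */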
SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
        u32 to_cpu = ctx->to_cpu;
        struct datarec *rec;

        if (to_cpu >= MAX_CPUS)
                return 1;

        rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
        if (!rec)
                return 0;
        rec->processed += ctx->processed;
        rec->dropped   += ctx->drops;

        /* Record bulk events, then userspace can calc average bulk size */
        if (ctx->processed > 0)
                rec->info += 1;
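        /* i.e. userspace computes average bulk as processed / info */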

        return 0;
}

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in:         kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int map_id;             //      offset:8;  size:4; signed:1;
        u32 act;                //      offset:12; size:4; signed:0;
        int cpu;                //      offset:16; size:4; signed:1;
        unsigned int drops;     //      offset:20; size:4; signed:0;
        unsigned int processed; //      offset:24; size:4; signed:0;
        int sched;              //      offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
        struct datarec *rec;
        u32 key = 0;

        rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
        if (!rec)
                return 0;
        rec->processed += ctx->processed;
        rec->dropped   += ctx->drops;

        /* Count times kthread yielded CPU via schedule call */
        if (ctx->sched)
                rec->info++;

        return 0;
}

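/* These programs are loaded and the maps polled by the companion
 * userspace tool (xdp_monitor_user.c), which attaches each
 * SEC("tracepoint/...") program above and periodically sums the
 * per-CPU counters.
 */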