1
2
3
4
5
6#include <uapi/linux/bpf.h>
7#include "bpf_helpers.h"
8
/* Per-CPU counters for xdp_redirect tracepoint outcomes.
 * Indexed by the XDP_REDIRECT_SUCCESS / XDP_REDIRECT_ERROR enum below:
 * slot 0 counts successful redirects, slot 1 counts failed ones.
 */
struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* NOTE(review): only success/error buckets; individual errno values
	 * are not distinguished. */
};
16
17#define XDP_UNKNOWN XDP_REDIRECT + 1
18struct bpf_map_def SEC("maps") exception_cnt = {
19 .type = BPF_MAP_TYPE_PERCPU_ARRAY,
20 .key_size = sizeof(u32),
21 .value_size = sizeof(u64),
22 .max_entries = XDP_UNKNOWN + 1,
23};
24
25
26
27
/* Mirror of the xdp_redirect tracepoint record.
 * NOTE(review): field order/sizes must match the tracepoint's format file
 * (tracefs events/xdp/xdp_redirect/format) — verify against the running
 * kernel; the leading u64 pad is assumed to cover the common trace fields.
 */
struct xdp_redirect_ctx {
	u64 __pad;	/* common tracepoint header — TODO confirm size */
	int prog_id;
	u32 act;
	int ifindex;
	int err;	/* 0 on success; read by xdp_redirect_collect_stat() */
	int to_ifindex;
	u32 map_id;
	int map_index;
};
38
/* Keys into redirect_err_cnt (see map's max_entries = 2). */
enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
43
44static __always_inline
45int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
46{
47 u32 key = XDP_REDIRECT_ERROR;
48 int err = ctx->err;
49 u64 *cnt;
50
51 if (!err)
52 key = XDP_REDIRECT_SUCCESS;
53
54 cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
55 if (!cnt)
56 return 1;
57 *cnt += 1;
58
59 return 0;
60
61
62
63
64
65
66}
67
/* Failed redirects (no map): delegate to the shared stat collector. */
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
73
74
/* Failed map-based redirects: same shared stat collector. */
SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
80
81
/* Successful (non-map) redirects: counted via the shared collector. */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
87
88
/* Successful map-based redirects: counted via the shared collector. */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
94
95
96
97
/* Mirror of the xdp_exception tracepoint record.
 * NOTE(review): layout must match tracefs events/xdp/xdp_exception/format
 * on the running kernel — verify before changing.
 */
struct xdp_exception_ctx {
	u64 __pad;	/* common tracepoint header — TODO confirm size */
	int prog_id;
	u32 act;	/* XDP action code; keys exception_cnt */
	int ifindex;
};
104
105SEC("tracepoint/xdp/xdp_exception")
106int trace_xdp_exception(struct xdp_exception_ctx *ctx)
107{
108 u64 *cnt;
109 u32 key;
110
111 key = ctx->act;
112 if (key > XDP_REDIRECT)
113 key = XDP_UNKNOWN;
114
115 cnt = bpf_map_lookup_elem(&exception_cnt, &key);
116 if (!cnt)
117 return 1;
118 *cnt += 1;
119
120 return 0;
121}
122
123
/* Common per-CPU stats record shared by the cpumap counter maps below. */
struct datarec {
	u64 processed;	/* packets processed */
	u64 dropped;	/* packets dropped */
	u64 info;	/* extra event counter; meaning differs per tracepoint */
};
/* Upper bound on destination CPUs tracked by cpumap_enqueue_cnt. */
#define MAX_CPUS 64
130
/* Per-CPU enqueue stats, keyed by destination CPU (ctx->to_cpu). */
struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = MAX_CPUS,
};
137
/* Single-slot per-CPU stats for the cpumap kthread tracepoint. */
struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = 1,
};
144
145
146
147
/* Mirror of the xdp_cpumap_enqueue tracepoint record.
 * NOTE(review): layout must match tracefs
 * events/xdp/xdp_cpumap_enqueue/format — verify on the running kernel.
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		/* common tracepoint header — TODO confirm size */
	int map_id;
	u32 act;
	int cpu;
	unsigned int drops;	/* packets dropped on enqueue */
	unsigned int processed;	/* packets enqueued */
	int to_cpu;		/* destination CPU; keys cpumap_enqueue_cnt */
};
157
158SEC("tracepoint/xdp/xdp_cpumap_enqueue")
159int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
160{
161 u32 to_cpu = ctx->to_cpu;
162 struct datarec *rec;
163
164 if (to_cpu >= MAX_CPUS)
165 return 1;
166
167 rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
168 if (!rec)
169 return 0;
170 rec->processed += ctx->processed;
171 rec->dropped += ctx->drops;
172
173
174 if (ctx->processed > 0)
175 rec->info += 1;
176
177 return 0;
178}
179
180
181
182
/* Mirror of the xdp_cpumap_kthread tracepoint record.
 * NOTE(review): layout must match tracefs
 * events/xdp/xdp_cpumap_kthread/format — verify on the running kernel.
 */
struct cpumap_kthread_ctx {
	u64 __pad;		/* common tracepoint header — TODO confirm size */
	int map_id;
	u32 act;
	int cpu;
	unsigned int drops;	/* packets dropped by the kthread */
	unsigned int processed;	/* packets processed by the kthread */
	int sched;		/* nonzero when the kthread scheduled out */
};
192
193SEC("tracepoint/xdp/xdp_cpumap_kthread")
194int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
195{
196 struct datarec *rec;
197 u32 key = 0;
198
199 rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
200 if (!rec)
201 return 0;
202 rec->processed += ctx->processed;
203 rec->dropped += ctx->drops;
204
205
206 if (ctx->sched)
207 rec->info++;
208
209 return 0;
210}
211