/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM xdp
4
5#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_XDP_H
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/tracepoint.h>
11#include <linux/bpf.h>
12
/* X-macro list of all XDP program verdicts (XDP_ABORTED..XDP_REDIRECT).
 * Keeping the list in one place lets the two FN expansions below stay in
 * sync: one exports each enum value to the tracing infrastructure, the
 * other builds the symbol table used by __print_symbolic().
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

/* Make the numeric value of each XDP_* action visible to user-space
 * trace tooling (needed because enums are not resolvable from format
 * strings alone).
 */
#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
/* One { value, "NAME" } pair per action, for __print_symbolic(). */
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
/* Full symbol table; the { -1, 0 } entry terminates the list. */
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
27
/* Fired from drivers/XDP core when an XDP program's verdict could not be
 * honored (e.g. invalid action or a failed XDP_TX/redirect), typically
 * right before the frame is dropped.
 */
TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)	/* BPF program id (xdp->aux->id) */
		__field(u32, act)	/* XDP action that triggered the exception */
		__field(int, ifindex)	/* ingress net_device ifindex */
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = act;
		__entry->ifindex = dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);
52
/* Records the outcome of a bulk XDP_TX flush on a device: how many
 * frames were sent, how many were dropped, and the driver error (if
 * any). The action is always XDP_TX for this event.
 */
TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)		/* TX net_device ifindex */
		__field(u32, act)		/* fixed to XDP_TX */
		__field(int, drops)		/* frames dropped in this bulk */
		__field(int, sent)		/* frames successfully sent */
		__field(int, err)		/* driver error code, 0 on success */
	),

	TP_fast_assign(
		__entry->ifindex = dev->ifindex;
		__entry->act = XDP_TX;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);
81
/* Common event class for the four xdp_redirect* tracepoints below.
 * Covers both map-based and non-map redirects: for the non-map variants
 * 'map' is NULL (map_id recorded as 0). The map_id/map_index fields are
 * stored for every event but only printed by the *_map variants'
 * DEFINE_EVENT_PRINT overrides.
 */
DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 int to_ifindex, int err,
		 const struct bpf_map *map, u32 map_index),

	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),

	TP_STRUCT__entry(
		__field(int, prog_id)		/* BPF program id */
		__field(u32, act)		/* fixed to XDP_REDIRECT */
		__field(int, ifindex)		/* ingress device */
		__field(int, err)		/* 0 on success, negative errno on failure */
		__field(int, to_ifindex)	/* redirect target device */
		__field(u32, map_id)		/* bpf map id, 0 when no map */
		__field(int, map_index)		/* index within the map */
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = XDP_REDIRECT;
		__entry->ifindex = dev->ifindex;
		__entry->err = err;
		__entry->to_ifindex = to_ifindex;
		__entry->map_id = map ? map->id : 0;
		__entry->map_index = map_index;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err)
);
117
/* Successful non-map redirect (err == 0, map == NULL via the
 * _trace_xdp_redirect() wrapper below).
 */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 int to_ifindex, int err,
		 const struct bpf_map *map, u32 map_index),
	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
);
125
/* Failed non-map redirect; same layout as xdp_redirect but kept as a
 * separate event so failures can be enabled/filtered independently.
 */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 int to_ifindex, int err,
		 const struct bpf_map *map, u32 map_index),
	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
);
133
/* Convenience wrappers for the non-map redirect events: fill in the
 * "no map" arguments (map = NULL, map_index = 0) so call sites stay
 * short.
 *
 * Fix: the expansions previously ended in a semicolon, so a caller's own
 * trailing ';' produced an empty statement — harmless in most spots, but
 * it breaks an un-braced if/else around the call. Callers supply the
 * semicolon, as with any statement-like macro.
 */
#define _trace_xdp_redirect(dev, xdp, to)		\
	 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0)

#define _trace_xdp_redirect_err(dev, xdp, to, err)	\
	 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0)
139
/* Successful map-based redirect. Reuses the template's storage but
 * overrides the print format to also expose map_id/map_index.
 */
DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 int to_ifindex, int err,
		 const struct bpf_map *map, u32 map_index),
	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err,
		  __entry->map_id, __entry->map_index)
);
154
/* Failed map-based redirect; identical format to xdp_redirect_map,
 * split out so error paths can be traced/filtered on their own.
 */
DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 int to_ifindex, int err,
		 const struct bpf_map *map, u32 map_index),
	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err,
		  __entry->map_id, __entry->map_index)
);
169
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/* Local stand-in used to reach the net_device pointer of a devmap entry
 * without including devmap internals.
 * NOTE(review): presumably mirrors the leading layout of the private
 * struct bpf_dtab_netdev — must stay in sync with its definition.
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif
176
/* Resolve the destination ifindex of a redirect target: only
 * BPF_MAP_TYPE_DEVMAP entries wrap a net_device; for any other map type
 * (e.g. cpumap) there is no target device, so report 0.
 *
 * Fix: 'fwd' and 'map' are now parenthesized in the expansion so the
 * macro stays well-formed if a caller ever passes a non-trivial
 * expression (standard function-like-macro hygiene).
 */
#define devmap_ifindex(fwd, map)				\
	(((map)->map_type == BPF_MAP_TYPE_DEVMAP) ?		\
	  ((struct _bpf_dtab_netdev *)(fwd))->dev->ifindex : 0)

/* Map-based redirect wrappers: success (err = 0) and failure variants. */
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)		\
	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
				0, map, idx)

#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err)	\
	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map),	\
				    err, map, idx)
188
/* Emitted by the per-CPU cpumap kthread after processing a batch of
 * frames redirected to this CPU: counts processed/dropped frames and
 * whether the kthread scheduled (slept) during the batch.
 */
TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched),

	TP_ARGS(map_id, processed, drops, sched),

	TP_STRUCT__entry(
		__field(int, map_id)		/* cpumap bpf map id */
		__field(u32, act)		/* fixed to XDP_REDIRECT */
		__field(int, cpu)		/* CPU the kthread runs on */
		__field(unsigned int, drops)	/* frames dropped in this batch */
		__field(unsigned int, processed) /* frames processed in this batch */
		__field(int, sched)		/* non-zero if the kthread rescheduled */
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->sched = sched;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched)
);
223
/* Emitted on the RX CPU when frames are enqueued toward a remote CPU's
 * cpumap queue; mirrors xdp_cpumap_kthread but records the destination
 * CPU instead of scheduling info.
 */
TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)		/* cpumap bpf map id */
		__field(u32, act)		/* fixed to XDP_REDIRECT */
		__field(int, cpu)		/* enqueuing (RX) CPU */
		__field(unsigned int, drops)	/* frames that didn't fit/failed */
		__field(unsigned int, processed) /* frames enqueued */
		__field(int, to_cpu)		/* destination CPU */
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->to_cpu = to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);
258
/* Records a devmap ndo_xdp_xmit flush: frames transmitted from one
 * device to another via a devmap entry, with sent/dropped counts and
 * the driver error code.
 */
TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct bpf_map *map, u32 map_index,
		 int sent, int drops,
		 const struct net_device *from_dev,
		 const struct net_device *to_dev, int err),

	TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),

	TP_STRUCT__entry(
		__field(int, map_id)		/* devmap bpf map id */
		__field(u32, act)		/* fixed to XDP_REDIRECT */
		__field(u32, map_index)		/* slot within the devmap */
		__field(int, drops)		/* frames dropped by the TX driver */
		__field(int, sent)		/* frames accepted by the TX driver */
		__field(int, from_ifindex)	/* originating device */
		__field(int, to_ifindex)	/* transmitting device */
		__field(int, err)		/* ndo_xdp_xmit error, 0 on success */
	),

	TP_fast_assign(
		__entry->map_id = map->id;
		__entry->act = XDP_REDIRECT;
		__entry->map_index = map_index;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->from_ifindex = from_dev->ifindex;
		__entry->to_ifindex = to_dev->ifindex;
		__entry->err = err;
	),

	TP_printk("ndo_xdp_xmit"
		  " map_id=%d map_index=%d action=%s"
		  " sent=%d drops=%d"
		  " from_ifindex=%d to_ifindex=%d err=%d",
		  __entry->map_id, __entry->map_index,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->from_ifindex, __entry->to_ifindex, __entry->err)
);
299
300
301#include <net/xdp_priv.h>
302
/* X-macro list of xdp_mem_allocator memory model types (MEM_TYPE_*),
 * used exactly like __XDP_ACT_MAP above: one expansion exports the enum
 * values to the tracing infrastructure, the other builds the symbol
 * table for __print_symbolic().
 */
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(ZERO_COPY)

/* Export each MEM_TYPE_* value for user-space trace tooling. */
#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
/* One { value, "NAME" } pair per memory type. */
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
/* Full symbol table; { -1, 0 } terminates the list. */
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
316
/* Traces an attempt to disconnect/tear down an xdp_mem_allocator:
 * whether the removal is currently safe, whether it is being forced,
 * and how many disconnect attempts have been made so far.
 */
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 bool safe_to_remove, bool force),

	TP_ARGS(xa, safe_to_remove, force),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)	/* allocator object (pointer identity) */
		__field(u32, mem_id)		/* xdp_mem_info id */
		__field(u32, mem_type)		/* MEM_TYPE_* value */
		__field(const void *, allocator) /* underlying allocator pointer */
		__field(bool, safe_to_remove)	/* no outstanding frames? */
		__field(bool, force)		/* forced teardown */
		__field(int, disconnect_cnt)	/* attempts so far */
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->safe_to_remove = safe_to_remove;
		__entry->force = force;
		__entry->disconnect_cnt = xa->disconnect_cnt;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " safe_to_remove=%s force=%s disconnect_cnt=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->safe_to_remove ? "true" : "false",
		  __entry->force ? "true" : "false",
		  __entry->disconnect_cnt
	)
);
354
/* Traces registration of an xdp_mem_allocator with an RX queue
 * (xdp_rxq_info), tying the memory model to a specific device queue.
 */
TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)	/* allocator object */
		__field(u32, mem_id)		/* xdp_mem_info id */
		__field(u32, mem_type)		/* MEM_TYPE_* value */
		__field(const void *, allocator) /* underlying allocator pointer */
		__field(const struct xdp_rxq_info *, rxq)	/* RX queue being connected */
		__field(int, ifindex)		/* device owning the RX queue */
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->rxq = rxq;
		__entry->ifindex = rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);
388
/* Fired when returning a page to its registered memory model fails and
 * the page has to be released through the fallback path.
 */
TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *, page)	/* page that failed to return */
		__field(u32, mem_id)		/* xdp_mem_info id */
		__field(u32, mem_type)		/* MEM_TYPE_* value */
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->mem_id = mem->id;
		__entry->mem_type = mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);
414
415#endif
416
417#include <trace/define_trace.h>
418