1
2#ifndef __BPF_API__
3#define __BPF_API__
4
5
6
7
8
9
10
11
12#include <stdint.h>
13
14#include <linux/pkt_cls.h>
15#include <linux/bpf.h>
16#include <linux/filter.h>
17
18#include <asm/byteorder.h>
19
20#include "bpf_elf.h"
21
22
/* Pinning policy for maps described in the ELF maps section:
 * either never pin the map, or pin it in bpffs under its own name.
 * Values are part of the loader ABI and must not change.
 */
enum libbpf_pin_type {
	LIBBPF_PIN_NONE		= 0,	/* map is not pinned */
	LIBBPF_PIN_BY_NAME	= 1,	/* pin map under its name */
};
28
29
30
/* BTF-style map definition helpers (as in libbpf's bpf_helpers.h):
 * each expands to a dummy pointer/array member whose C type encodes
 * the attribute name and value for the BTF type emitter.
 */
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
34
35
36
/* Generic convenience macros. Each one is guarded so that a prior
 * definition (e.g. pulled in via kernel headers) takes precedence. */

#ifndef __stringify
# define __stringify(X) #X
#endif

#ifndef __maybe_unused
# define __maybe_unused __attribute__((__unused__))
#endif

#ifndef offsetof
# define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#endif

/* Branch prediction hints for the compiler. */
#ifndef likely
# define likely(X) __builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X) __builtin_expect(!!(X), 0)
#endif

/* Byte-order conversion for compile-time constants; the __constant_*
 * helpers come from <asm/byteorder.h>. Use only on constants. */
#ifndef htons
# define htons(X) __constant_htons((X))
#endif

#ifndef ntohs
# define ntohs(X) __constant_ntohs((X))
#endif

#ifndef htonl
# define htonl(X) __constant_htonl((X))
#endif

#ifndef ntohl
# define ntohl(X) __constant_ntohl((X))
#endif

/* NOTE(review): this maps the __inline__ keyword to the always_inline
 * attribute alone (no inline specifier). BPF programs depend on full
 * inlining of helpers; confirm against the toolchain before changing. */
#ifndef __inline__
# define __inline__ __attribute__((always_inline))
#endif
76
77
78
/* ELF section placement helpers. The section name is how the tc/XDP
 * ELF loader locates programs, maps and the license, so the names
 * (via the ELF_SECTION_* constants from "bpf_elf.h") are ABI. */

#ifndef __section
# define __section(NAME) \
	__attribute__((section(NAME), used))
#endif

/* Tail-called programs live in a "ID/KEY" section so the loader can
 * wire them into the program array map with id ID at slot KEY. */
#ifndef __section_tail
# define __section_tail(ID, KEY) \
	__section(__stringify(ID) "/" __stringify(KEY))
#endif

#ifndef __section_xdp_entry
# define __section_xdp_entry \
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_cls_entry
# define __section_cls_entry \
	__section(ELF_SECTION_CLASSIFIER)
#endif

#ifndef __section_act_entry
# define __section_act_entry \
	__section(ELF_SECTION_ACTION)
#endif

#ifndef __section_lwt_entry
# define __section_lwt_entry \
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_license
# define __section_license \
	__section(ELF_SECTION_LICENSE)
#endif

#ifndef __section_maps
# define __section_maps \
	__section(ELF_SECTION_MAPS)
#endif
118
119
120
/* Emits the program license string into the license section; the
 * kernel checks it to decide whether GPL-only helpers are usable. */
#ifndef BPF_LICENSE
# define BPF_LICENSE(NAME) \
	char ____license[] __section_license = NAME
#endif
125
126
127
/* Sentinel "use default" value passed to helpers that take a hash /
 * selector argument (presumably e.g. skb_set_tunnel_key flags users;
 * verify against callers). */
#ifndef BPF_H_DEFAULT
# define BPF_H_DEFAULT	-1
#endif
131
132
133
/* Helper binding: BPF_FUNC(NAME, args...) declares a function pointer
 * called NAME whose value is the kernel helper call number
 * BPF_FUNC_##NAME from <linux/bpf.h>. The LLVM BPF backend turns
 * calls through these pointers into real helper calls. */
#ifndef __BPF_FUNC
# define __BPF_FUNC(NAME, ...) \
	(* NAME)(__VA_ARGS__) __maybe_unused
#endif

#ifndef BPF_FUNC
# define BPF_FUNC(NAME, ...) \
	__BPF_FUNC(NAME, __VA_ARGS__) = (void *) BPF_FUNC_##NAME
#endif
143
144
/* Map access helpers. Signatures must match the kernel helper ABI
 * exactly, so they are kept token-identical to <linux/bpf.h>. */
static void *BPF_FUNC(map_lookup_elem, void *map, const void *key);
static int BPF_FUNC(map_update_elem, void *map, const void *key,
		    const void *value, uint32_t flags);
static int BPF_FUNC(map_delete_elem, void *map, const void *key);

/* Monotonic time in nanoseconds. */
static uint64_t BPF_FUNC(ktime_get_ns);
152
153
154
155
156
157
158
159
/* Debug print into the kernel trace pipe (debugging only). */
static void BPF_FUNC(trace_printk, const char *fmt, int fmt_size, ...);

/* Convenience wrapper: copies the format string onto the BPF stack
 * (____fmt) first, since trace_printk needs a stack-resident format. */
#ifndef printt
# define printt(fmt, ...)						\
	({								\
		char ____fmt[] = fmt;					\
		trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__);	\
	})
#endif

/* Pseudo-random 32-bit value. */
static uint32_t BPF_FUNC(get_prandom_u32);
172
173
/* Tail call into the program stored at `index` of a program array map;
 * does not return on success. */
static void BPF_FUNC(tail_call, struct __sk_buff *skb, void *map,
		     uint32_t index);

/* Current CPU / NUMA node identifiers. */
static uint32_t BPF_FUNC(get_smp_processor_id);
static uint32_t BPF_FUNC(get_numa_node_id);

/* cgroup classid / membership test for the skb's socket. */
static uint32_t BPF_FUNC(get_cgroup_classid, struct __sk_buff *skb);
static int BPF_FUNC(skb_under_cgroup, void *map, uint32_t index);

/* Routing realm and skb flow-hash access/invalidation. */
static uint32_t BPF_FUNC(get_route_realm, struct __sk_buff *skb);
static uint32_t BPF_FUNC(get_hash_recalc, struct __sk_buff *skb);
static uint32_t BPF_FUNC(set_hash_invalid, struct __sk_buff *skb);
188
189
/* Redirect the packet to another device (clone_redirect keeps the
 * original and redirects a clone). */
static int BPF_FUNC(redirect, int ifindex, uint32_t flags);
static int BPF_FUNC(clone_redirect, struct __sk_buff *skb, int ifindex,
		    uint32_t flags);

/* Copy packet bytes out of / into the skb at byte offset `off`. */
static int BPF_FUNC(skb_load_bytes, struct __sk_buff *skb, uint32_t off,
		    void *to, uint32_t len);
static int BPF_FUNC(skb_store_bytes, struct __sk_buff *skb, uint32_t off,
		    const void *from, uint32_t len, uint32_t flags);

/* Incremental checksum fixups after mangling (L3/L4 checksum field at
 * `off`); csum_diff computes a raw checksum difference between two
 * buffers, csum_update folds a diff into the skb's checksum. */
static int BPF_FUNC(l3_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(l4_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(csum_diff, const void *from, uint32_t from_size,
		    const void *to, uint32_t to_size, uint32_t seed);
static int BPF_FUNC(csum_update, struct __sk_buff *skb, uint32_t wsum);

/* Packet type / protocol / length mangling. */
static int BPF_FUNC(skb_change_type, struct __sk_buff *skb, uint32_t type);
static int BPF_FUNC(skb_change_proto, struct __sk_buff *skb, uint32_t proto,
		    uint32_t flags);
static int BPF_FUNC(skb_change_tail, struct __sk_buff *skb, uint32_t nlen,
		    uint32_t flags);

/* Make `len` bytes of the (possibly non-linear) skb directly readable. */
static int BPF_FUNC(skb_pull_data, struct __sk_buff *skb, uint32_t len);
215
216
/* Event notification: exposed here under the name skb_event_output,
 * but explicitly bound to the generic perf_event_output helper id. */
static int __BPF_FUNC(skb_event_output, struct __sk_buff *skb, void *map,
		      uint64_t index, const void *data, uint32_t size) =
	(void *) BPF_FUNC_perf_event_output;

/* VLAN tag push/pop on the skb. */
static int BPF_FUNC(skb_vlan_push, struct __sk_buff *skb, uint16_t proto,
		    uint16_t vlan_tci);
static int BPF_FUNC(skb_vlan_pop, struct __sk_buff *skb);

/* Tunnel key metadata (struct bpf_tunnel_key from <linux/bpf.h>)
 * read/write for collect-metadata tunnel devices. */
static int BPF_FUNC(skb_get_tunnel_key, struct __sk_buff *skb,
		    struct bpf_tunnel_key *to, uint32_t size, uint32_t flags);
static int BPF_FUNC(skb_set_tunnel_key, struct __sk_buff *skb,
		    const struct bpf_tunnel_key *from, uint32_t size,
		    uint32_t flags);

/* Raw tunnel options (e.g. geneve/vxlan option TLVs -- TODO confirm). */
static int BPF_FUNC(skb_get_tunnel_opt, struct __sk_buff *skb,
		    void *to, uint32_t size);
static int BPF_FUNC(skb_set_tunnel_opt, struct __sk_buff *skb,
		    const void *from, uint32_t size);
237
238
239
/* Atomic add on a shared (map value) cell; __sync_fetch_and_add is
 * lowered by LLVM to a BPF XADD instruction. Result is discarded. */
#ifndef lock_xadd
# define lock_xadd(ptr, val) ((void) __sync_fetch_and_add(ptr, val))
#endif

/* Map mem ops onto compiler builtins so no libc is required; LLVM
 * expands them inline for small, constant sizes. */
#ifndef memset
# define memset(s, c, n) __builtin_memset((s), (c), (n))
#endif

#ifndef memcpy
# define memcpy(d, s, n) __builtin_memcpy((d), (s), (n))
#endif

#ifndef memmove
# define memmove(d, s, n) __builtin_memmove((d), (s), (n))
#endif
255
256
257
258
259
/* memcmp is deliberately compiled out: presumably the LLVM BPF
 * backend cannot lower __builtin_memcmp to BPF code -- confirm with
 * the current toolchain before enabling. */
#if 0
#ifndef memcmp
# define memcmp(a, b, n) __builtin_memcmp((a), (b), (n))
#endif
#endif
265
/* Legacy cBPF-style absolute packet loads: the "llvm.bpf.load.*" asm
 * names are intrinsics recognized by the LLVM BPF backend and mapped
 * to BPF_LD | BPF_ABS instructions; `off` is the byte offset into the
 * packet. NOTE(review): result byte-order/width semantics follow the
 * classic BPF LD_ABS convention -- verify against the backend docs. */
unsigned long long load_byte(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.byte");

unsigned long long load_half(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.half");

unsigned long long load_word(void *skb, unsigned long long off)
	asm ("llvm.bpf.load.word");
274
275#endif
276