1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#ifndef __NFP_BPF_H__
35#define __NFP_BPF_H__ 1
36
37#include <linux/bitfield.h>
38#include <linux/bpf.h>
39#include <linux/bpf_verifier.h>
40#include <linux/kernel.h>
41#include <linux/list.h>
42#include <linux/skbuff.h>
43#include <linux/types.h>
44#include <linux/wait.h>
45
46#include "../nfp_asm.h"
47#include "fw.h"
48
49
50
51
/* Relocation type is carried in the top byte of the raw instruction word
 * (masked out with OP_RELO_TYPE) and resolved when the program image is
 * placed in firmware memory (see nfp_bpf_relo_for_vnic()).
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,		/* fully resolved, no relocation needed */
	/* standard internal jumps */
	RELO_BR_REL,		/* relative branch within the program */
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,		/* branch to the normal exit sequence (tgt_out) */
	RELO_BR_GO_ABORT,	/* branch to the abort handler (tgt_abort) */
	/* external jumps */
	RELO_BR_NEXT_PKT,	/* branch to firmware's next-packet entry point */
	RELO_BR_HELPER,		/* branch to a firmware helper routine */
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};
67
68
69
70
71
/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO	15000

/* General purpose register numbers statically reserved by the JIT.
 * STATIC_REG_STACK and STATIC_REG_PKT_LEN intentionally share number 22:
 * they live in different register banks (stack_reg() below uses reg_a(),
 * plen_reg() uses reg_b()).
 */
enum static_regs {
	STATIC_REG_IMMA = 20,		/* banks A and B - scratch immediate */
	STATIC_REG_IMM = 21,		/* banks A and B - immediate */
	STATIC_REG_STACK = 22,		/* bank A - stack pointer */
	STATIC_REG_PKT_LEN = 22,	/* bank B - packet length */
};
80
/* Word offsets of packet metadata fields within the per-packet vector
 * kept in local memory (accessed via reg_lm(1, ...) below).
 */
enum pkt_vec {
	PKT_VEC_PKT_LEN = 0,	/* packet length */
	PKT_VEC_PKT_PTR = 2,	/* packet data pointer (CTM, per macro name) */
};

/* Accessors for packet vector fields in local memory index 1 */
#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)

/* Convenience wrappers for the statically reserved registers above */
#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)

/* ABI register holding per-packet flags shared with firmware */
#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK	1	/* skb->mark was set by the program */
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 *
 * @tag_allocator:	bitmap of control message tags in use
 * @tag_alloc_next:	next tag bit to allocate
 * @tag_alloc_last:	next tag bit to be freed
 *
 * @cmsg_replies:	received control message replies
 * @cmsg_wq:		wait queue for waiting for cmsg replies
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer allowed
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery
 */
struct nfp_app_bpf {
	struct nfp_app *app;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head cmsg_replies;
	struct wait_queue_head cmsg_wq;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
	} helpers;

	bool pseudo_random;
};
178
/* How a slot of an offloaded map's value is accessed by the program
 * (recorded per slot in nfp_bpf_map::use_map).
 */
enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,		/* slot never accessed */
	NFP_MAP_USE_READ,		/* only read */
	NFP_MAP_USE_WRITE,		/* written (possibly also read) */
	NFP_MAP_USE_ATOMIC_CNT,		/* used as an atomic counter (xadd) */
};
185
186
187
188
189
190
191
192
193
/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying the map on the datapath
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used, one entry per slot
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	enum nfp_bpf_map_use use_map[];
};
201
struct nfp_prog;
struct nfp_insn_meta;

/* Per-instruction translation callback type */
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

/* Helpers for walking the nfp_prog->insns metadata list */
#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)
212
213
214
215
216
217
/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg:	register state for the relevant argument
 * @var_off:	%true if the argument's value was not a known constant
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

/* Instruction is the destination of at least one jump */
#define FLAG_INSN_IS_JUMP_DST	BIT(0)
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start of range for associated packet data cache
 * @pkt_cache.range_end: end of range for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @skip: skip this instruction (optimized out)
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jumps */
		struct nfp_insn_meta *jmp_dst;
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	bool skip;
	instr_cb_t double_cb;

	struct list_head l;
};
283
/* Mask of the access-size bits (BPF_W/BPF_H/BPF_B/BPF_DW) in a BPF opcode */
#define BPF_SIZE_MASK	0x18
285
286static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
287{
288 return BPF_CLASS(meta->insn.code);
289}
290
291static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
292{
293 return BPF_SRC(meta->insn.code);
294}
295
296static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
297{
298 return BPF_OP(meta->insn.code);
299}
300
301static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
302{
303 return BPF_MODE(meta->insn.code);
304}
305
306static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
307{
308 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
309}
310
311static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
312{
313 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
314}
315
316static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
317{
318 return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
319}
320
321static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
322{
323 return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
324}
325
326static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
327{
328 u8 code = meta->insn.code;
329
330 return BPF_CLASS(code) == BPF_LD &&
331 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
332}
333
334static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
335{
336 u8 code = meta->insn.code;
337
338 return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
339}
340
341static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
342{
343 return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
344}
345
346static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
347{
348 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
349}
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;

	unsigned int n_translated;
	int error;

	unsigned int stack_depth;
	unsigned int adjust_head_location;

	struct list_head insns;
};
391
392
393
394
395
396
397
/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};
403
/* Translation of BPF bytecode to NFP machine code */
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

/* Hooks registered with the BPF verifier for offloaded programs */
extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

/* ndo_bpf entry points for program setup/teardown on a vNIC */
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

/* Find the insn meta for @insn_idx, using @meta as the walk starting point */
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns);

/* Produce a relocated copy of the program image for vNIC @bv */
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

/* Control message channel to firmware - map offload operations.
 * NOTE(review): alloc returns the table id on success, negative on error -
 * presumed from the "long long int" return; confirm against the cmsg code.
 */
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

/* Receive handler for control messages coming from the firmware */
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
440#endif
441