1
2
3
4#ifndef __NFP_BPF_H__
5#define __NFP_BPF_H__ 1
6
7#include <linux/bitfield.h>
8#include <linux/bpf.h>
9#include <linux/bpf_verifier.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/rhashtable.h>
13#include <linux/skbuff.h>
14#include <linux/types.h>
15#include <linux/wait.h>
16
17#include "../ccm.h"
18#include "../nfp_asm.h"
19#include "fw.h"
20
/* Log a rate-limited warning against the app's control vNIC datapath. */
#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* Mask of the bits in the (u64) instruction word used to carry the
 * relocation type (enum nfp_relo_type) until load-time fixup.
 */
#define OP_RELO_TYPE 0xff00000000000000ULL
27
/* Relocation types encoded into instructions (see OP_RELO_TYPE) so that
 * nfp_bpf_relo_for_vnic() can patch branch targets and immediates once
 * the final image location is known.
 */
enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO 15000
49
/* General-purpose registers statically reserved by the JIT for scratch
 * values.  STATIC_REG_STACK and STATIC_REG_PKT_LEN share index 22 but are
 * accessed through different banks — stack_reg() uses reg_a() while
 * plen_reg() uses reg_b() (see the macros below).
 */
enum static_regs {
	STATIC_REG_IMMA = 20,
	STATIC_REG_IMM = 21,
	STATIC_REG_STACK = 22,
	STATIC_REG_PKT_LEN = 22,
};
56
/* Word indices of interesting fields inside the per-packet vector kept in
 * local memory (accessed via the pv_*() reg_lm() wrappers below).
 */
enum pkt_vec {
	PKT_VEC_PKT_LEN = 0,
	PKT_VEC_PKT_PTR = 2,
	PKT_VEC_QSEL_SET = 4,
	PKT_VEC_QSEL_VAL = 6,
};
63
/* NOTE(review): "PKT_VEL" looks like a typo of "PKT_VEC"; kept as-is
 * because renaming would break existing users of this header.
 */
#define PKT_VEL_QSEL_SET_BIT 4

/* Accessors for packet-vector words in local memory.  The @np argument is
 * currently unused and kept only for call-site symmetry with the register
 * helpers below.
 */
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL)

/* Shorthands for the statically reserved scratch registers (enum
 * static_regs), split per bank (reg_a()/reg_b()/reg_both()).
 */
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np) pv_ctm_ptr(np)
#define imm_a(np) reg_a(STATIC_REG_IMM)
#define imm_b(np) reg_b(STATIC_REG_IMM)
#define imma_a(np) reg_a(STATIC_REG_IMMA)
#define imma_b(np) reg_b(STATIC_REG_IMMA)
#define imm_both(np) reg_both(STATIC_REG_IMM)
#define ret_reg(np) imm_a(np)

/* ABI register holding flags exchanged with the firmware datapath. */
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (keyed on pointer)
 *
 * @abi_version:	global BPF ABI version
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:	max size of map value
 * @maps.max_elem_sz:	max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the PRNG for pseudo random numbers
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply truncate packet size for adjust tail
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
};
173
/* How a given word of an offloaded map's value is used by the program. */
enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};
180
/* Per-word usage record for an offloaded map value.
 * @type:		how the word is used (enum nfp_bpf_map_use)
 * @non_zero_update:	word may be updated with a non-zero value
 */
struct nfp_bpf_map_word {
	unsigned char type :4;
	unsigned char non_zero_update :1;
};
185
186
187
188
189
190
191
192
193
/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	usage information for each word of the map value
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};
201
/* Record of an offload-neutral map, kept in nfp_app_bpf->maps_neutral.
 * @l:		rhashtable linkage
 * @ptr:	kernel BPF map pointer (used as the hash key)
 * @map_id:	kernel-assigned map ID
 * @count:	reference count on this record
 */
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};
208
extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;

/* Per-instruction code generation callback type. */
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

/* Helpers to walk a program's list of instruction wrappers (linked via
 * nfp_insn_meta.l on nfp_prog.insns).
 */
#define nfp_prog_first_meta(nfp_prog) \
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog) \
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta) list_next_entry(meta, l)
#define nfp_meta_prev(meta) list_prev_entry(meta, l)
221
222
223
224
225
226
/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: verifier's register state at call entry
 * @var_off: was the register's offset variable at verification time
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

/* Per-instruction flags kept in nfp_insn_meta.flags. */
#define FLAG_INSN_IS_JUMP_DST BIT(0)
#define FLAG_INSN_IS_SUBPROG_START BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
/* Instruction needs to zero extend to high 32-bit */
#define FLAG_INSN_DO_ZEXT BIT(6)

/* All reasons an instruction may be skipped during translation. */
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
 FLAG_INSN_SKIP_PREC_DEPENDENT | \
 FLAG_INSN_SKIP_VERIFIER_OPT)
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @paired_st: the paired store insn at the head of the sequence
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags (FLAG_INSN_*)
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br;
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, e.g. shift amounts and multiplicands.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};
326
/* Mask of the size bits of a BPF opcode; clearing them leaves class + mode. */
#define BPF_SIZE_MASK 0x18
328
329static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
330{
331 return BPF_CLASS(meta->insn.code);
332}
333
334static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
335{
336 return BPF_SRC(meta->insn.code);
337}
338
339static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
340{
341 return BPF_OP(meta->insn.code);
342}
343
344static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
345{
346 return BPF_MODE(meta->insn.code);
347}
348
349static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
350{
351 return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
352}
353
354static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
355{
356 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
357}
358
359static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
360{
361 return mbpf_class(meta) == BPF_JMP32;
362}
363
364static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
365{
366 return mbpf_class(meta) == BPF_JMP;
367}
368
369static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
370{
371 return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
372}
373
374static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
375{
376 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
377}
378
379static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
380{
381 return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
382}
383
384static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
385{
386 return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
387}
388
389static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
390{
391 u8 code = meta->insn.code;
392
393 return BPF_CLASS(code) == BPF_LD &&
394 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
395}
396
397static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
398{
399 u8 code = meta->insn.code;
400
401 return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
402}
403
404static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
405{
406 return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
407}
408
409static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
410{
411 return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
412}
413
414static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
415{
416 return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
417}
418
419static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
420{
421 return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
422}
423
424static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
425{
426 u8 op;
427
428 if (is_mbpf_jmp32(meta))
429 return true;
430
431 if (!is_mbpf_jmp64(meta))
432 return false;
433
434 op = mbpf_op(meta);
435 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
436}
437
438static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
439{
440 struct bpf_insn insn = meta->insn;
441
442 return insn.code == (BPF_JMP | BPF_CALL) &&
443 insn.src_reg != BPF_PSEUDO_CALL;
444}
445
446static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
447{
448 struct bpf_insn insn = meta->insn;
449
450 return insn.code == (BPF_JMP | BPF_CALL) &&
451 insn.src_reg == BPF_PSEUDO_CALL;
452}
453
/* Stack frames of sub-programs are allocated on 64 byte boundaries. */
#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program needs registers pushed on call
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};
524
525
526
527
528
529
530
/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};
536
/* Translation / JIT entry points. */
bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

/* Verifier callbacks. */
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

/* Offload hooks driven from the netdev/ndo side. */
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

/* Look up the insn wrapper for @insn_idx, starting the walk at @meta. */
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

/* Control message (map offload) operations. */
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

/* Control message RX handlers. */
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif /* __NFP_BPF_H__ */
590