// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

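/* Seek to the meta for @insn_idx starting from the meta of the last
 * visited instruction, walking the list from whichever known point is
 * closest: the previous position or either end of the program.
 */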
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

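/* Record uses of bpf_xdp_adjust_head() so the JIT can skip the runtime
 * checks in the simple case of a single call site adjusting by a small
 * constant the datapath guarantees to be safe.
 */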
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

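/* Check a stack pointer passed as a helper argument: it must be a
 * constant pointer aligned to 4 bytes.  On re-parse of the same
 * instruction also note whether the offset changed since last time.
 */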
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* Rest of the checks is only if we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

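/* Check a map helper call is supported by the FW and, on re-parse of
 * the same instruction, that it still targets the same map.
 */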
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	/* Rest of the checks is only if we re-parse the same insn */
	if (!meta->func_id)
		return true;

	if (meta->arg1.map_ptr != reg1->map_ptr) {
		pr_vlog(env, "%s: called for different map\n", fname);
		return false;
	}

	return true;
}

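/* Validate a BPF helper call against FW capabilities and save the
 * argument state the JIT will need into the instruction meta.
 */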
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force current CPU to make sure we can report the event
		 * wherever we get the control message from FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* Save space in meta, we don't care about arguments other
		 * than 4th, shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* The event is delivered by the FW asynchronously from the
		 * program execution, so the return code and exact behavior
		 * of the helper differ from the in-kernel
		 * bpf_event_output().  Warn the user once on the host and
		 * in the verifier log.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

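/* Exits need no checking for XDP; for TC the return value must be a
 * constant verdict the translator supports.
 */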
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

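/* Stack pointers must be constant; if the same instruction is verified
 * again with a different offset, the offset modulo 4 must not change,
 * so the JIT can keep using the same word addressing.
 */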
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED] = "unused",
		[NFP_MAP_USE_READ] = "read",
		[NFP_MAP_USE_WRITE] = "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

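/* Record how one 4B word of a map value is used; a word may only ever
 * be used in one way, so conflicting uses are rejected.
 */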
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4] != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4] = use;

	return 0;
}

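/* Mark all 4B words of a map value touched by a constant-offset access
 * with the given use, after checking the access is within the value.
 */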
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}

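/* Common pointer checks for loads, stores and atomic ops: the pointer
 * type must be supported and must not change between verifier passes,
 * and uses of map value words are recorded.
 */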
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

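/* Stores to the context are only allowed to the XDP rx_queue_index
 * field, and only when the FW supports queue selection.
 */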
static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		    struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

	if (reg->type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
			/* XDP ctx accesses must be 4B in size */
			switch (meta->insn.off) {
			case offsetof(struct xdp_md, rx_queue_index):
				if (nfp_prog->bpf->queue_select)
					goto exit_check_ptr;
				pr_vlog(env, "queue selection not supported by FW\n");
				return -EOPNOTSUPP;
			}
		}
		pr_vlog(env, "unsupported store to context field\n");
		return -EOPNOTSUPP;
	}
exit_check_ptr:
	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

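/* Atomic add (BPF_XADD) is only supported on map value pointers with a
 * scalar operand; record whether the operand may and/or must exceed
 * 16 bits, as the JIT emits different sequences for the two cases.
 */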
static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

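/* Verifier callback invoked for every instruction: find our meta for
 * the instruction and dispatch to the NFP-specific checks.
 */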
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_store(nfp_prog, meta, env);

	if (is_mbpf_xadd(meta))
		return nfp_bpf_check_xadd(nfp_prog, meta, env);

	if (is_mbpf_indir_shift(meta)) {
		const struct bpf_reg_state *sreg =
			cur_regs(env) + meta->insn.src_reg;

		meta->umin = min(meta->umin, sreg->umin_value);
		meta->umax = max(meta->umax, sreg->umax_value);
	}

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};