/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"
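/*
 * Register usage and calling convention (the r_* mnemonics below are
 * the JIT register aliases defined in bpf_jit.h):
 *
 *	r_A		BPF accumulator
 *	r_X		BPF index register
 *	r_skb		pointer to the skb being filtered
 *	r_off		load offset (k or X)
 *	r_M		base of the scratch memory area M[]
 *	r_skb_data	skb->data
 *	r_skb_len	skb->len
 *	r_skb_hl	skb header length (skb->len - skb->data_len)
 *	r_ret		filter return value
 *	r_s0/r_s1, r_tmp/r_tmp_imm	scratch registers
 *
 * The sk_load_* helpers are called with a0 = skb and a1 = offset and
 * report success or failure through r_ret.
 */
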
#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))

/* JIT flags */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA, SEEN_SKB_HL, SEEN_SKB_LEN */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))

/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */

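/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */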
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};

static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k - 1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}

static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}

static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}

static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
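
/*
 * For example, emit_load_imm(dst, 0x12345678, ctx) expands to the
 * two-instruction sequence
 *	lui	r_tmp_imm, 0x1234
 *	ori	dst, r_tmp_imm, 0x5678
 * whereas a sign-extendable 16-bit immediate needs only
 *	addiu	dst, zero, imm
 */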

static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
			    struct jit_ctx *ctx)
{
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}

static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15-bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}

static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}

static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}

static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}

static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}

static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}

static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned int dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16 bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}

/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}

static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
					   unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lhu, reg, offset, base);
}

static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}

static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);	/* quotient is in LO */
	}
	ctx->idx += 2;	/* divu + mflo */
}

static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);	/* remainder is in HI */
	}
	ctx->idx += 2;	/* divu + mfhi */
}

static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}

/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}

/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
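
/*
 * Example expansion for a hypothetical 64-bit address 0xffffffff80112233
 * (upper word first, then merged in 16-bit chunks):
 *	addiu	r_tmp, zero, -1		# upper word 0xffffffff
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	r_tmp, r_tmp_imm, 0x8011
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	reg, r_tmp_imm, 0x2233
 */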

/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}

/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}

static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}

static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}

static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
	num = (num + (align - 1)) & -align;
	return num;
}
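
/* e.g. align_sp(12) == 16 with 8-byte alignment, align_sp(20) == 32 with 16-byte */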

static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	if (offset)
		emit_stack_offset(-align_sp(offset), ctx);

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}

static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i, real_off = 0;
	u32 sflags, tmp_flags;

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	i = 0;
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	if (offset)
		emit_stack_offset(align_sp(offset), ctx);
}

static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		sp_off += SZREG; /* Space for our ra register */

	return sp_off;
}
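
/*
 * Stack frame laid out by save_bpf_jit_regs() (offsets relative to
 * the new sp, growing upwards):
 *
 *	saved s-registers, one SZREG slot per SEEN_SREG bit
 *	saved ra (only if a helper is called, SEEN_CALL)
 *	optional SZREG pad so that r_M is 2*SZREG aligned
 *	BPF scratch memory M[0..BPF_MEMWORDS-1], 4 bytes per word
 */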

static void build_prologue(struct jit_ctx *ctx)
{
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_SKB_DATA) {
		/* Load packet length */
		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
			  ctx);
		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
			  ctx);
		/* Load the data pointer */
		emit_load_ptr(r_skb_data, r_skb,
			      offsetof(struct sk_buff, data), ctx);
		/* Load the header length */
		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
	}

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/*
	 * Do not leak kernel data to userspace, we only need to clear
	 * r_A if it is ever used.  In fact if it is never used, we
	 * will not save/restore it, so clearing it in this case would
	 * corrupt the state of the caller.
	 */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
	    (ctx->flags & SEEN_A))
		emit_jit_reg_move(r_A, r_zero, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}

/* Pick the matching sk_load_* variant based on the sign of the offset */
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
	 func##_positive)
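
/*
 * For instance, CHOOSE_LOAD_FUNC(k, sk_load_word) selects
 * sk_load_word_positive for k >= 0, sk_load_word_negative for
 * k in [SKF_LL_OFF, 0) and the generic sk_load_word, which must
 * validate the offset at run time, for anything below SKF_LL_OFF.
 */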

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, condt;
	u32 k, b_off __maybe_unused;
	u8 (*sk_load_func)(unsigned long *skb, int offset);

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);
		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
			emit_load_imm(r_off, k, ctx);
load_common:
			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;

			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* a0 = skb */
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
				   ctx);
			/* Load return register on DS for failures */
			emit_reg_move(r_ret, r_zero, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			sk_load_func = sk_load_word;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			sk_load_func = sk_load_half;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			sk_load_func = sk_load_byte;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			emit_bcond(MIPS_COND_NE, r_ret, 0,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			/* We are good */
			/* X <- P[1:K] & 0xf */
			emit_andi(r_X, r_A, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			break;
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				/* divide by a power of 2 is a shift */
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X: register OR needs the or instruction,
			 * not ori with a register number as immediate
			 */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_or(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit on
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;

		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(sizeof_field(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(sizeof_field(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->type */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
				off = offsetof(struct net_device, ifindex);
				emit_load(r_A, r_s0, off, ctx);
			} else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
				BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
				off = offsetof(struct net_device, type);
				emit_half_load_unsigned(r_A, r_s0, off, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(sizeof_field(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load_unsigned(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
			if (PKT_VLAN_PRESENT_BIT)
				emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
			if (PKT_VLAN_PRESENT_BIT < 7)
				emit_andi(r_A, r_A, 1, ctx);
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(sizeof_field(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load_unsigned(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}

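/*
 * The filter is translated in two passes: a sizing pass with
 * ctx.target == NULL that only counts instructions and records the
 * offset of every BPF instruction in ctx.offsets[], and a second pass
 * that emits the machine code into the buffer sized by the first one.
 */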
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}