1#include <linux/moduleloader.h>
2#include <linux/workqueue.h>
3#include <linux/netdevice.h>
4#include <linux/filter.h>
5#include <linux/cache.h>
6
7#include <asm/cacheflush.h>
8#include <asm/ptrace.h>
9
10#include "bpf_jit.h"
11
/* JIT on/off switch read by bpf_jit_compile(); values > 1 additionally
 * dump the generated image (see the pr_err/print_hex_dump calls there).
 */
int bpf_jit_enable __read_mostly;
13
/* Return true when @value, viewed as a signed 32-bit number, fits in a
 * sparc 13-bit signed immediate field (range [-4096, 4095]).
 */
static inline bool is_simm13(unsigned int value)
{
	/* Bias the signed range [-0x1000, 0xfff] up to [0, 0x1fff];
	 * unsigned wrap-around makes out-of-range values land above.
	 */
	return (value + 0x1000) <= 0x1fff;
}
18
/* Flush the instruction cache over the freshly written code in
 * [start_, end_).  A no-op except on sparc64 spitfire-type chips.
 */
static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
	/* NOTE(review): only the spitfire TLB type gets an explicit
	 * flush -- presumably later chips keep the I-cache coherent
	 * with stores; confirm against the arch cacheflush code.
	 */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		/* Round the range out to 8-byte boundaries. */
		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		/* flushi every 32 bytes (presumed I-cache line size). */
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
#endif
}
36
/* Flags accumulated in 'seen' during code generation; they decide
 * which prologue/epilogue pieces bpf_jit_compile() must emit.
 */
#define SEEN_DATAREF 1 /* code calls the skb data load helpers */
#define SEEN_XREG 2 /* r_X is used and must be cleared on entry */
#define SEEN_MEM 4 /* scratch mem[] slots are kept on the stack */

/* Sparc instruction-word field encoders. */
#define S13(X) ((X) & 0x1fff) /* 13-bit signed immediate field */
#define IMMED 0x00002000 /* 'i' bit: operand 2 is an immediate */
#define RD(X) ((X) << 25) /* destination register field */
#define RS1(X) ((X) << 14) /* first source register field */
#define RS2(X) ((X)) /* second source register field */
#define OP(X) ((X) << 30) /* primary opcode */
#define OP2(X) ((X) << 22) /* format-2 secondary opcode */
#define OP3(X) ((X) << 19) /* format-3 secondary opcode */
#define COND(X) ((X) << 25) /* branch condition field */
#define F1(X) OP(X) /* format 1 (call) */
#define F2(X, Y) (OP(X) | OP2(Y)) /* format 2 (sethi, branches) */
#define F3(X, Y) (OP(X) | OP3(Y)) /* format 3 (ALU, load/store) */

/* Integer condition codes. */
#define CONDN COND(0x0) /* never */
#define CONDE COND(0x1) /* equal */
#define CONDLE COND(0x2) /* less or equal (signed) */
#define CONDL COND(0x3) /* less (signed) */
#define CONDLEU COND(0x4) /* less or equal (unsigned) */
#define CONDCS COND(0x5) /* carry set */
#define CONDNEG COND(0x6) /* negative */
#define CONDVC COND(0x7) /* overflow clear */
#define CONDA COND(0x8) /* always */
#define CONDNE COND(0x9) /* not equal */
#define CONDG COND(0xa) /* greater (signed) */
#define CONDGE COND(0xb) /* greater or equal (signed) */
#define CONDGU COND(0xc) /* greater (unsigned) */
#define CONDCC COND(0xd) /* carry clear */
#define CONDPOS COND(0xe) /* positive */
#define CONDVS COND(0xf) /* overflow set */

/* Unsigned >= / < are carry-clear / carry-set after a subcc. */
#define CONDGEU CONDCC
#define CONDLU CONDCS

/* 22-bit word displacement field for conditional branches. */
#define WDISP22(X) (((X) >> 2) & 0x3fffff)

#define BA (F2(0, 2) | CONDA)
#define BGU (F2(0, 2) | CONDGU)
#define BLEU (F2(0, 2) | CONDLEU)
#define BGEU (F2(0, 2) | CONDGEU)
#define BLU (F2(0, 2) | CONDLU)
#define BE (F2(0, 2) | CONDE)
#define BNE (F2(0, 2) | CONDNE)

/* Branch-on-not-equal after a pointer-width compare: on sparc64 a
 * BPcc form with bit 21 set to select the 64-bit %xcc condition
 * codes, on 32-bit a plain BNE.
 */
#ifdef CONFIG_SPARC64
#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
#else
#define BNE_PTR BNE
#endif

/* Build a 32-bit constant in REG: sethi sets the upper 22 bits,
 * OR_LO merges in the low 10 bits.
 */
#define SETHI(K, REG) \
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG) \
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))

/* ALU / control opcodes (op3 values per the sparc ISA). */
#define ADD F3(2, 0x00)
#define AND F3(2, 0x01)
#define ANDCC F3(2, 0x11)
#define OR F3(2, 0x02)
#define XOR F3(2, 0x03)
#define SUB F3(2, 0x04)
#define SUBCC F3(2, 0x14)
#define MUL F3(2, 0x0a)
#define DIV F3(2, 0x0e)
#define SLL F3(2, 0x25)
#define SRL F3(2, 0x26)
#define JMPL F3(2, 0x38)
#define CALL F1(1)
#define BR F2(0, 0x01)
#define RD_Y F3(2, 0x28)
#define WR_Y F3(2, 0x30)

/* Load/store opcodes by access width. */
#define LD32 F3(3, 0x00)
#define LD8 F3(3, 0x01)
#define LD16 F3(3, 0x02)
#define LD64 F3(3, 0x0b)
#define ST32 F3(3, 0x04)

/* Pointer-sized load and the minimum stack frame per the ABI. */
#ifdef CONFIG_SPARC64
#define LDPTR LD64
#define BASE_STACKFRAME 176
#else
#define LDPTR LD32
#define BASE_STACKFRAME 96
#endif

/* Immediate-addressing variants ([rs1 + simm13]). */
#define LD32I (LD32 | IMMED)
#define LD8I (LD8 | IMMED)
#define LD16I (LD16 | IMMED)
#define LD64I (LD64 | IMMED)
#define LDPTRI (LDPTR | IMMED)
#define ST32I (ST32 | IMMED)
132
/* nop, encoded as sethi 0, %g0. */
#define emit_nop() \
do { \
	*prog++ = SETHI(0, G0); \
} while (0)

/* A = -A  (sub %g0, r_A, r_A). */
#define emit_neg() \
do { \
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \
} while (0)

/* TO = FROM, via or %g0, FROM, TO. */
#define emit_reg_move(FROM, TO) \
do { \
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \
} while (0)

/* REG = 0. */
#define emit_clear(REG) \
do { \
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \
} while (0)

/* REG = K for an arbitrary 32-bit K: sethi then or-low. */
#define emit_set_const(K, REG) \
do { \
	*prog++ = SETHI(K, REG); \
	\
	*prog++ = OR_LO(K, REG); \
} while (0)

/* A = A <op> X, recording that r_X is live. */
#define emit_alu_X(OPCODE) \
do { \
	seen |= SEEN_XREG; \
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \
} while (0)
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
/* A = A <op> K.
 *
 * For most opcodes K == 0 is an identity operation and no instruction
 * needs to be emitted, but "A &= 0" and "A *= 0" must still clear A,
 * so AND and MUL are always emitted.  (The original skipped K == 0
 * unconditionally, leaving A unchanged for those two opcodes.)  The
 * OPCODE comparisons fold at compile time since both sides are
 * constant expressions.
 */
#define emit_alu_K(OPCODE, K) \
do { \
	if (K || OPCODE == AND || OPCODE == MUL) { \
		unsigned int _insn = OPCODE; \
		_insn |= RS1(r_A) | RD(r_A); \
		if (is_simm13(K)) { \
			*prog++ = _insn | IMMED | S13(K); \
		} else { \
			emit_set_const(K, r_TMP); \
			*prog++ = _insn | RS2(r_TMP); \
		} \
	} \
} while (0)
197
/* DEST = K, using a single or-immediate when K fits in simm13. */
#define emit_loadimm(K, DEST) \
do { \
	if (is_simm13(K)) { \
		\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \
	} else { \
		emit_set_const(K, DEST); \
	} \
} while (0)

/* DEST = *(void **)(BASE + offsetof(STRUCT, FIELD)). */
#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

/* DEST = *(u32 *)(BASE + offsetof(STRUCT, FIELD)). */
#define emit_load32(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

/* DEST = *(u16 *)(BASE + offsetof(STRUCT, FIELD)). */
#define emit_load16(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

/* Byte load without the size BUILD_BUG_ON; used via
 * __emit_skb_load8() on pkt_type, where FIELD_SIZEOF cannot be
 * applied (presumably a bit field -- see the disabled PKTTYPE case).
 */
#define __emit_load8(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

/* DEST = *(u8 *)(BASE + offsetof(STRUCT, FIELD)), size-checked. */
#define emit_load8(BASE, STRUCT, FIELD, DEST) \
do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \
	__emit_load8(BASE, STRUCT, FIELD, DEST); \
} while (0)

/* DEST = scratch slot at OFF bytes below the frame pointer. */
#define emit_ldmem(OFF, DEST) \
do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \
} while (0)
239
/* Spill SRC to the scratch slot at OFF bytes below the frame pointer
 * (the inverse of emit_ldmem).  This must be a store: the original
 * emitted LD32I, which loaded the slot into SRC instead of writing
 * it, breaking BPF_S_ST / BPF_S_STX.
 */
#define emit_stmem(OFF, SRC) \
do {	*prog++ = ST32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \
} while (0)
243
/* REG = current CPU number (always 0 on !SMP); thread_info is
 * reached through register %g6, and the 'cpu' field width differs
 * between sparc64 and sparc32.
 */
#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG) \
	emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG) \
	emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG) emit_clear(REG)
#endif

/* sk_buff field loads relative to the skb pointer register. */
#define emit_skb_loadptr(FIELD, DEST) \
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST) \
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST) \
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST) \
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST) \
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

/* jmpl [BASE + IMM_OFF], LREG: indirect jump, link address in LREG. */
#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

/* PC-relative call to FUNC plus its mandatory delay-slot nop.
 * addrs[i] is the end offset of instruction i; the call + nop occupy
 * the last 8 bytes, so the call itself lands at image + addrs[i] - 8.
 */
#define emit_call(FUNC) \
do {	void *_here = image + addrs[i] - 8; \
	unsigned int _off = (void *)(FUNC) - _here; \
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff); \
	emit_nop(); \
} while (0)

/* Conditional branch to byte offset DEST.  addrs[i] - 8 stands in
 * for the branch's own position when computing the displacement;
 * callers adjust DEST when the branch sits elsewhere in the
 * instruction's sequence.
 */
#define emit_branch(BR_OPC, DEST) \
do {	unsigned int _here = addrs[i] - 8; \
	*prog++ = BR_OPC | WDISP22((DEST) - _here); \
} while (0)

/* Branch with a raw byte displacement relative to the branch. */
#define emit_branch_off(BR_OPC, OFF) \
do {	*prog++ = BR_OPC | WDISP22(OFF); \
} while (0)

/* Unconditional jump (branch-always). */
#define emit_jump(DEST) emit_branch(BA, DEST)

/* Read/write the %y register (used around MUL/DIV; see the DIV
 * handling in bpf_jit_compile).
 */
#define emit_read_y(REG) *prog++ = RD_Y | RD(REG)
#define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

/* Compare / bit-test helpers: only the condition codes are set
 * (destination is %g0).  Note the stray trailing ';' in the
 * immediate variants' expansions.
 */
#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

/* Three-operand add/sub helpers. */
#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

/* Grow / shrink the register window's stack frame by SZ bytes. */
#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350void bpf_jit_compile(struct sk_filter *fp)
351{
352 unsigned int cleanup_addr, proglen, oldproglen = 0;
353 u32 temp[8], *prog, *func, seen = 0, pass;
354 const struct sock_filter *filter = fp->insns;
355 int i, flen = fp->len, pc_ret0 = -1;
356 unsigned int *addrs;
357 void *image;
358
359 if (!bpf_jit_enable)
360 return;
361
362 addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
363 if (addrs == NULL)
364 return;
365
366
367
368
369 for (proglen = 0, i = 0; i < flen; i++) {
370 proglen += 64;
371 addrs[i] = proglen;
372 }
373 cleanup_addr = proglen;
374 image = NULL;
375 for (pass = 0; pass < 10; pass++) {
376 u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
377
378
379 proglen = 0;
380 prog = temp;
381
382
383 if (seen_or_pass0) {
384 if (seen_or_pass0 & SEEN_MEM) {
385 unsigned int sz = BASE_STACKFRAME;
386 sz += BPF_MEMWORDS * sizeof(u32);
387 emit_alloc_stack(sz);
388 }
389
390
391 if (seen_or_pass0 & SEEN_XREG)
392 emit_clear(r_X);
393
394
395
396
397
398
399
400
401 if (seen_or_pass0 & SEEN_DATAREF) {
402 emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
403 emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
404 emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
405 emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
406 }
407 }
408 emit_reg_move(O7, r_saved_O7);
409
410 switch (filter[0].code) {
411 case BPF_S_RET_K:
412 case BPF_S_LD_W_LEN:
413 case BPF_S_ANC_PROTOCOL:
414 case BPF_S_ANC_PKTTYPE:
415 case BPF_S_ANC_IFINDEX:
416 case BPF_S_ANC_MARK:
417 case BPF_S_ANC_RXHASH:
418 case BPF_S_ANC_CPU:
419 case BPF_S_ANC_QUEUE:
420 case BPF_S_LD_W_ABS:
421 case BPF_S_LD_H_ABS:
422 case BPF_S_LD_B_ABS:
423
424
425
426 break;
427 default:
428
429
430
431 emit_clear(r_A);
432 }
433
434 for (i = 0; i < flen; i++) {
435 unsigned int K = filter[i].k;
436 unsigned int t_offset;
437 unsigned int f_offset;
438 u32 t_op, f_op;
439 int ilen;
440
441 switch (filter[i].code) {
442 case BPF_S_ALU_ADD_X:
443 emit_alu_X(ADD);
444 break;
445 case BPF_S_ALU_ADD_K:
446 emit_alu_K(ADD, K);
447 break;
448 case BPF_S_ALU_SUB_X:
449 emit_alu_X(SUB);
450 break;
451 case BPF_S_ALU_SUB_K:
452 emit_alu_K(SUB, K);
453 break;
454 case BPF_S_ALU_AND_X:
455 emit_alu_X(AND);
456 break;
457 case BPF_S_ALU_AND_K:
458 emit_alu_K(AND, K);
459 break;
460 case BPF_S_ALU_OR_X:
461 emit_alu_X(OR);
462 break;
463 case BPF_S_ALU_OR_K:
464 emit_alu_K(OR, K);
465 break;
466 case BPF_S_ANC_ALU_XOR_X:
467 case BPF_S_ALU_XOR_X:
468 emit_alu_X(XOR);
469 break;
470 case BPF_S_ALU_XOR_K:
471 emit_alu_K(XOR, K);
472 break;
473 case BPF_S_ALU_LSH_X:
474 emit_alu_X(SLL);
475 break;
476 case BPF_S_ALU_LSH_K:
477 emit_alu_K(SLL, K);
478 break;
479 case BPF_S_ALU_RSH_X:
480 emit_alu_X(SRL);
481 break;
482 case BPF_S_ALU_RSH_K:
483 emit_alu_K(SRL, K);
484 break;
485 case BPF_S_ALU_MUL_X:
486 emit_alu_X(MUL);
487 break;
488 case BPF_S_ALU_MUL_K:
489 emit_alu_K(MUL, K);
490 break;
491 case BPF_S_ALU_DIV_K:
492 emit_alu_K(MUL, K);
493 emit_read_y(r_A);
494 break;
495 case BPF_S_ALU_DIV_X:
496 emit_cmpi(r_X, 0);
497 if (pc_ret0 > 0) {
498 t_offset = addrs[pc_ret0 - 1];
499#ifdef CONFIG_SPARC32
500 emit_branch(BE, t_offset + 20);
501#else
502 emit_branch(BE, t_offset + 8);
503#endif
504 emit_nop();
505 } else {
506 emit_branch_off(BNE, 16);
507 emit_nop();
508#ifdef CONFIG_SPARC32
509 emit_jump(cleanup_addr + 20);
510#else
511 emit_jump(cleanup_addr + 8);
512#endif
513 emit_clear(r_A);
514 }
515 emit_write_y(G0);
516#ifdef CONFIG_SPARC32
517
518
519
520
521 emit_nop();
522 emit_nop();
523 emit_nop();
524#endif
525 emit_alu_X(DIV);
526 break;
527 case BPF_S_ALU_NEG:
528 emit_neg();
529 break;
530 case BPF_S_RET_K:
531 if (!K) {
532 if (pc_ret0 == -1)
533 pc_ret0 = i;
534 emit_clear(r_A);
535 } else {
536 emit_loadimm(K, r_A);
537 }
538
539 case BPF_S_RET_A:
540 if (seen_or_pass0) {
541 if (i != flen - 1) {
542 emit_jump(cleanup_addr);
543 emit_nop();
544 break;
545 }
546 if (seen_or_pass0 & SEEN_MEM) {
547 unsigned int sz = BASE_STACKFRAME;
548 sz += BPF_MEMWORDS * sizeof(u32);
549 emit_release_stack(sz);
550 }
551 }
552
553 emit_jmpl(r_saved_O7, 8, G0);
554 emit_reg_move(r_A, O0);
555 break;
556 case BPF_S_MISC_TAX:
557 seen |= SEEN_XREG;
558 emit_reg_move(r_A, r_X);
559 break;
560 case BPF_S_MISC_TXA:
561 seen |= SEEN_XREG;
562 emit_reg_move(r_X, r_A);
563 break;
564 case BPF_S_ANC_CPU:
565 emit_load_cpu(r_A);
566 break;
567 case BPF_S_ANC_PROTOCOL:
568 emit_skb_load16(protocol, r_A);
569 break;
570#if 0
571
572
573
574
575 case BPF_S_ANC_PKTTYPE:
576 __emit_skb_load8(pkt_type, r_A);
577 emit_alu_K(SRL, 5);
578 break;
579#endif
580 case BPF_S_ANC_IFINDEX:
581 emit_skb_loadptr(dev, r_A);
582 emit_cmpi(r_A, 0);
583 emit_branch(BNE_PTR, cleanup_addr + 4);
584 emit_nop();
585 emit_load32(r_A, struct net_device, ifindex, r_A);
586 break;
587 case BPF_S_ANC_MARK:
588 emit_skb_load32(mark, r_A);
589 break;
590 case BPF_S_ANC_QUEUE:
591 emit_skb_load16(queue_mapping, r_A);
592 break;
593 case BPF_S_ANC_HATYPE:
594 emit_skb_loadptr(dev, r_A);
595 emit_cmpi(r_A, 0);
596 emit_branch(BNE_PTR, cleanup_addr + 4);
597 emit_nop();
598 emit_load16(r_A, struct net_device, type, r_A);
599 break;
600 case BPF_S_ANC_RXHASH:
601 emit_skb_load32(rxhash, r_A);
602 break;
603
604 case BPF_S_LD_IMM:
605 emit_loadimm(K, r_A);
606 break;
607 case BPF_S_LDX_IMM:
608 emit_loadimm(K, r_X);
609 break;
610 case BPF_S_LD_MEM:
611 emit_ldmem(K * 4, r_A);
612 break;
613 case BPF_S_LDX_MEM:
614 emit_ldmem(K * 4, r_X);
615 break;
616 case BPF_S_ST:
617 emit_stmem(K * 4, r_A);
618 break;
619 case BPF_S_STX:
620 emit_stmem(K * 4, r_X);
621 break;
622
623#define CHOOSE_LOAD_FUNC(K, func) \
624 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
625
626 case BPF_S_LD_W_ABS:
627 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
628common_load: seen |= SEEN_DATAREF;
629 emit_loadimm(K, r_OFF);
630 emit_call(func);
631 break;
632 case BPF_S_LD_H_ABS:
633 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
634 goto common_load;
635 case BPF_S_LD_B_ABS:
636 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
637 goto common_load;
638 case BPF_S_LDX_B_MSH:
639 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
640 goto common_load;
641 case BPF_S_LD_W_IND:
642 func = bpf_jit_load_word;
643common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
644 if (K) {
645 if (is_simm13(K)) {
646 emit_addi(r_X, K, r_OFF);
647 } else {
648 emit_loadimm(K, r_TMP);
649 emit_add(r_X, r_TMP, r_OFF);
650 }
651 } else {
652 emit_reg_move(r_X, r_OFF);
653 }
654 emit_call(func);
655 break;
656 case BPF_S_LD_H_IND:
657 func = bpf_jit_load_half;
658 goto common_load_ind;
659 case BPF_S_LD_B_IND:
660 func = bpf_jit_load_byte;
661 goto common_load_ind;
662 case BPF_S_JMP_JA:
663 emit_jump(addrs[i + K]);
664 emit_nop();
665 break;
666
667#define COND_SEL(CODE, TOP, FOP) \
668 case CODE: \
669 t_op = TOP; \
670 f_op = FOP; \
671 goto cond_branch
672
673 COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
674 COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
675 COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
676 COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
677 COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
678 COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
679 COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
680 COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
681
682cond_branch: f_offset = addrs[i + filter[i].jf];
683 t_offset = addrs[i + filter[i].jt];
684
685
686 if (filter[i].jt == filter[i].jf) {
687 emit_jump(t_offset);
688 emit_nop();
689 break;
690 }
691
692 switch (filter[i].code) {
693 case BPF_S_JMP_JGT_X:
694 case BPF_S_JMP_JGE_X:
695 case BPF_S_JMP_JEQ_X:
696 seen |= SEEN_XREG;
697 emit_cmp(r_A, r_X);
698 break;
699 case BPF_S_JMP_JSET_X:
700 seen |= SEEN_XREG;
701 emit_btst(r_A, r_X);
702 break;
703 case BPF_S_JMP_JEQ_K:
704 case BPF_S_JMP_JGT_K:
705 case BPF_S_JMP_JGE_K:
706 if (is_simm13(K)) {
707 emit_cmpi(r_A, K);
708 } else {
709 emit_loadimm(K, r_TMP);
710 emit_cmp(r_A, r_TMP);
711 }
712 break;
713 case BPF_S_JMP_JSET_K:
714 if (is_simm13(K)) {
715 emit_btsti(r_A, K);
716 } else {
717 emit_loadimm(K, r_TMP);
718 emit_btst(r_A, r_TMP);
719 }
720 break;
721 }
722 if (filter[i].jt != 0) {
723 if (filter[i].jf)
724 t_offset += 8;
725 emit_branch(t_op, t_offset);
726 emit_nop();
727 if (filter[i].jf) {
728 emit_jump(f_offset);
729 emit_nop();
730 }
731 break;
732 }
733 emit_branch(f_op, f_offset);
734 emit_nop();
735 break;
736
737 default:
738
739 goto out;
740 }
741 ilen = (void *) prog - (void *) temp;
742 if (image) {
743 if (unlikely(proglen + ilen > oldproglen)) {
744 pr_err("bpb_jit_compile fatal error\n");
745 kfree(addrs);
746 module_free(NULL, image);
747 return;
748 }
749 memcpy(image + proglen, temp, ilen);
750 }
751 proglen += ilen;
752 addrs[i] = proglen;
753 prog = temp;
754 }
755
756
757
758 cleanup_addr = proglen - 8;
759 if (seen_or_pass0 & SEEN_MEM)
760 cleanup_addr -= 4;
761
762 if (image) {
763 if (proglen != oldproglen)
764 pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n",
765 proglen, oldproglen);
766 break;
767 }
768 if (proglen == oldproglen) {
769 image = module_alloc(max_t(unsigned int,
770 proglen,
771 sizeof(struct work_struct)));
772 if (!image)
773 goto out;
774 }
775 oldproglen = proglen;
776 }
777
778 if (bpf_jit_enable > 1)
779 pr_err("flen=%d proglen=%u pass=%d image=%p\n",
780 flen, proglen, pass, image);
781
782 if (image) {
783 if (bpf_jit_enable > 1)
784 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
785 16, 1, image, proglen, false);
786 bpf_flush_icache(image, image + proglen);
787 fp->bpf_func = (void *)image;
788 }
789out:
790 kfree(addrs);
791 return;
792}
793
/* Deferred-free work handler for bpf_jit_free(): the work_struct
 * argument IS the JITed image (the image doubles as the work item),
 * so free it directly.
 */
static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}
798
799
800
801
802void bpf_jit_free(struct sk_filter *fp)
803{
804 if (fp->bpf_func != sk_run_filter) {
805 struct work_struct *work = (struct work_struct *)fp->bpf_func;
806
807 INIT_WORK(work, jit_free_defer);
808 schedule_work(work);
809 }
810}
811