/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/random.h>

/*
 * Conventions :
 *  EAX : BPF A accumulator
 *  EBX : BPF X accumulator
 *  RDI : pointer to skb   (first argument given to JIT function)
 *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 *  ECX,EDX,ESI : scratch registers
 *  r9d : skb->len - skb->data_len (headlen)
 *  r8  : skb->data
 * -8(%rbp) : saved RBX value
 * -16(%rbp)..-80(%rbp) : BPF_MEMWORDS values
 */
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

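/* emit_code() stores the 'len' low-order bytes of 'bytes' at 'ptr' ;
 * the EMIT*() helpers below append one to four opcode bytes to the
 * JIT image through the local 'prog' cursor.
 */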
#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4); } while (0)

#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_near(int offset)
{
	return offset <= 127 && offset >= -128;
}

#define EMIT_JMP(offset)						\
do {									\
	if (offset) {							\
		if (is_near(offset))					\
			EMIT2(0xeb, offset); /* jmp .+off8 */		\
		else							\
			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
	}								\
} while (0)

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77

#define EMIT_COND_JMP(op, offset)				\
do {								\
	if (is_near(offset))					\
		EMIT2(op, offset); /* jcc .+off8 */		\
	else {							\
		EMIT2(0x0f, op + 0x10);				\
		EMIT(offset, 4); /* jcc .+off32 */		\
	}							\
} while (0)

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* ebx is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */

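/* x86 keeps its instruction cache coherent with stores, so this is
 * mostly a compile/store barrier before the new image is executed.
 */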
static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

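/* For a constant negative offset K, pick the load helper at compile time :
 * values in the ancillary range (SKF_LL_OFF <= K < 0) can use the
 * negative-offset variant directly, anything else negative falls back to
 * the generic helper that classifies the offset at run time.
 */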
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Helper to find the offset of pkt_type in sk_buff :
 * we want to make sure it's still a 3-bit field starting at a byte boundary.
 */
#define PKT_TYPE_MAX 7
static int pkt_type_offset(void)
{
	struct sk_buff skb_probe = {
		.pkt_type = ~0,
	};
	char *ct = (char *)&skb_probe;
	unsigned int off;

	for (off = 0; off < sizeof(struct sk_buff); off++) {
		if (ct[off] == PKT_TYPE_MAX)
			return off;
	}
	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
	return -1;
}

struct bpf_binary_header {
	unsigned int	pages;
	/* Note : for security reasons, bpf code will follow a randomly
	 * sized amount of int3 instructions
	 */
	u8		image[];
};

static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
						  u8 **image_ptr)
{
	unsigned int sz, hole;
	struct bpf_binary_header *header;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes so a random
	 * section of int3 padding can precede the program.
	 */
	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;

	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

	header->pages = sz / PAGE_SIZE;
	hole = sz - (proglen + sizeof(*header));

	/* insert a random number of int3 instructions before BPF code */
	*image_ptr = &header->image[prandom_u32() % hole];
	return header;
}

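/* Translate the classic BPF filter in fp->insns into x86-64 code.
 * The instructions are re-emitted for up to ten passes, until the
 * branch offsets recorded in addrs[] converge ; only then is the
 * final image copied into executable memory.
 */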
void bpf_jit_compile(struct sk_filter *fp)
{
	u8 temp[64];
	u8 *prog;
	unsigned int proglen, oldproglen = 0;
	int ilen, i;
	int t_offset, f_offset;
	u8 t_op, f_op, seen = 0, pass;
	u8 *image = NULL;
	struct bpf_binary_header *header = NULL;
	u8 *func;
	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
	unsigned int cleanup_addr; /* epilogue code offset */
	unsigned int *addrs;
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before first pass, make a rough estimation of addrs[] :
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */

	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		if (seen_or_pass0) {
			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
			EMIT4(0x48, 0x83, 0xec, 96);	/* subq $96,%rsp */

			/* note : must save %rbx in case bpf_error is hit */
			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
			if (seen_or_pass0 & SEEN_XREG)
				CLEAR_X(); /* make sure we don't leak kernel memory */

			/*
			 * If this filter needs to access skb data,
			 * load %r9 and %r8 with :
			 *  r9 = skb->len - skb->data_len
			 *  r8 = skb->data
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				if (offsetof(struct sk_buff, len) <= 127)
					/* mov off8(%rdi),%r9d */
					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
				else {
					/* mov off32(%rdi),%r9d */
					EMIT3(0x44, 0x8b, 0x8f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				if (is_imm8(offsetof(struct sk_buff, data_len)))
					/* sub off8(%rdi),%r9d */
					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
				else {
					EMIT3(0x44, 0x2b, 0x8f); /* sub off32(%rdi),%r9d */
					EMIT(offsetof(struct sk_buff, data_len), 4);
				}

				if (is_imm8(offsetof(struct sk_buff, data)))
					/* mov off8(%rdi),%r8 */
					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
				else {
					/* mov off32(%rdi),%r8 */
					EMIT3(0x4c, 0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, data), 4);
				}
			}
		}

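		/* Avoid clearing A when the first instruction overwrites it
		 * anyway (or returns a constant) : one less xor on the vast
		 * majority of filters.
		 */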
		switch (filter[0].code) {
		case BPF_S_RET_K:
		case BPF_S_LD_W_LEN:
		case BPF_S_ANC_PROTOCOL:
		case BPF_S_ANC_IFINDEX:
		case BPF_S_ANC_MARK:
		case BPF_S_ANC_RXHASH:
		case BPF_S_ANC_CPU:
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
		case BPF_S_ANC_QUEUE:
		case BPF_S_ANC_PKTTYPE:
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* first instruction sets A register (or is RET 'constant') */
			break;
		default:
			/* make sure we don't leak kernel information to user */
			CLEAR_A(); /* A = 0 */
		}

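		/* Translate each BPF instruction into temp[], then append it
		 * to the image (final pass only) ; addrs[i] records the end
		 * offset of instruction i for branch resolution.
		 */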
		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X: /* A += X; */
				seen |= SEEN_XREG;
				EMIT2(0x01, 0xd8);		/* add %ebx,%eax */
				break;
			case BPF_S_ALU_ADD_K: /* A += K; */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
				else
					EMIT1_off32(0x05, K);	/* add imm32,%eax */
				break;
			case BPF_S_ALU_SUB_X: /* A -= X; */
				seen |= SEEN_XREG;
				EMIT2(0x29, 0xd8);		/* sub %ebx,%eax */
				break;
			case BPF_S_ALU_SUB_K: /* A -= K; */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xe8, K);	/* sub imm8,%eax */
				else
					EMIT1_off32(0x2d, K);	/* sub imm32,%eax */
				break;
			case BPF_S_ALU_MUL_X: /* A *= X; */
				seen |= SEEN_XREG;
				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
				break;
			case BPF_S_ALU_MUL_K: /* A *= K; */
				if (is_imm8(K))
					EMIT3(0x6b, 0xc0, K);	/* imul imm8,%eax,%eax */
				else {
					EMIT2(0x69, 0xc0);	/* imul imm32,%eax */
					EMIT(K, 4);
				}
				break;
			case BPF_S_ALU_DIV_X: /* A /= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is the start address of the
					 * target (addrs[i] - 4) is the address following
					 * this jmp ("xor %edx,%edx; div %ebx" being 4
					 * bytes long)
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 4));
				} else {
					/* jump over CLEAR_A() + jmp (2 + 5 bytes) */
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
				}
				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
				break;
			case BPF_S_ALU_MOD_X: /* A %= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is the start address of the
					 * target (addrs[i] - 6) is the address following
					 * this jmp ("xor %edx,%edx; div %ebx; mov
					 * %edx,%eax" being 6 bytes long)
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 6));
				} else {
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
				}
				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
				EMIT2(0xf7, 0xf3);	/* div %ebx */
				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
				break;
			case BPF_S_ALU_MOD_K: /* A %= K; */
				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
				EMIT1(0xb9); EMIT(K, 4);	/* mov imm32,%ecx */
				EMIT2(0xf7, 0xf1);	/* div %ecx */
				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
				break;
			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
				EMIT(K, 4);
				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
				break;
			case BPF_S_ALU_AND_X:
				seen |= SEEN_XREG;
				EMIT2(0x21, 0xd8);		/* and %ebx,%eax */
				break;
			case BPF_S_ALU_AND_K:
				if (K >= 0xFFFFFF00) {
					EMIT2(0x24, K & 0xFF);	/* and imm8,%al */
				} else if (K >= 0xFFFF0000) {
					EMIT2(0x66, 0x25);	/* and imm16,%ax */
					EMIT(K, 2);
				} else {
					EMIT1_off32(0x25, K);	/* and imm32,%eax */
				}
				break;
			case BPF_S_ALU_OR_X:
				seen |= SEEN_XREG;
				EMIT2(0x09, 0xd8);		/* or %ebx,%eax */
				break;
			case BPF_S_ALU_OR_K:
				if (is_imm8(K))
					EMIT3(0x83, 0xc8, K);	/* or imm8,%eax */
				else
					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
				break;
			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
			case BPF_S_ALU_XOR_X:
				seen |= SEEN_XREG;
				EMIT2(0x31, 0xd8);		/* xor %ebx,%eax */
				break;
			case BPF_S_ALU_XOR_K: /* A ^= K; */
				if (K == 0)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xf0, K);	/* xor imm8,%eax */
				else
					EMIT1_off32(0x35, K);	/* xor imm32,%eax */
				break;
			case BPF_S_ALU_LSH_X: /* A <<= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
				break;
			case BPF_S_ALU_LSH_K:
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe0);	/* shl %eax */
				else
					EMIT3(0xc1, 0xe0, K);	/* shl imm8,%eax */
				break;
			case BPF_S_ALU_RSH_X: /* A >>= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
				break;
			case BPF_S_ALU_RSH_K: /* A >>= K; */
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe8);	/* shr %eax */
				else
					EMIT3(0xc1, 0xe8, K);	/* shr imm8,%eax */
				break;
			case BPF_S_ALU_NEG:
				EMIT2(0xf7, 0xd8);		/* neg %eax */
				break;
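			/* The index of the first "RET 0" instruction is kept in
			 * pc_ret0 : the divide-by-zero tests above reuse it as a
			 * ready-made "return 0" target.
			 */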
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					CLEAR_A();
				} else {
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				}
				/* fall through */
			case BPF_S_RET_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						EMIT_JMP(cleanup_addr - addrs[i]);
						break;
					}
					if (seen_or_pass0 & SEEN_XREG)
						EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
					EMIT1(0xc9);	/* leaveq */
				}
				EMIT1(0xc3);	/* ret */
				break;
			case BPF_S_MISC_TAX: /* X = A */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xc3);	/* mov %eax,%ebx */
				break;
			case BPF_S_MISC_TXA: /* A = X */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xd8);	/* mov %ebx,%eax */
				break;
			case BPF_S_LD_IMM: /* A = K */
				if (!K)
					CLEAR_A();
				else
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				break;
			case BPF_S_LDX_IMM: /* X = K */
				seen |= SEEN_XREG;
				if (!K)
					CLEAR_X();
				else
					EMIT1_off32(0xbb, K);	/* mov $imm32,%ebx */
				break;
			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
				seen |= SEEN_MEM;
				EMIT3(0x8b, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_ST: /* mem[K] = A : mov %eax, off8(%rbp) */
				seen |= SEEN_MEM;
				EMIT3(0x89, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_STX: /* mem[K] = X : mov %ebx, off8(%rbp) */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x89, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_LD_W_LEN: /* A = skb->len; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x87); /* mov off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_LDX_W_LEN: /* X = skb->len; */
				seen |= SEEN_XREG;
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%ebx */
					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x9f); /* mov off32(%rdi),%ebx */
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
				if (is_imm8(offsetof(struct sk_buff, protocol))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, protocol), 4);
				}
				EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
				break;
			case BPF_S_ANC_IFINDEX: /* A = skb->dev->ifindex; */
				if (is_imm8(offsetof(struct sk_buff, dev))) {
					/* movq off8(%rdi),%rax */
					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
				} else {
					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
					EMIT(offsetof(struct sk_buff, dev), 4);
				}
				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
				/* if (!dev) goto epilogue (return 0) */
				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
				EMIT(offsetof(struct net_device, ifindex), 4);
				break;
			case BPF_S_ANC_MARK: /* A = skb->mark; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
				if (is_imm8(offsetof(struct sk_buff, mark))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
				} else {
					EMIT2(0x8b, 0x87); /* mov off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, mark), 4);
				}
				break;
			case BPF_S_ANC_RXHASH: /* A = skb->rxhash; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
				if (is_imm8(offsetof(struct sk_buff, rxhash))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
				} else {
					EMIT2(0x8b, 0x87); /* mov off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, rxhash), 4);
				}
				break;
			case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
				}
				break;
			case BPF_S_ANC_CPU: /* A = smp_processor_id(); */
#ifdef CONFIG_SMP
				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
				EMIT((u32)(unsigned long)&cpu_number, 4);
#else
				CLEAR_A();
#endif
				break;
			case BPF_S_ANC_VLAN_TAG:
			case BPF_S_ANC_VLAN_TAG_PRESENT:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
				if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, vlan_tci), 4);
				}
				BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
					EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah : clear VLAN_TAG_PRESENT */
				} else {
					EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
					EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
				}
				break;
			case BPF_S_ANC_PKTTYPE: /* A = skb->pkt_type; */
			{
				int off = pkt_type_offset();

				if (off < 0)
					goto out;
				if (is_imm8(off)) {
					/* movzbl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb6, 0x47, off);
				} else {
					/* movzbl off32(%rdi),%eax */
					EMIT3(0x0f, 0xb6, 0x87);
					EMIT(off, 4);
				}
				EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
				break;
			}
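			/* Packet loads go through out-of-line helpers in
			 * bpf_jit.S : the offset K (plus X for indirect loads) is
			 * passed in %esi, the result comes back in %eax, and a
			 * failed load aborts the filter, returning 0.
			 */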
			case BPF_S_LD_W_ABS: /* A = *(u32 *)(skb->data + K); */
				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
common_load:			seen |= SEEN_DATAREF;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call */
				break;
			case BPF_S_LD_H_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
				goto common_load;
			case BPF_S_LDX_B_MSH: /* X = (*(u8 *)(skb->data + K) & 0xf) << 2; */
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
				seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
				break;
			case BPF_S_LD_W_IND: /* A = *(u32 *)(skb->data + X + K); */
				func = sk_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				if (K) {
					if (is_imm8(K)) {
						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx),%esi */
					} else {
						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
						EMIT(K, 4);
					}
				} else {
					EMIT2(0x89, 0xde); /* mov %ebx,%esi */
				}
				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
				break;
			case BPF_S_LD_H_IND:
				func = sk_load_half;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = sk_load_byte;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				t_offset = addrs[i + K] - addrs[i];
				EMIT_JMP(t_offset);
				break;
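			/* COND_SEL() gives each conditional BPF jump two x86
			 * condition codes : t_op when the test is true, f_op for
			 * the inverted test.
			 */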
			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);

cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
				t_offset = addrs[i + filter[i].jt] - addrs[i];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					EMIT_JMP(t_offset);
					break;
				}

				switch (filter[i].code) {
				case BPF_S_JMP_JGT_X:
				case BPF_S_JMP_JGE_X:
				case BPF_S_JMP_JEQ_X:
					seen |= SEEN_XREG;
					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
					break;
				case BPF_S_JMP_JSET_X:
					seen |= SEEN_XREG;
					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
					break;
				case BPF_S_JMP_JEQ_K:
					if (K == 0) {
						EMIT2(0x85, 0xc0); /* test %eax,%eax */
						break;
					}
					/* fall through */
				case BPF_S_JMP_JGT_K:
				case BPF_S_JMP_JGE_K:
					if (K <= 127)
						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
					else
						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
					break;
				case BPF_S_JMP_JSET_K:
					if (K <= 0xFF)
						EMIT2(0xa8, K); /* test imm8,%al */
					else if (!(K & 0xFFFF00FF))
						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
					else if (K <= 0xFFFF) {
						EMIT2(0x66, 0xa9); /* test imm16,%ax */
						EMIT(K, 2);
					} else {
						EMIT1_off32(0xa9, K); /* test imm32,%eax */
					}
					break;
				}
				if (filter[i].jt != 0) {
					/* t_offset is relative to the end of the
					 * conditional jump : account for the 2/5 byte
					 * jmp to the false target emitted after it
					 */
					if (filter[i].jf && f_offset)
						t_offset += is_near(f_offset) ? 2 : 5;
					EMIT_COND_JMP(t_op, t_offset);
					if (filter[i].jf)
						EMIT_JMP(f_offset);
					break;
				}
				EMIT_COND_JMP(f_op, f_offset);
				break;
			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = prog - temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, header);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}

		/* Last bpf instruction is always a RET :
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 1; /* ret */
		if (seen_or_pass0)
			cleanup_addr -= 1; /* leaveq */
		if (seen_or_pass0 & SEEN_XREG)
			cleanup_addr -= 4; /* mov -8(%rbp),%rbx */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_alloc_binary(proglen, &image);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

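	/* bpf_jit_enable > 1 additionally asks for a hex dump of the image */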
	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

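/* bpf_jit_free() may run in softirq context when the last reference to a
 * filter is dropped, while set_memory_rw() and module_free() need process
 * context : defer the actual freeing to a work queue.
 */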
static void bpf_jit_free_deferred(struct work_struct *work)
{
	struct sk_filter *fp = container_of(work, struct sk_filter, work);
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	set_memory_rw(addr, header->pages); /* undo set_memory_ro() before freeing */
	module_free(NULL, header);
	kfree(fp);
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		INIT_WORK(&fp->work, bpf_jit_free_deferred);
		schedule_work(&fp->work);
	} else {
		/* the filter was never JITed : free it directly */
		kfree(fp);
	}
}