1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/socket.h>
20#include <linux/net.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/string.h>
24#include <linux/vmalloc.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/audit.h>
29#include <linux/user_namespace.h>
30#include <net/net_namespace.h>
31
32#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter_arp.h>
34#include <linux/netfilter_ipv4/ip_tables.h>
35#include <linux/netfilter_ipv6/ip6_tables.h>
36#include <linux/netfilter_arp/arp_tables.h>
37
38MODULE_LICENSE("GPL");
39MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
40MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
41
/* Round @x up to the next multiple of the CPU cache line size. */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

/* One entry in the compat offset-translation table: entries located at
 * @offset in the compat blob are shifted by (cumulative) @delta bytes
 * when converted to the native layout. */
struct compat_delta {
	unsigned int offset;
	int delta;
};

/* Per-address-family extension registry. */
struct xt_af {
	struct mutex mutex;		/* protects the match and target lists */
	struct list_head match;		/* registered xt_match extensions */
	struct list_head target;	/* registered xt_target extensions */
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;	/* serializes compat translation */
	struct compat_delta *compat_tab; /* cumulative offset->delta table */
	unsigned int number;		/* capacity of compat_tab */
	unsigned int cur;		/* entries currently stored */
#endif
};

/* One xt_af per NFPROTO_* family; presumably allocated at module init
 * (init code not visible in this chunk). */
static struct xt_af *xt;

/* Module-name prefixes used to autoload "<prefix>t_<name>" extensions.
 * Families without an entry (e.g. NFPROTO_INET) are NULL here. */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6] = "ip6",
};
70
71
72int xt_register_target(struct xt_target *target)
73{
74 u_int8_t af = target->family;
75
76 mutex_lock(&xt[af].mutex);
77 list_add(&target->list, &xt[af].target);
78 mutex_unlock(&xt[af].mutex);
79 return 0;
80}
81EXPORT_SYMBOL(xt_register_target);
82
83void
84xt_unregister_target(struct xt_target *target)
85{
86 u_int8_t af = target->family;
87
88 mutex_lock(&xt[af].mutex);
89 list_del(&target->list);
90 mutex_unlock(&xt[af].mutex);
91}
92EXPORT_SYMBOL(xt_unregister_target);
93
94int
95xt_register_targets(struct xt_target *target, unsigned int n)
96{
97 unsigned int i;
98 int err = 0;
99
100 for (i = 0; i < n; i++) {
101 err = xt_register_target(&target[i]);
102 if (err)
103 goto err;
104 }
105 return err;
106
107err:
108 if (i > 0)
109 xt_unregister_targets(target, i);
110 return err;
111}
112EXPORT_SYMBOL(xt_register_targets);
113
114void
115xt_unregister_targets(struct xt_target *target, unsigned int n)
116{
117 while (n-- > 0)
118 xt_unregister_target(&target[n]);
119}
120EXPORT_SYMBOL(xt_unregister_targets);
121
122int xt_register_match(struct xt_match *match)
123{
124 u_int8_t af = match->family;
125
126 mutex_lock(&xt[af].mutex);
127 list_add(&match->list, &xt[af].match);
128 mutex_unlock(&xt[af].mutex);
129 return 0;
130}
131EXPORT_SYMBOL(xt_register_match);
132
133void
134xt_unregister_match(struct xt_match *match)
135{
136 u_int8_t af = match->family;
137
138 mutex_lock(&xt[af].mutex);
139 list_del(&match->list);
140 mutex_unlock(&xt[af].mutex);
141}
142EXPORT_SYMBOL(xt_unregister_match);
143
144int
145xt_register_matches(struct xt_match *match, unsigned int n)
146{
147 unsigned int i;
148 int err = 0;
149
150 for (i = 0; i < n; i++) {
151 err = xt_register_match(&match[i]);
152 if (err)
153 goto err;
154 }
155 return err;
156
157err:
158 if (i > 0)
159 xt_unregister_matches(match, i);
160 return err;
161}
162EXPORT_SYMBOL(xt_register_matches);
163
164void
165xt_unregister_matches(struct xt_match *match, unsigned int n)
166{
167 while (n-- > 0)
168 xt_unregister_match(&match[n]);
169}
170EXPORT_SYMBOL(xt_unregister_matches);
171
172
173
174
175
176
177
178
179
/**
 * xt_find_match - look up a match extension by family, name and revision
 * @af:       address family (caller must pass a valid NFPROTO_* value;
 *            no bounds check is done here — NOTE(review): confirm callers)
 * @name:     extension name
 * @revision: required revision
 *
 * Scans the per-family list under xt[af].mutex; on a hit, takes a
 * reference on the owning module and returns with the mutex released.
 * A name match with the wrong revision yields -EPROTOTYPE instead of
 * -ENOENT.  Family-specific misses fall back to the family-independent
 * (NFPROTO_UNSPEC) list.  Returns the match or an ERR_PTR().
 */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* name found, wrong revision */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
206
207struct xt_match *
208xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
209{
210 struct xt_match *match;
211
212 match = xt_find_match(nfproto, name, revision);
213 if (IS_ERR(match)) {
214 request_module("%st_%s", xt_prefix[nfproto], name);
215 match = xt_find_match(nfproto, name, revision);
216 }
217
218 return match;
219}
220EXPORT_SYMBOL_GPL(xt_request_find_match);
221
222
/**
 * xt_find_target - look up a target extension by family, name and revision
 * @af:       address family (caller must pass a valid NFPROTO_* value;
 *            no bounds check is done here — NOTE(review): confirm callers)
 * @name:     extension name
 * @revision: required revision
 *
 * Mirror of xt_find_match() for targets: scans the per-family list under
 * xt[af].mutex, takes a module reference on success, distinguishes
 * wrong-revision (-EPROTOTYPE) from not-found (-ENOENT), and falls back
 * to the NFPROTO_UNSPEC list.  Returns the target or an ERR_PTR().
 */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* name found, wrong revision */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
249
250struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
251{
252 struct xt_target *target;
253
254 target = xt_find_target(af, name, revision);
255 if (IS_ERR(target)) {
256 request_module("%st_%s", xt_prefix[af], name);
257 target = xt_find_target(af, name, revision);
258 }
259
260 return target;
261}
262EXPORT_SYMBOL_GPL(xt_request_find_target);
263
264static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
265{
266 const struct xt_match *m;
267 int have_rev = 0;
268
269 list_for_each_entry(m, &xt[af].match, list) {
270 if (strcmp(m->name, name) == 0) {
271 if (m->revision > *bestp)
272 *bestp = m->revision;
273 if (m->revision == revision)
274 have_rev = 1;
275 }
276 }
277
278 if (af != NFPROTO_UNSPEC && !have_rev)
279 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
280
281 return have_rev;
282}
283
284static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
285{
286 const struct xt_target *t;
287 int have_rev = 0;
288
289 list_for_each_entry(t, &xt[af].target, list) {
290 if (strcmp(t->name, name) == 0) {
291 if (t->revision > *bestp)
292 *bestp = t->revision;
293 if (t->revision == revision)
294 have_rev = 1;
295 }
296 }
297
298 if (af != NFPROTO_UNSPEC && !have_rev)
299 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
300
301 return have_rev;
302}
303
304
/**
 * xt_find_revision - probe which revisions of an extension are available
 * @af:       address family to search
 * @name:     extension name
 * @revision: revision user space asked about
 * @target:   1 to search targets, anything else to search matches
 * @err:      out parameter, see below
 *
 * Returns 0 with *err = -ENOENT when no revision of @name exists at all.
 * Otherwise returns 1 with *err set to the highest registered revision,
 * or to -EPROTONOSUPPORT when @revision specifically is absent.
 */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* best stays -1 only when nothing by that name was found */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
329
330static char *
331textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
332{
333 static const char *const inetbr_names[] = {
334 "PREROUTING", "INPUT", "FORWARD",
335 "OUTPUT", "POSTROUTING", "BROUTING",
336 };
337 static const char *const arp_names[] = {
338 "INPUT", "FORWARD", "OUTPUT",
339 };
340 const char *const *names;
341 unsigned int i, max;
342 char *p = buf;
343 bool np = false;
344 int res;
345
346 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
347 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
348 ARRAY_SIZE(inetbr_names);
349 *p = '\0';
350 for (i = 0; i < max; ++i) {
351 if (!(mask & (1 << i)))
352 continue;
353 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
354 if (res > 0) {
355 size -= res;
356 p += res;
357 }
358 np = true;
359 }
360
361 return buf;
362}
363
/**
 * xt_check_match - validate a match before it is used in a rule
 * @par:       check parameters (family, table, hook mask, entry info)
 * @size:      size of the match payload supplied by user space
 * @proto:     protocol the rule matches on (0 = any)
 * @inv_proto: true when the rule's protocol test is inverted
 *
 * Verifies payload size, table restriction, allowed hooks and protocol
 * against the registered extension, then runs the extension's own
 * checkentry() hook.  Returns 0 on success or a negative errno.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/* matchsize == -1 marks extensions whose payload size is
		 * variable and cannot be checked generically here. */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* legacy: positive return means "reject" */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
418
419
420
421
422
423
424
425
426
427
428
429
/**
 * xt_check_entry_match - validate the match area of one rule entry
 * @match:     start of the first xt_entry_match within the entry
 * @target:    start of the entry's target record (end of the match area)
 * @alignment: required alignment of each xt_entry_match
 *
 * Checks that the region between @match and @target is a chain of
 * properly aligned xt_entry_match records whose declared sizes tile the
 * region exactly, with no record smaller than its header or running
 * past @target.  Returns 0 or -EINVAL.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches present: nothing to validate */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		/* enough bytes left for the header itself? */
		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		/* declared size must cover at least the header ... */
		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		/* ... and must not extend past the target record */
		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
459
460#ifdef CONFIG_COMPAT
461int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
462{
463 struct xt_af *xp = &xt[af];
464
465 if (!xp->compat_tab) {
466 if (!xp->number)
467 return -EINVAL;
468 xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
469 if (!xp->compat_tab)
470 return -ENOMEM;
471 xp->cur = 0;
472 }
473
474 if (xp->cur >= xp->number)
475 return -EINVAL;
476
477 if (xp->cur)
478 delta += xp->compat_tab[xp->cur - 1].delta;
479 xp->compat_tab[xp->cur].offset = offset;
480 xp->compat_tab[xp->cur].delta = delta;
481 xp->cur++;
482 return 0;
483}
484EXPORT_SYMBOL_GPL(xt_compat_add_offset);
485
486void xt_compat_flush_offsets(u_int8_t af)
487{
488 if (xt[af].compat_tab) {
489 vfree(xt[af].compat_tab);
490 xt[af].compat_tab = NULL;
491 xt[af].number = 0;
492 xt[af].cur = 0;
493 }
494}
495EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
496
497int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
498{
499 struct compat_delta *tmp = xt[af].compat_tab;
500 int mid, left = 0, right = xt[af].cur - 1;
501
502 while (left <= right) {
503 mid = (left + right) >> 1;
504 if (offset > tmp[mid].offset)
505 left = mid + 1;
506 else if (offset < tmp[mid].offset)
507 right = mid - 1;
508 else
509 return mid ? tmp[mid - 1].delta : 0;
510 }
511 return left ? tmp[left - 1].delta : 0;
512}
513EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
514
515void xt_compat_init_offsets(u_int8_t af, unsigned int number)
516{
517 xt[af].number = number;
518 xt[af].cur = 0;
519}
520EXPORT_SYMBOL(xt_compat_init_offsets);
521
522int xt_compat_match_offset(const struct xt_match *match)
523{
524 u_int16_t csize = match->compatsize ? : match->matchsize;
525 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
526}
527EXPORT_SYMBOL_GPL(xt_compat_match_offset);
528
/**
 * xt_compat_match_from_user - convert a compat match record to native layout
 * @m:      source record, reinterpreted as compat_xt_entry_match
 * @dstptr: in/out: destination cursor, advanced past the converted record
 * @size:   in/out: running total, grown by the native/compat size delta
 *
 * Copies the header and payload to *dstptr in native layout (using the
 * extension's compat_from_user() translator when present), zero-fills
 * the XT_ALIGN padding, and fixes up the recorded size and name.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;		/* from here on, m is the destination */
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* zero the alignment padding beyond the real payload */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	/* copy the name to the stack before dropping the module ref:
	 * the name string lives in module memory */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
558
/**
 * xt_compat_match_to_user - copy a native match record to user space
 *                           in compat layout
 * @m:      native-format match record
 * @dstptr: in/out: user-space destination cursor, advanced past the record
 * @size:   in/out: running total, shrunk by the native/compat size delta
 *
 * Returns 0 or -EFAULT.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	/* compat record is smaller by the alignment delta */
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
586
587
/* Compat layout of a standard target: target header plus a 32-bit
 * verdict.  Used below to verify that a standard target occupies
 * exactly the expected space in a compat ruleset blob. */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
592
/**
 * xt_compat_check_entry_offsets - validate layout of one compat rule entry
 * @base:          start of the entry
 * @elems:         start of the match/target area within the entry
 * @target_offset: user-supplied offset of the target record
 * @next_offset:   user-supplied offset of the next entry
 *
 * Compat counterpart of xt_check_entry_offsets(): verifies the target
 * record sits after the fixed header, fits before the next entry, has a
 * sane declared size, that a standard target ends exactly where the
 * entry ends, and that the match area tiles cleanly.
 * Returns 0 or -EINVAL.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* target must come after the fixed-size entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* target header itself must fit before the next entry */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* a standard target has fixed size: entry must end right after it */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* match headers have identical layout in native and compat, so
	 * the native walker can be reused for compat entries */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
628#endif
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
/**
 * xt_check_entry_offsets - validate layout of one native rule entry
 * @base:          start of the entry
 * @elems:         start of the match/target area within the entry
 * @target_offset: user-supplied offset of the target record
 * @next_offset:   user-supplied offset of the next entry
 *
 * Verifies the target record sits after the fixed entry header, fits
 * before the next entry, has a sane declared size, that a standard
 * target ends exactly where the entry ends, and that the match area
 * between @elems and the target tiles cleanly.  Returns 0 or -EINVAL.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target must come after the fixed-size entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* target header itself must fit before the next entry */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* a standard target has fixed size: entry must end right after it */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
704
705
706
707
708
709
710
711
712unsigned int *xt_alloc_entry_offsets(unsigned int size)
713{
714 unsigned int *off;
715
716 off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
717
718 if (off)
719 return off;
720
721 if (size < (SIZE_MAX / sizeof(unsigned int)))
722 off = vmalloc(size * sizeof(unsigned int));
723
724 return off;
725}
726EXPORT_SYMBOL(xt_alloc_entry_offsets);
727
728
729
730
731
732
733
734
/* Binary-search @offsets (ascending, @size entries) for @target.
 * Returns true iff @target is a valid rule start offset. */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int lo = 0, hi = size;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (offsets[mid] == target)
			return true;
		if (offsets[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}

	return false;
}
/* exported for the per-family table modules' jump validation */
EXPORT_SYMBOL(xt_find_jump_offset);
754
/**
 * xt_check_target - validate a target before it is used in a rule
 * @par:       check parameters (family, table, hook mask, entry info)
 * @size:      size of the target payload supplied by user space
 * @proto:     protocol the rule matches on (0 = any)
 * @inv_proto: true when the rule's protocol test is inverted
 *
 * Target counterpart of xt_check_match(): verifies payload size, table
 * restriction, allowed hooks and protocol, then runs the extension's
 * checkentry() hook.  Returns 0 on success or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* legacy: positive return means "reject" */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
/**
 * xt_copy_counters_from_user - copy counter data and metadata from user space
 * @user:   source buffer
 * @len:    total number of bytes available at @user
 * @info:   out: counter metadata (table name, number of counters)
 * @compat: true when invoked from a 32-bit syscall on a 64-bit kernel
 *
 * Reads the xt_counters_info header (or its compat variant), then
 * requires the remaining bytes to hold exactly info->num_counters
 * xt_counters records, which are copied into a vmalloc'd buffer.
 * Returns the buffer (caller frees with vfree()) or an ERR_PTR().
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* header differs only in layout; translate it by hand */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* the name may arrive unterminated from user space */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* 64-bit multiply so a huge num_counters cannot wrap the size */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	/* remaining bytes must match the counter array exactly */
	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
878
879#ifdef CONFIG_COMPAT
880int xt_compat_target_offset(const struct xt_target *target)
881{
882 u_int16_t csize = target->compatsize ? : target->targetsize;
883 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
884}
885EXPORT_SYMBOL_GPL(xt_compat_target_offset);
886
/**
 * xt_compat_target_from_user - convert a compat target record to native layout
 * @t:      source record, reinterpreted as compat_xt_entry_target
 * @dstptr: in/out: destination cursor, advanced past the converted record
 * @size:   in/out: running total, grown by the native/compat size delta
 *
 * Target counterpart of xt_compat_match_from_user(): copies header and
 * payload (via compat_from_user() when the extension provides one),
 * zero-fills the XT_ALIGN padding, and fixes up size and name.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;		/* from here on, t is the destination */
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* zero the alignment padding beyond the real payload */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* copy the name to the stack before dropping the module ref:
	 * the name string lives in module memory */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
916
/**
 * xt_compat_target_to_user - copy a native target record to user space
 *                            in compat layout
 * @t:      native-format target record
 * @dstptr: in/out: user-space destination cursor, advanced past the record
 * @size:   in/out: running total, shrunk by the native/compat size delta
 *
 * Returns 0 or -EFAULT.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	/* compat record is smaller by the alignment delta */
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
944#endif
945
946struct xt_table_info *xt_alloc_table_info(unsigned int size)
947{
948 struct xt_table_info *info = NULL;
949 size_t sz = sizeof(*info) + size;
950
951 if (sz < sizeof(*info))
952 return NULL;
953
954
955 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
956 return NULL;
957
958 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
959 info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
960 if (!info) {
961 info = vmalloc(sz);
962 if (!info)
963 return NULL;
964 }
965 memset(info, 0, sizeof(*info));
966 info->size = size;
967 return info;
968}
969EXPORT_SYMBOL(xt_alloc_table_info);
970
971void xt_free_table_info(struct xt_table_info *info)
972{
973 int cpu;
974
975 if (info->jumpstack != NULL) {
976 for_each_possible_cpu(cpu)
977 kvfree(info->jumpstack[cpu]);
978 kvfree(info->jumpstack);
979 }
980
981 kvfree(info);
982}
983EXPORT_SYMBOL(xt_free_table_info);
984
985
986struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
987 const char *name)
988{
989 struct xt_table *t, *found = NULL;
990
991 mutex_lock(&xt[af].mutex);
992 list_for_each_entry(t, &net->xt.tables[af], list)
993 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
994 return t;
995
996 if (net == &init_net)
997 goto out;
998
999
1000 list_for_each_entry(t, &init_net.xt.tables[af], list) {
1001 if (strcmp(t->name, name))
1002 continue;
1003 if (!try_module_get(t->me))
1004 return NULL;
1005
1006 mutex_unlock(&xt[af].mutex);
1007 if (t->table_init(net) != 0) {
1008 module_put(t->me);
1009 return NULL;
1010 }
1011
1012 found = t;
1013
1014 mutex_lock(&xt[af].mutex);
1015 break;
1016 }
1017
1018 if (!found)
1019 goto out;
1020
1021
1022 list_for_each_entry(t, &net->xt.tables[af], list)
1023 if (strcmp(t->name, name) == 0)
1024 return t;
1025
1026 module_put(found->me);
1027 out:
1028 mutex_unlock(&xt[af].mutex);
1029 return NULL;
1030}
1031EXPORT_SYMBOL_GPL(xt_find_table_lock);
1032
1033void xt_table_unlock(struct xt_table *table)
1034{
1035 mutex_unlock(&xt[table->af].mutex);
1036}
1037EXPORT_SYMBOL_GPL(xt_table_unlock);
1038
1039#ifdef CONFIG_COMPAT
1040void xt_compat_lock(u_int8_t af)
1041{
1042 mutex_lock(&xt[af].compat_mutex);
1043}
1044EXPORT_SYMBOL_GPL(xt_compat_lock);
1045
1046void xt_compat_unlock(u_int8_t af)
1047{
1048 mutex_unlock(&xt[af].compat_mutex);
1049}
1050EXPORT_SYMBOL_GPL(xt_compat_unlock);
1051#endif
1052
/* Per-cpu sequence counter; presumably bumped by writers around table
 * traversal so counter readers can retry — used outside this chunk,
 * confirm against the per-family table modules. */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static key; name suggests it is enabled while a TEE target is in
 * use — NOTE(review): toggling code is not visible in this chunk. */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1058
/* Allocate the per-cpu jump stacks used during rule traversal of @i.
 * Returns 0 or -ENOMEM.  On partial failure nothing is freed here;
 * presumably the caller disposes of @i via xt_free_table_info(), which
 * does free the stacks — confirm. */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	/* array holding one stack pointer per possible cpu */
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset contains no jumps: no per-cpu stacks needed */
	if (i->stacksize == 0)
		return 0;

	/* stack is doubled — presumably to allow one nested traversal of
	 * the same table (e.g. via TEE); confirm against traversal code */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/* cleanup left to the caller (see note above) */
			return -ENOMEM;
	}

	return 0;
}
1105
/**
 * xt_replace_table - swap a table's ruleset for a new one
 * @table:        table being updated
 * @num_counters: number of counters user space believes the table has;
 *                must equal the current ruleset's entry count
 * @newinfo:      replacement ruleset
 * @error:        out: negative errno on failure
 *
 * Publishes @newinfo as table->private with bottom halves disabled and
 * returns the previous private data (the caller reads its counters and
 * frees it), or NULL with *error set.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution with softirqs off so local packet
	 * processing cannot interleave with the swap. */
	local_bh_disable();
	private = table->private;

	/* stale counter count means user space raced a ruleset change */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;

	/* make newinfo's contents visible before the pointer itself;
	 * presumably paired with a read barrier on the traversal side —
	 * NOTE(review): confirm against the per-family do_table code */
	smp_wmb();
	table->private = newinfo;

	/* NOTE(review): other CPUs may still be traversing the old
	 * ruleset at this point; callers appear to rely on additional
	 * synchronization (xt_recseq) before freeing it — confirm */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			/* NOTE(review): logs the *old* ruleset's entry
			 * count, not the new one — confirm intent */
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1168
/**
 * xt_register_table - instantiate and register a table in a namespace
 * @net:         namespace to register in
 * @input_table: template table; duplicated, never modified
 * @bootstrap:   placeholder private data used during the initial swap
 * @newinfo:     actual initial ruleset
 *
 * Duplicates the template so each namespace owns an independent
 * xt_table, rejects duplicate names, installs @newinfo via
 * xt_replace_table() and links the table into the namespace list.
 * Returns the new table or an ERR_PTR().
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* duplicate: one object must not sit on multiple netns lists */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* reject a second table of the same name in this namespace */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* dummy private so xt_replace_table() has something to swap out;
	 * num_counters == 0 matches the bootstrap info's entry count */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* remember the entry count the table started with */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1217
1218void *xt_unregister_table(struct xt_table *table)
1219{
1220 struct xt_table_info *private;
1221
1222 mutex_lock(&xt[table->af].mutex);
1223 private = table->private;
1224 list_del(&table->list);
1225 mutex_unlock(&xt[table->af].mutex);
1226 kfree(table);
1227
1228 return private;
1229}
1230EXPORT_SYMBOL_GPL(xt_unregister_table);
1231
1232#ifdef CONFIG_PROC_FS
/* seq_file private data for the per-family tables listing: the generic
 * netns seq state plus the address family being listed. */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;	/* set from the proc entry's PDE data at open */
};
1237static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1238{
1239 struct xt_names_priv *priv = seq->private;
1240 struct net *net = seq_file_net(seq);
1241 u_int8_t af = priv->af;
1242
1243 mutex_lock(&xt[af].mutex);
1244 return seq_list_start(&net->xt.tables[af], *pos);
1245}
1246
1247static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1248{
1249 struct xt_names_priv *priv = seq->private;
1250 struct net *net = seq_file_net(seq);
1251 u_int8_t af = priv->af;
1252
1253 return seq_list_next(v, &net->xt.tables[af], pos);
1254}
1255
1256static void xt_table_seq_stop(struct seq_file *seq, void *v)
1257{
1258 struct xt_names_priv *priv = seq->private;
1259 u_int8_t af = priv->af;
1260
1261 mutex_unlock(&xt[af].mutex);
1262}
1263
1264static int xt_table_seq_show(struct seq_file *seq, void *v)
1265{
1266 struct xt_table *table = list_entry(v, struct xt_table, list);
1267
1268 if (*table->name)
1269 seq_printf(seq, "%s\n", table->name);
1270 return 0;
1271}
1272
/* seq_file callbacks for listing a family's table names in /proc */
static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};
1279
1280static int xt_table_open(struct inode *inode, struct file *file)
1281{
1282 int ret;
1283 struct xt_names_priv *priv;
1284
1285 ret = seq_open_net(inode, file, &xt_table_seq_ops,
1286 sizeof(struct xt_names_priv));
1287 if (!ret) {
1288 priv = ((struct seq_file *)file->private_data)->private;
1289 priv->af = (unsigned long)PDE_DATA(inode);
1290 }
1291 return ret;
1292}
1293
/* file operations for the per-family table-names proc entry */
static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1301
1302
1303
1304
1305
/* Traversal cursor for the match/target proc listings: walks the
 * NFPROTO_UNSPEC list first, then the family-specific list. */
struct nf_mttg_trav {
	struct list_head *head, *curr;	/* current list and position */
	uint8_t class, nfproto;		/* state machine phase, family */
};

/* Traversal phases; ordering matters, see next_class[] below. */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
1317
/* Advance the match/target traversal one step.  The mutex protecting
 * whichever list is currently being walked is held across calls and is
 * either handed over at the UNSPEC->family transition or released in
 * xt_mttg_seq_stop().  Returns the cursor, or NULL when exhausted. */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		/* begin with the family-independent list */
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* end of UNSPEC list: hand the lock over to the
		 * family-specific list */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fall through: both lists exhausted */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}
1357
1358static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1359 bool is_target)
1360{
1361 struct nf_mttg_trav *trav = seq->private;
1362 unsigned int j;
1363
1364 trav->class = MTTG_TRAV_INIT;
1365 for (j = 0; j < *pos; ++j)
1366 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1367 return NULL;
1368 return trav;
1369}
1370
1371static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1372{
1373 struct nf_mttg_trav *trav = seq->private;
1374
1375 switch (trav->class) {
1376 case MTTG_TRAV_NFP_UNSPEC:
1377 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1378 break;
1379 case MTTG_TRAV_NFP_SPEC:
1380 mutex_unlock(&xt[trav->nfproto].mutex);
1381 break;
1382 }
1383}
1384
/* seq_file ->start for the per-family "*_tables_matches" proc files. */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}
1389
/* seq_file ->next for the per-family "*_tables_matches" proc files. */
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}
1394
1395static int xt_match_seq_show(struct seq_file *seq, void *v)
1396{
1397 const struct nf_mttg_trav *trav = seq->private;
1398 const struct xt_match *match;
1399
1400 switch (trav->class) {
1401 case MTTG_TRAV_NFP_UNSPEC:
1402 case MTTG_TRAV_NFP_SPEC:
1403 if (trav->curr == trav->head)
1404 return 0;
1405 match = list_entry(trav->curr, struct xt_match, list);
1406 if (*match->name)
1407 seq_printf(seq, "%s\n", match->name);
1408 }
1409 return 0;
1410}
1411
/* seq_file operations backing the "*_tables_matches" proc entries. */
static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};
1418
/*
 * ->open for "*_tables_matches": allocate per-reader traversal state and
 * record the protocol family that was stashed in the proc entry's data
 * by xt_proto_init().
 */
static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}
1429
/* file operations for the "*_tables_matches" proc entries */
static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1437
/* seq_file ->start for the per-family "*_tables_targets" proc files. */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}
1442
/* seq_file ->next for the per-family "*_tables_targets" proc files. */
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}
1447
1448static int xt_target_seq_show(struct seq_file *seq, void *v)
1449{
1450 const struct nf_mttg_trav *trav = seq->private;
1451 const struct xt_target *target;
1452
1453 switch (trav->class) {
1454 case MTTG_TRAV_NFP_UNSPEC:
1455 case MTTG_TRAV_NFP_SPEC:
1456 if (trav->curr == trav->head)
1457 return 0;
1458 target = list_entry(trav->curr, struct xt_target, list);
1459 if (*target->name)
1460 seq_printf(seq, "%s\n", target->name);
1461 }
1462 return 0;
1463}
1464
/* seq_file operations backing the "*_tables_targets" proc entries. */
static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};
1471
/*
 * ->open for "*_tables_targets": allocate per-reader traversal state and
 * record the protocol family that was stashed in the proc entry's data
 * by xt_proto_init().
 */
static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}
1482
/* file operations for the "*_tables_targets" proc entries */
static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1490
/* Suffixes of the per-family proc entries, appended to xt_prefix[af]. */
#define FORMAT_TABLES "_tables_names"
#define	FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
1494
1495#endif
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505struct nf_hook_ops *
1506xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1507{
1508 unsigned int hook_mask = table->valid_hooks;
1509 uint8_t i, num_hooks = hweight32(hook_mask);
1510 uint8_t hooknum;
1511 struct nf_hook_ops *ops;
1512
1513 if (!num_hooks)
1514 return ERR_PTR(-EINVAL);
1515
1516 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1517 if (ops == NULL)
1518 return ERR_PTR(-ENOMEM);
1519
1520 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1521 hook_mask >>= 1, ++hooknum) {
1522 if (!(hook_mask & 1))
1523 continue;
1524 ops[i].hook = fn;
1525 ops[i].pf = table->af;
1526 ops[i].hooknum = hooknum;
1527 ops[i].priority = table->priority;
1528 ++i;
1529 }
1530
1531 return ops;
1532}
1533EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
1534
1535int xt_proto_init(struct net *net, u_int8_t af)
1536{
1537#ifdef CONFIG_PROC_FS
1538 char buf[XT_FUNCTION_MAXNAMELEN];
1539 struct proc_dir_entry *proc;
1540 kuid_t root_uid;
1541 kgid_t root_gid;
1542#endif
1543
1544 if (af >= ARRAY_SIZE(xt_prefix))
1545 return -EINVAL;
1546
1547
1548#ifdef CONFIG_PROC_FS
1549 root_uid = make_kuid(net->user_ns, 0);
1550 root_gid = make_kgid(net->user_ns, 0);
1551
1552 strlcpy(buf, xt_prefix[af], sizeof(buf));
1553 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1554 proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
1555 (void *)(unsigned long)af);
1556 if (!proc)
1557 goto out;
1558 if (uid_valid(root_uid) && gid_valid(root_gid))
1559 proc_set_user(proc, root_uid, root_gid);
1560
1561 strlcpy(buf, xt_prefix[af], sizeof(buf));
1562 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1563 proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
1564 (void *)(unsigned long)af);
1565 if (!proc)
1566 goto out_remove_tables;
1567 if (uid_valid(root_uid) && gid_valid(root_gid))
1568 proc_set_user(proc, root_uid, root_gid);
1569
1570 strlcpy(buf, xt_prefix[af], sizeof(buf));
1571 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1572 proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
1573 (void *)(unsigned long)af);
1574 if (!proc)
1575 goto out_remove_matches;
1576 if (uid_valid(root_uid) && gid_valid(root_gid))
1577 proc_set_user(proc, root_uid, root_gid);
1578#endif
1579
1580 return 0;
1581
1582#ifdef CONFIG_PROC_FS
1583out_remove_matches:
1584 strlcpy(buf, xt_prefix[af], sizeof(buf));
1585 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1586 remove_proc_entry(buf, net->proc_net);
1587
1588out_remove_tables:
1589 strlcpy(buf, xt_prefix[af], sizeof(buf));
1590 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1591 remove_proc_entry(buf, net->proc_net);
1592out:
1593 return -1;
1594#endif
1595}
1596EXPORT_SYMBOL_GPL(xt_proto_init);
1597
1598void xt_proto_fini(struct net *net, u_int8_t af)
1599{
1600#ifdef CONFIG_PROC_FS
1601 char buf[XT_FUNCTION_MAXNAMELEN];
1602
1603 strlcpy(buf, xt_prefix[af], sizeof(buf));
1604 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1605 remove_proc_entry(buf, net->proc_net);
1606
1607 strlcpy(buf, xt_prefix[af], sizeof(buf));
1608 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1609 remove_proc_entry(buf, net->proc_net);
1610
1611 strlcpy(buf, xt_prefix[af], sizeof(buf));
1612 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1613 remove_proc_entry(buf, net->proc_net);
1614#endif
1615}
1616EXPORT_SYMBOL_GPL(xt_proto_fini);
1617
/* Per-netns setup: initialize the per-family table lists. */
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}
1626
/* No ->exit: tables must already be unregistered when a netns dies. */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
1630
1631static int __init xt_init(void)
1632{
1633 unsigned int i;
1634 int rv;
1635
1636 for_each_possible_cpu(i) {
1637 seqcount_init(&per_cpu(xt_recseq, i));
1638 }
1639
1640 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1641 if (!xt)
1642 return -ENOMEM;
1643
1644 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1645 mutex_init(&xt[i].mutex);
1646#ifdef CONFIG_COMPAT
1647 mutex_init(&xt[i].compat_mutex);
1648 xt[i].compat_tab = NULL;
1649#endif
1650 INIT_LIST_HEAD(&xt[i].target);
1651 INIT_LIST_HEAD(&xt[i].match);
1652 }
1653 rv = register_pernet_subsys(&xt_net_ops);
1654 if (rv < 0)
1655 kfree(xt);
1656 return rv;
1657}
1658
/* Module exit: unregister the pernet subsystem and free the xt[] state. */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1664
/* register module entry/exit points */
module_init(xt_init);
module_exit(xt_fini);
1667
1668