1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/socket.h>
20#include <linux/net.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/string.h>
24#include <linux/vmalloc.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/audit.h>
29#include <net/net_namespace.h>
30
31#include <linux/netfilter/x_tables.h>
32#include <linux/netfilter_arp.h>
33#include <linux/netfilter_ipv4/ip_tables.h>
34#include <linux/netfilter_ipv6/ip6_tables.h>
35#include <linux/netfilter_arp/arp_tables.h>
36
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

/* Round @x up to the next SMP cacheline boundary. */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Granularity for per-cpu counter block allocations (bytes). */
#define XT_PCPU_BLOCK_SIZE 4096
43
/*
 * One entry of the compat offset table: maps a rule offset to the
 * accumulated size delta between the compat (32-bit) and native layout
 * of all entries up to that point (see xt_compat_add_offset()).
 */
struct compat_delta {
	unsigned int offset;	/* rule offset this delta applies up to */
	int delta;		/* running compat<->native size delta */
};
48
/* Per-protocol-family registration state for match/target extensions. */
struct xt_af {
	struct mutex mutex;		/* protects the two lists below */
	struct list_head match;		/* registered struct xt_match */
	struct list_head target;	/* registered struct xt_target */
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;	/* serializes compat rule translation */
	struct compat_delta *compat_tab; /* sorted offset->delta table */
	unsigned int number;		/* capacity of compat_tab (entries) */
	unsigned int cur;		/* entries currently used */
#endif
};
60
/* One xt_af slot per NFPROTO_*; allocated at module init (not shown here). */
static struct xt_af *xt;

/* Family prefix used to form module/table names, e.g. "ip" -> "ipt_foo". */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6] = "ip6",
};

/*
 * Jumpstacks are sized to stacksize * this multiplier — presumably to
 * allow for one level of rule-set re-entry; TODO(review): confirm why 2.
 */
static const unsigned int xt_jumpstack_multiplier = 2;
73
74
75int
76xt_register_target(struct xt_target *target)
77{
78 u_int8_t af = target->family;
79 int ret;
80
81 ret = mutex_lock_interruptible(&xt[af].mutex);
82 if (ret != 0)
83 return ret;
84 list_add(&target->list, &xt[af].target);
85 mutex_unlock(&xt[af].mutex);
86 return ret;
87}
88EXPORT_SYMBOL(xt_register_target);
89
90void
91xt_unregister_target(struct xt_target *target)
92{
93 u_int8_t af = target->family;
94
95 mutex_lock(&xt[af].mutex);
96 list_del(&target->list);
97 mutex_unlock(&xt[af].mutex);
98}
99EXPORT_SYMBOL(xt_unregister_target);
100
101int
102xt_register_targets(struct xt_target *target, unsigned int n)
103{
104 unsigned int i;
105 int err = 0;
106
107 for (i = 0; i < n; i++) {
108 err = xt_register_target(&target[i]);
109 if (err)
110 goto err;
111 }
112 return err;
113
114err:
115 if (i > 0)
116 xt_unregister_targets(target, i);
117 return err;
118}
119EXPORT_SYMBOL(xt_register_targets);
120
121void
122xt_unregister_targets(struct xt_target *target, unsigned int n)
123{
124 while (n-- > 0)
125 xt_unregister_target(&target[n]);
126}
127EXPORT_SYMBOL(xt_unregister_targets);
128
129int
130xt_register_match(struct xt_match *match)
131{
132 u_int8_t af = match->family;
133 int ret;
134
135 ret = mutex_lock_interruptible(&xt[af].mutex);
136 if (ret != 0)
137 return ret;
138
139 list_add(&match->list, &xt[af].match);
140 mutex_unlock(&xt[af].mutex);
141
142 return ret;
143}
144EXPORT_SYMBOL(xt_register_match);
145
146void
147xt_unregister_match(struct xt_match *match)
148{
149 u_int8_t af = match->family;
150
151 mutex_lock(&xt[af].mutex);
152 list_del(&match->list);
153 mutex_unlock(&xt[af].mutex);
154}
155EXPORT_SYMBOL(xt_unregister_match);
156
157int
158xt_register_matches(struct xt_match *match, unsigned int n)
159{
160 unsigned int i;
161 int err = 0;
162
163 for (i = 0; i < n; i++) {
164 err = xt_register_match(&match[i]);
165 if (err)
166 goto err;
167 }
168 return err;
169
170err:
171 if (i > 0)
172 xt_unregister_matches(match, i);
173 return err;
174}
175EXPORT_SYMBOL(xt_register_matches);
176
177void
178xt_unregister_matches(struct xt_match *match, unsigned int n)
179{
180 while (n-- > 0)
181 xt_unregister_match(&match[n]);
182}
183EXPORT_SYMBOL(xt_unregister_matches);
184
185
186
187
188
189
190
191
192
/**
 * xt_find_match - look up a match extension by family, name and revision
 *
 * Scans the family's registered matches under the family mutex; on a hit
 * a module reference is taken *before* the mutex is dropped, so the match
 * cannot be unloaded while the caller uses it.  If nothing is found in
 * the requested family, the family-independent (NFPROTO_UNSPEC) list is
 * searched as a fallback.
 *
 * Returns the pinned match, or ERR_PTR(): -EINTR if interrupted waiting
 * for the mutex, -EPROTOTYPE if the name exists but not in @revision,
 * -ENOENT otherwise.
 */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* name exists, wrong rev */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
221
222struct xt_match *
223xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
224{
225 struct xt_match *match;
226
227 match = xt_find_match(nfproto, name, revision);
228 if (IS_ERR(match)) {
229 request_module("%st_%s", xt_prefix[nfproto], name);
230 match = xt_find_match(nfproto, name, revision);
231 }
232
233 return match;
234}
235EXPORT_SYMBOL_GPL(xt_request_find_match);
236
237
/**
 * xt_find_target - look up a target extension by family, name and revision
 *
 * Same contract as xt_find_match(): returns the target with its module
 * reference pinned, falling back to the NFPROTO_UNSPEC list, or
 * ERR_PTR(-EINTR / -EPROTOTYPE / -ENOENT).
 */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* name exists, wrong rev */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
266
267struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
268{
269 struct xt_target *target;
270
271 target = xt_find_target(af, name, revision);
272 if (IS_ERR(target)) {
273 request_module("%st_%s", xt_prefix[af], name);
274 target = xt_find_target(af, name, revision);
275 }
276
277 return target;
278}
279EXPORT_SYMBOL_GPL(xt_request_find_target);
280
/*
 * Scan the match list of @af for @name.  Records the highest revision
 * seen in *@bestp and returns 1 if the exact @revision exists.  Falls
 * through to the family-independent list when nothing matched.
 * Caller must hold xt[af].mutex.
 */
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}
300
/*
 * Target-side counterpart of match_revfn(): record the best revision of
 * @name in *@bestp, return 1 if @revision is available.
 * Caller must hold xt[af].mutex.
 */
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}
320
321
/**
 * xt_find_revision - check whether a match/target revision is supported
 * @target: nonzero to look up a target, zero for a match
 *
 * Returns 1 with *@err set to the best available revision (or to
 * -EPROTONOSUPPORT if @revision itself is absent, -EINTR if interrupted);
 * returns 0 with *@err = -ENOENT when the extension is unknown entirely.
 */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Not found. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
349
350static char *
351textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
352{
353 static const char *const inetbr_names[] = {
354 "PREROUTING", "INPUT", "FORWARD",
355 "OUTPUT", "POSTROUTING", "BROUTING",
356 };
357 static const char *const arp_names[] = {
358 "INPUT", "FORWARD", "OUTPUT",
359 };
360 const char *const *names;
361 unsigned int i, max;
362 char *p = buf;
363 bool np = false;
364 int res;
365
366 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
367 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
368 ARRAY_SIZE(inetbr_names);
369 *p = '\0';
370 for (i = 0; i < max; ++i) {
371 if (!(mask & (1 << i)))
372 continue;
373 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
374 if (res > 0) {
375 size -= res;
376 p += res;
377 }
378 np = true;
379 }
380
381 return buf;
382}
383
/**
 * xt_check_match - validate a rule's match against its extension
 * @par:       checkentry parameters (match, table, hook mask, family)
 * @size:      match payload size supplied by userspace
 * @proto:     layer-4 protocol the rule applies to
 * @inv_proto: true if the protocol test is inverted
 *
 * Verifies size, table, hook and protocol constraints declared by the
 * extension, then runs its ->checkentry() hook.  Returns 0 or a
 * negative errno.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set; a matchsize of
		 * -1 marks such matches.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
438
439
440
441
442
443
444
445
446
447
448
449
/**
 * xt_check_entry_match - walk and sanity-check a rule's match blob
 * @match:     start of the first xt_entry_match in the rule
 * @target:    start of the rule's target (end of the match region)
 * @alignment: required alignment of each xt_entry_match
 *
 * The region between @match and @target must be a gap-free sequence of
 * properly aligned xt_entry_match structures whose self-declared sizes
 * stay inside the region.  Returns 0 if valid, -EINVAL otherwise.
 * An empty region (rule without matches) is valid.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
479
480#ifdef CONFIG_COMPAT
481int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
482{
483 struct xt_af *xp = &xt[af];
484
485 if (!xp->compat_tab) {
486 if (!xp->number)
487 return -EINVAL;
488 xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
489 if (!xp->compat_tab)
490 return -ENOMEM;
491 xp->cur = 0;
492 }
493
494 if (xp->cur >= xp->number)
495 return -EINVAL;
496
497 if (xp->cur)
498 delta += xp->compat_tab[xp->cur - 1].delta;
499 xp->compat_tab[xp->cur].offset = offset;
500 xp->compat_tab[xp->cur].delta = delta;
501 xp->cur++;
502 return 0;
503}
504EXPORT_SYMBOL_GPL(xt_compat_add_offset);
505
506void xt_compat_flush_offsets(u_int8_t af)
507{
508 if (xt[af].compat_tab) {
509 vfree(xt[af].compat_tab);
510 xt[af].compat_tab = NULL;
511 xt[af].number = 0;
512 xt[af].cur = 0;
513 }
514}
515EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
516
/**
 * xt_compat_calc_jump - translate a jump offset to the compat layout
 *
 * Binary-searches the sorted offset table built by xt_compat_add_offset()
 * and returns the accumulated delta of all entries *before* @offset, i.e.
 * the amount by which a jump to @offset must be adjusted.
 */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			/* exact hit: deltas strictly before this entry */
			return mid ? tmp[mid - 1].delta : 0;
	}
	/* no exact hit: delta of the last entry below @offset */
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
534
/*
 * Announce how many entries the upcoming compat translation will record,
 * so xt_compat_add_offset() can size its table on first use.
 */
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
541
/*
 * Size difference between the native (aligned) and compat layout of a
 * match's payload.  Extensions with a distinct 32-bit layout declare it
 * via ->compatsize; otherwise the native size is used for both.
 */
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
548
/**
 * xt_compat_match_from_user - expand a compat match into native layout
 * @m:      match in compat layout (also carries the kernel match pointer)
 * @dstptr: in/out: destination cursor, advanced past the converted match
 * @size:   in/out: running total, grown by the layout size difference
 *
 * Copies the header and payload (via ->compat_from_user if the extension
 * provides one), zeroes alignment padding, fixes up the recorded size and
 * name, and drops the module reference taken during translation.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* zero the alignment padding so no kernel memory leaks out */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	/* snapshot the name before module_put() may release the match */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
578
/**
 * xt_compat_match_to_user - shrink a native match into compat layout
 * @m:      match in native kernel layout
 * @dstptr: in/out: userspace destination cursor, advanced on success
 * @size:   in/out: running total, reduced by the layout size difference
 *
 * Returns 0 on success, -EFAULT on any failed copy to userspace.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
606
607
/* 32-bit layout of a standard (verdict-only) target, used below to
 * validate the size of standard targets in compat rulesets. */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
612
/**
 * xt_compat_check_entry_offsets - validate offsets of a compat rule
 * @base:          start of the rule entry
 * @elems:         start of the variable part (matches) within the entry
 * @target_offset: claimed offset of the target from @base
 * @next_offset:   claimed total size of the entry
 *
 * Mirror of xt_check_entry_offsets() for 32-bit rulesets: the target must
 * lie after the fixed header, fit (with its self-declared size) inside the
 * entry, standard targets must have exactly the standard size, and the
 * match region must pass xt_check_entry_match().
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/*
	 * compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical.  In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
648#endif
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry offsets and sizes
 * @base:          start of the rule entry
 * @elems:         start of the variable part (matches) within the entry
 * @target_offset: claimed offset of the target from @base
 * @next_offset:   claimed total size of the entry
 *
 * Offsets come straight from userspace, so every structural claim is
 * re-checked: the target must start after the fixed header, fit (with
 * its self-declared size) inside the entry, standard targets must have
 * exactly the standard size, and the match region must be a valid chain
 * per xt_check_entry_match().  Returns 0 or -EINVAL.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the rule entry */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
724
725
726
727
728
729
730
731
/**
 * xt_alloc_entry_offsets - allocate a zeroed array of @size rule offsets
 *
 * kvmalloc_array() checks the size multiplication for overflow and falls
 * back to vmalloc for large tables.  Free with kvfree().
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);

}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
738
739
740
741
742
743
744
745
/**
 * xt_find_jump_offset - binary search @target in a sorted offset array
 * @offsets: sorted (ascending) array of valid rule offsets
 * @target:  offset a jump wants to land on
 * @size:    number of elements in @offsets
 *
 * Returns true iff @target is a valid rule start.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int lo = 0, hi = size;

	while (lo < hi) {
		int mid = (lo + hi) / 2u;

		if (offsets[mid] == target)
			return true;
		if (offsets[mid] > target)
			hi = mid;
		else
			lo = mid + 1;
	}

	return false;
}
764EXPORT_SYMBOL(xt_find_jump_offset);
765
/**
 * xt_check_target - validate a rule's target against its extension
 * @par:       checkentry parameters (target, table, hook mask, family)
 * @size:      target payload size supplied by userspace
 * @proto:     layer-4 protocol the rule applies to
 * @inv_proto: true if the protocol test is inverted
 *
 * Target-side counterpart of xt_check_match(): verifies size, table,
 * hook and protocol constraints, then runs ->checkentry().  Returns 0
 * or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
/**
 * xt_copy_counters_from_user - copy a counter header plus counter array
 * @user:   userspace source buffer
 * @len:    total length claimed by userspace
 * @info:   filled with the (native-layout) counters header
 * @compat: true when the caller is a 32-bit process on a 64-bit kernel
 *
 * Validates that @len is exactly header + num_counters * counter size
 * before allocating, which prevents over-reads and bogus allocation
 * sizes.  Returns the vmalloc'ed counter array (caller must vfree) or
 * an ERR_PTR.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* header differs in size/packing from the native one */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* name comes from userspace: force NUL termination */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* u64 math so a huge num_counters cannot wrap the size check */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
889
890#ifdef CONFIG_COMPAT
/*
 * Size difference between the native (aligned) and compat layout of a
 * target's payload; counterpart of xt_compat_match_offset().
 */
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
897
/**
 * xt_compat_target_from_user - expand a compat target into native layout
 * @t:      target in compat layout (also carries the kernel target pointer)
 * @dstptr: in/out: destination cursor, advanced past the converted target
 * @size:   in/out: running total, grown by the layout size difference
 *
 * Mirror of xt_compat_match_from_user() for targets: copy, convert via
 * ->compat_from_user when provided, zero alignment padding, fix up size
 * and name, and drop the translation-time module reference.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* zero the alignment padding so no kernel memory leaks out */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* snapshot the name before module_put() may release the target */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
927
/**
 * xt_compat_target_to_user - shrink a native target into compat layout
 * @t:      target in native kernel layout
 * @dstptr: in/out: userspace destination cursor, advanced on success
 * @size:   in/out: running total, reduced by the layout size difference
 *
 * Returns 0 on success, -EFAULT on any failed copy to userspace.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
955#endif
956
/**
 * xt_alloc_table_info - allocate a table-info blob for @size rule bytes
 *
 * Tries kmalloc for modest sizes (quietly, without OOM retries) and
 * falls back to vmalloc.  Returns NULL on overflow, implausibly large
 * requests, or allocation failure.  Free with xt_free_table_info().
 */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* overflow check for the header + payload addition */
	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: limit size to what a sane user could actually need */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	/* only the header needs zeroing; callers fill the payload */
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
981
982void xt_free_table_info(struct xt_table_info *info)
983{
984 int cpu;
985
986 if (info->jumpstack != NULL) {
987 for_each_possible_cpu(cpu)
988 kvfree(info->jumpstack[cpu]);
989 kvfree(info->jumpstack);
990 }
991
992 free_percpu(info->stackptr);
993
994 kvfree(info);
995}
996EXPORT_SYMBOL(xt_free_table_info);
997
998
/**
 * xt_find_table_lock - find a table by name, pinned and locked
 *
 * On success the table's module reference is taken and the per-family
 * mutex is STILL HELD — the caller must release it with
 * xt_table_unlock().  Returns NULL if no such table (mutex released),
 * or ERR_PTR(-EINTR) if interrupted while waiting for the mutex.
 */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t; /* mutex intentionally kept held */
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
1014
/* Drop the per-family mutex still held after a successful
 * xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1020
1021#ifdef CONFIG_COMPAT
/* Serialize compat rule translation for family @af (guards compat_tab). */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);
1027
/* Release the compat translation mutex taken by xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
1033#endif
1034
/* Per-cpu sequence counter bumped around rule traversal, so that
 * xt_replace_table() can wait until no CPU still uses the old table. */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1037
/*
 * Allocate the per-cpu jump stacks and stack-pointer counters used when
 * rules jump to user-defined chains.  Returns 0 or -ENOMEM; on partial
 * failure the pieces already allocated are released later by
 * xt_free_table_info() on @i.
 */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	/* array of one jump-stack pointer per possible cpu */
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
1071
/**
 * xt_replace_table - swap in a new ruleset and retire the old one
 * @table:        table being replaced
 * @num_counters: counter count userspace believes the old table has;
 *                must match or the replace is rejected with -EAGAIN
 * @newinfo:      the new ruleset (jump stacks are allocated here)
 * @error:        filled with a negative errno on failure
 *
 * Returns the old table info (caller owns it and its counters) or NULL
 * with *@error set.  After the pointer swap it spins until every CPU's
 * xt_recseq is even, i.e. no packet is still traversing the old rules.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	unsigned int cpu;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old table valid? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/* make sure all cpus see new ->private value */
	smp_wmb();

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries; with BH off here, waiting
	 * below is what guarantees they have drained.
	 */
	local_bh_enable();

	/* ... so wait for even xt_recseq on all cpus */
	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {
			/* odd: that cpu is mid-traversal; wait it out */
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1149
/**
 * xt_register_table - register a table with its initial ruleset
 * @net:         network namespace the table lives in
 * @input_table: template table description (duplicated, not kept)
 * @bootstrap:   throwaway table info used only to satisfy the replace path
 * @newinfo:     the real initial ruleset
 *
 * Returns the newly allocated, namespace-local table, or an ERR_PTR
 * (-ENOMEM, -EINTR, -EEXIST if a table of that name already exists in
 * the family, or whatever xt_replace_table() reported).
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists: duplicate per netns. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code: start from a dummy private. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

 unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1202
1203void *xt_unregister_table(struct xt_table *table)
1204{
1205 struct xt_table_info *private;
1206
1207 mutex_lock(&xt[table->af].mutex);
1208 private = table->private;
1209 list_del(&table->list);
1210 mutex_unlock(&xt[table->af].mutex);
1211 kfree(table);
1212
1213 return private;
1214}
1215EXPORT_SYMBOL_GPL(xt_unregister_table);
1216
1217#ifdef CONFIG_PROC_FS
/* Private state of the /proc "*_tables_names" seq file: the standard
 * net-namespace seq state plus the protocol family being listed. */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
/* seq_file start: take the family mutex (held until ->stop) and position
 * at entry *pos of the namespace's table list. */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}
1231
/* seq_file next: advance to the following table in the family list. */
static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}
1240
/* seq_file stop: release the family mutex taken in ->start. */
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}
1248
/* seq_file show: emit one table name per line, skipping unnamed tables.
 * Note: relies on the (older) int-returning seq_printf() API. */
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}
1258
/* seq_file operations for listing a family's table names in /proc. */
static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};
1265
/* open() for the table-names proc file: set up the net-namespace seq
 * file and stash the family (passed as proc data) into its private. */
static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}
1279
/* file_operations backing the per-family table-names proc entry. */
static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1287
1288
1289
1290
1291
/*
 * Traversal state for the match/target proc listings: walks the
 * NFPROTO_UNSPEC list first, then the specific family's list.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;	/* list being walked + cursor */
	uint8_t class, nfproto;		/* MTTG_TRAV_* phase, family */
};
1296
/* Phases of the two-list traversal driven by xt_mttg_seq_next(). */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,	/* walking the family-independent list */
	MTTG_TRAV_NFP_SPEC,	/* walking the specific family's list */
	MTTG_TRAV_DONE,
};
1303
/*
 * Advance the match/target traversal one step.  Handles the phase
 * transitions INIT -> UNSPEC list -> family list -> done, taking and
 * releasing the corresponding family mutexes; whichever mutex is held
 * on return is dropped by xt_mttg_seq_stop().
 */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* unspec list exhausted: switch to the specific family */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fall through to the "done" case */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}
1343
/* seq_file start: replay @*pos next-steps from a fresh traversal (seq
 * files may restart at an arbitrary position). */
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}
1356
/* seq_file stop: release whichever family mutex the current traversal
 * phase still holds. */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
1370
/* ->start for the match list: shared traversal with is_target = false. */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}
1375
/* ->next for the match list: shared traversal with is_target = false. */
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}
1380
1381static int xt_match_seq_show(struct seq_file *seq, void *v)
1382{
1383 const struct nf_mttg_trav *trav = seq->private;
1384 const struct xt_match *match;
1385
1386 switch (trav->class) {
1387 case MTTG_TRAV_NFP_UNSPEC:
1388 case MTTG_TRAV_NFP_SPEC:
1389 if (trav->curr == trav->head)
1390 return 0;
1391 match = list_entry(trav->curr, struct xt_match, list);
1392 return (*match->name == '\0') ? 0 :
1393 seq_printf(seq, "%s\n", match->name);
1394 }
1395 return 0;
1396}
1397
/* seq_file operations backing /proc/net/<af>_tables_matches. */
static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};
1404
1405static int xt_match_open(struct inode *inode, struct file *file)
1406{
1407 struct seq_file *seq;
1408 struct nf_mttg_trav *trav;
1409 int ret;
1410
1411 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1412 if (trav == NULL)
1413 return -ENOMEM;
1414
1415 ret = seq_open(file, &xt_match_seq_ops);
1416 if (ret < 0) {
1417 kfree(trav);
1418 return ret;
1419 }
1420
1421 seq = file->private_data;
1422 seq->private = trav;
1423 trav->nfproto = (unsigned long)PDE_DATA(inode);
1424 return 0;
1425}
1426
/* /proc/net/<af>_tables_matches; traversal state is freed on release. */
static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1434
/* ->start for the target list: shared traversal with is_target = true. */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}
1439
/* ->next for the target list: shared traversal with is_target = true. */
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}
1444
1445static int xt_target_seq_show(struct seq_file *seq, void *v)
1446{
1447 const struct nf_mttg_trav *trav = seq->private;
1448 const struct xt_target *target;
1449
1450 switch (trav->class) {
1451 case MTTG_TRAV_NFP_UNSPEC:
1452 case MTTG_TRAV_NFP_SPEC:
1453 if (trav->curr == trav->head)
1454 return 0;
1455 target = list_entry(trav->curr, struct xt_target, list);
1456 return (*target->name == '\0') ? 0 :
1457 seq_printf(seq, "%s\n", target->name);
1458 }
1459 return 0;
1460}
1461
/* seq_file operations backing /proc/net/<af>_tables_targets. */
static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};
1468
1469static int xt_target_open(struct inode *inode, struct file *file)
1470{
1471 struct seq_file *seq;
1472 struct nf_mttg_trav *trav;
1473 int ret;
1474
1475 trav = kmalloc(sizeof(*trav), GFP_KERNEL);
1476 if (trav == NULL)
1477 return -ENOMEM;
1478
1479 ret = seq_open(file, &xt_target_seq_ops);
1480 if (ret < 0) {
1481 kfree(trav);
1482 return ret;
1483 }
1484
1485 seq = file->private_data;
1486 seq->private = trav;
1487 trav->nfproto = (unsigned long)PDE_DATA(inode);
1488 return 0;
1489}
1490
/* /proc/net/<af>_tables_targets; traversal state is freed on release. */
static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1498
/* Suffixes appended to xt_prefix[af] to form the /proc entry names. */
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
1502
1503#endif
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1514{
1515 unsigned int hook_mask = table->valid_hooks;
1516 uint8_t i, num_hooks = hweight32(hook_mask);
1517 uint8_t hooknum;
1518 struct nf_hook_ops *ops;
1519 int ret;
1520
1521 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1522 if (ops == NULL)
1523 return ERR_PTR(-ENOMEM);
1524
1525 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1526 hook_mask >>= 1, ++hooknum) {
1527 if (!(hook_mask & 1))
1528 continue;
1529 ops[i].hook = fn;
1530 ops[i].owner = table->me;
1531 ops[i].pf = table->af;
1532 ops[i].hooknum = hooknum;
1533 ops[i].priority = table->priority;
1534 ++i;
1535 }
1536
1537 ret = nf_register_hooks(ops, num_hooks);
1538 if (ret < 0) {
1539 kfree(ops);
1540 return ERR_PTR(ret);
1541 }
1542
1543 return ops;
1544}
1545EXPORT_SYMBOL_GPL(xt_hook_link);
1546
1547
1548
1549
1550
1551
/**
 * xt_hook_unlink - undo xt_hook_link(): unregister the hooks and free ops
 * @table:	table whose valid_hooks mask sizes the ops array
 * @ops:	array previously returned by xt_hook_link()
 *
 * Assumes table->valid_hooks is unchanged since xt_hook_link(), so the
 * recomputed hweight32() matches the number of hooks registered.
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
1558
/**
 * xt_proto_init - create the per-family /proc/net entries for one netns
 * @net:	network namespace to add the entries to
 * @af:		address family (index into xt_prefix[])
 *
 * Creates <prefix>_tables_names, <prefix>_tables_matches and
 * <prefix>_tables_targets, each carrying @af as its PDE data so the open
 * handlers know which family's lists to walk.  Already-created entries
 * are removed again on failure.
 *
 * Returns 0 on success, -EINVAL for an out-of-range family, and -1 if a
 * proc entry could not be created.
 * NOTE(review): the failure paths return -1 rather than a -errno value;
 * callers appear to only test for non-zero — confirm before changing.
 */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	/* <prefix>_tables_names */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	/* <prefix>_tables_matches */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	/* <prefix>_tables_targets */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	/* unwind in reverse creation order */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
1610
1611void xt_proto_fini(struct net *net, u_int8_t af)
1612{
1613#ifdef CONFIG_PROC_FS
1614 char buf[XT_FUNCTION_MAXNAMELEN];
1615
1616 strlcpy(buf, xt_prefix[af], sizeof(buf));
1617 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1618 remove_proc_entry(buf, net->proc_net);
1619
1620 strlcpy(buf, xt_prefix[af], sizeof(buf));
1621 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1622 remove_proc_entry(buf, net->proc_net);
1623
1624 strlcpy(buf, xt_prefix[af], sizeof(buf));
1625 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1626 remove_proc_entry(buf, net->proc_net);
1627#endif
1628}
1629EXPORT_SYMBOL_GPL(xt_proto_fini);
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
/*
 * Hand out one xt_counters slot from a shared percpu block.
 *
 * On SMP, counters are carved out of XT_PCPU_BLOCK_SIZE percpu chunks:
 * the first request allocates a block aligned to its own size, and later
 * requests slice it until no full counter fits, at which point the state
 * is reset so the next call allocates a fresh block.  Because each block
 * is XT_PCPU_BLOCK_SIZE-aligned, only the first slot of a block has a
 * block-aligned address — the property xt_percpu_counter_free() uses to
 * decide when to free the whole block.
 *
 * On UP (nr_cpu_ids <= 1) the embedded counter is used directly and
 * nothing is allocated.
 *
 * Returns false only if the percpu allocation fails.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	/* a block must hold at least two counters for the scheme to work */
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		/* block exhausted: start a new one on the next call */
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1674
1675void xt_percpu_counter_free(struct xt_counters *counters)
1676{
1677 unsigned long pcnt = counters->pcnt;
1678
1679 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1680 free_percpu((void __percpu *)pcnt);
1681}
1682EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1683
1684static int __net_init xt_net_init(struct net *net)
1685{
1686 int i;
1687
1688 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1689 INIT_LIST_HEAD(&net->xt.tables[i]);
1690 return 0;
1691}
1692
/* Registered in xt_init(); gives each new netns its empty table lists. */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
1696
1697static int __init xt_init(void)
1698{
1699 unsigned int i;
1700 int rv;
1701
1702 for_each_possible_cpu(i) {
1703 seqcount_init(&per_cpu(xt_recseq, i));
1704 }
1705
1706 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1707 if (!xt)
1708 return -ENOMEM;
1709
1710 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1711 mutex_init(&xt[i].mutex);
1712#ifdef CONFIG_COMPAT
1713 mutex_init(&xt[i].compat_mutex);
1714 xt[i].compat_tab = NULL;
1715#endif
1716 INIT_LIST_HEAD(&xt[i].target);
1717 INIT_LIST_HEAD(&xt[i].match);
1718 }
1719 rv = register_pernet_subsys(&xt_net_ops);
1720 if (rv < 0)
1721 kfree(xt);
1722 return rv;
1723}
1724
/* Module exit: unregister per-netns ops and free the per-family array. */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1730
/* Entry/exit points; xt_init() must run before any *_tables user loads. */
module_init(xt_init);
module_exit(xt_fini);
1733
1734