/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/socket.h>
20#include <linux/net.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/string.h>
24#include <linux/vmalloc.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/audit.h>
29#include <linux/user_namespace.h>
30#include <net/net_namespace.h>
31
32#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter_arp.h>
34#include <linux/netfilter_ipv4/ip_tables.h>
35#include <linux/netfilter_ipv6/ip6_tables.h>
36#include <linux/netfilter_arp/arp_tables.h>
37
38MODULE_LICENSE("GPL");
39MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
40MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
41
42#define XT_PCPU_BLOCK_SIZE 4096
43#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
44
45struct compat_delta {
46 unsigned int offset;
47 int delta;
48};
49
50struct xt_af {
51 struct mutex mutex;
52 struct list_head match;
53 struct list_head target;
54#ifdef CONFIG_COMPAT
55 struct mutex compat_mutex;
56 struct compat_delta *compat_tab;
57 unsigned int number;
58 unsigned int cur;
59#endif
60};
61
62static struct xt_af *xt;
63
64static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
65 [NFPROTO_UNSPEC] = "x",
66 [NFPROTO_IPV4] = "ip",
67 [NFPROTO_ARP] = "arp",
68 [NFPROTO_BRIDGE] = "eb",
69 [NFPROTO_IPV6] = "ip6",
70};
71
/* Registration hooks for targets. */
73int xt_register_target(struct xt_target *target)
74{
75 u_int8_t af = target->family;
76
77 mutex_lock(&xt[af].mutex);
78 list_add(&target->list, &xt[af].target);
79 mutex_unlock(&xt[af].mutex);
80 return 0;
81}
82EXPORT_SYMBOL(xt_register_target);
83
84void
85xt_unregister_target(struct xt_target *target)
86{
87 u_int8_t af = target->family;
88
89 mutex_lock(&xt[af].mutex);
90 list_del(&target->list);
91 mutex_unlock(&xt[af].mutex);
92}
93EXPORT_SYMBOL(xt_unregister_target);
94
95int
96xt_register_targets(struct xt_target *target, unsigned int n)
97{
98 unsigned int i;
99 int err = 0;
100
101 for (i = 0; i < n; i++) {
102 err = xt_register_target(&target[i]);
103 if (err)
104 goto err;
105 }
106 return err;
107
108err:
109 if (i > 0)
110 xt_unregister_targets(target, i);
111 return err;
112}
113EXPORT_SYMBOL(xt_register_targets);
114
115void
116xt_unregister_targets(struct xt_target *target, unsigned int n)
117{
118 while (n-- > 0)
119 xt_unregister_target(&target[n]);
120}
121EXPORT_SYMBOL(xt_unregister_targets);
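
/*
 * Usage sketch (hypothetical extension module; all foo_* names are
 * illustrative, not part of this file):
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{
 *			.name       = "FOO",
 *			.revision   = 0,
 *			.family     = NFPROTO_IPV4,
 *			.target     = foo_tg,
 *			.targetsize = sizeof(struct xt_foo_tginfo),
 *			.me         = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 */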
122
123int xt_register_match(struct xt_match *match)
124{
125 u_int8_t af = match->family;
126
127 mutex_lock(&xt[af].mutex);
128 list_add(&match->list, &xt[af].match);
129 mutex_unlock(&xt[af].mutex);
130 return 0;
131}
132EXPORT_SYMBOL(xt_register_match);
133
134void
135xt_unregister_match(struct xt_match *match)
136{
137 u_int8_t af = match->family;
138
139 mutex_lock(&xt[af].mutex);
140 list_del(&match->list);
141 mutex_unlock(&xt[af].mutex);
142}
143EXPORT_SYMBOL(xt_unregister_match);
144
145int
146xt_register_matches(struct xt_match *match, unsigned int n)
147{
148 unsigned int i;
149 int err = 0;
150
151 for (i = 0; i < n; i++) {
152 err = xt_register_match(&match[i]);
153 if (err)
154 goto err;
155 }
156 return err;
157
158err:
159 if (i > 0)
160 xt_unregister_matches(match, i);
161 return err;
162}
163EXPORT_SYMBOL(xt_register_matches);
164
165void
166xt_unregister_matches(struct xt_match *match, unsigned int n)
167{
168 while (n-- > 0)
169 xt_unregister_match(&match[n]);
170}
171EXPORT_SYMBOL(xt_unregister_matches);
172
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
181struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
182{
183 struct xt_match *m;
184 int err = -ENOENT;
185
186 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
187 return ERR_PTR(-EINVAL);
188
189 mutex_lock(&xt[af].mutex);
190 list_for_each_entry(m, &xt[af].match, list) {
191 if (strcmp(m->name, name) == 0) {
192 if (m->revision == revision) {
193 if (try_module_get(m->me)) {
194 mutex_unlock(&xt[af].mutex);
195 return m;
196 }
197 } else
198 err = -EPROTOTYPE;
199 }
200 }
201 mutex_unlock(&xt[af].mutex);
202
203 if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
205 return xt_find_match(NFPROTO_UNSPEC, name, revision);
206
207 return ERR_PTR(err);
208}
209EXPORT_SYMBOL(xt_find_match);
210
211struct xt_match *
212xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
213{
214 struct xt_match *match;
215
216 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
217 return ERR_PTR(-EINVAL);
218
219 match = xt_find_match(nfproto, name, revision);
220 if (IS_ERR(match)) {
221 request_module("%st_%s", xt_prefix[nfproto], name);
222 match = xt_find_match(nfproto, name, revision);
223 }
224
225 return match;
226}
227EXPORT_SYMBOL_GPL(xt_request_find_match);
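
/*
 * A successful lookup takes a reference on the extension's module;
 * callers must drop it when done. Minimal sketch (illustrative caller,
 * not part of this file):
 *
 *	struct xt_match *m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	module_put(m->me);
 */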
228
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
230struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
231{
232 struct xt_target *t;
233 int err = -ENOENT;
234
235 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
236 return ERR_PTR(-EINVAL);
237
238 mutex_lock(&xt[af].mutex);
239 list_for_each_entry(t, &xt[af].target, list) {
240 if (strcmp(t->name, name) == 0) {
241 if (t->revision == revision) {
242 if (try_module_get(t->me)) {
243 mutex_unlock(&xt[af].mutex);
244 return t;
245 }
246 } else
247 err = -EPROTOTYPE;
248 }
249 }
250 mutex_unlock(&xt[af].mutex);
251
252 if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
254 return xt_find_target(NFPROTO_UNSPEC, name, revision);
255
256 return ERR_PTR(err);
257}
258EXPORT_SYMBOL(xt_find_target);
259
260struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
261{
262 struct xt_target *target;
263
264 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
265 return ERR_PTR(-EINVAL);
266
267 target = xt_find_target(af, name, revision);
268 if (IS_ERR(target)) {
269 request_module("%st_%s", xt_prefix[af], name);
270 target = xt_find_target(af, name, revision);
271 }
272
273 return target;
274}
275EXPORT_SYMBOL_GPL(xt_request_find_target);
276
277
278static int xt_obj_to_user(u16 __user *psize, u16 size,
279 void __user *pname, const char *name,
280 u8 __user *prev, u8 rev)
281{
282 if (put_user(size, psize))
283 return -EFAULT;
284 if (copy_to_user(pname, name, strlen(name) + 1))
285 return -EFAULT;
286 if (put_user(rev, prev))
287 return -EFAULT;
288
289 return 0;
290}
291
292#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \
293 xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \
294 U->u.user.name, K->u.kernel.TYPE->name, \
295 &U->u.user.revision, K->u.kernel.TYPE->revision)
296
297int xt_data_to_user(void __user *dst, const void *src,
298 int usersize, int size, int aligned_size)
299{
300 usersize = usersize ? : size;
301 if (copy_to_user(dst, src, usersize))
302 return -EFAULT;
303 if (usersize != aligned_size &&
304 clear_user(dst + usersize, aligned_size - usersize))
305 return -EFAULT;
306
307 return 0;
308}
309EXPORT_SYMBOL_GPL(xt_data_to_user);
310
311#define XT_DATA_TO_USER(U, K, TYPE) \
312 xt_data_to_user(U->data, K->data, \
313 K->u.kernel.TYPE->usersize, \
314 K->u.kernel.TYPE->TYPE##size, \
315 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
316
317int xt_match_to_user(const struct xt_entry_match *m,
318 struct xt_entry_match __user *u)
319{
320 return XT_OBJ_TO_USER(u, m, match, 0) ||
321 XT_DATA_TO_USER(u, m, match);
322}
323EXPORT_SYMBOL_GPL(xt_match_to_user);
324
325int xt_target_to_user(const struct xt_entry_target *t,
326 struct xt_entry_target __user *u)
327{
328 return XT_OBJ_TO_USER(u, t, target, 0) ||
329 XT_DATA_TO_USER(u, t, target);
330}
331EXPORT_SYMBOL_GPL(xt_target_to_user);
332
333static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
334{
335 const struct xt_match *m;
336 int have_rev = 0;
337
338 list_for_each_entry(m, &xt[af].match, list) {
339 if (strcmp(m->name, name) == 0) {
340 if (m->revision > *bestp)
341 *bestp = m->revision;
342 if (m->revision == revision)
343 have_rev = 1;
344 }
345 }
346
347 if (af != NFPROTO_UNSPEC && !have_rev)
348 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
349
350 return have_rev;
351}
352
353static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
354{
355 const struct xt_target *t;
356 int have_rev = 0;
357
358 list_for_each_entry(t, &xt[af].target, list) {
359 if (strcmp(t->name, name) == 0) {
360 if (t->revision > *bestp)
361 *bestp = t->revision;
362 if (t->revision == revision)
363 have_rev = 1;
364 }
365 }
366
367 if (af != NFPROTO_UNSPEC && !have_rev)
368 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
369
370 return have_rev;
371}
372
/* Returns true or false (if no such extension at all) */
374int xt_find_revision(u8 af, const char *name, u8 revision, int target,
375 int *err)
376{
377 int have_rev, best = -1;
378
379 mutex_lock(&xt[af].mutex);
380 if (target == 1)
381 have_rev = target_revfn(af, name, revision, &best);
382 else
383 have_rev = match_revfn(af, name, revision, &best);
384 mutex_unlock(&xt[af].mutex);
385
	/* Nothing at all?  Return 0 to try loading module. */
387 if (best == -1) {
388 *err = -ENOENT;
389 return 0;
390 }
391
392 *err = best;
393 if (!have_rev)
394 *err = -EPROTONOSUPPORT;
395 return 1;
396}
397EXPORT_SYMBOL_GPL(xt_find_revision);
398
399static char *
400textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
401{
402 static const char *const inetbr_names[] = {
403 "PREROUTING", "INPUT", "FORWARD",
404 "OUTPUT", "POSTROUTING", "BROUTING",
405 };
406 static const char *const arp_names[] = {
407 "INPUT", "FORWARD", "OUTPUT",
408 };
409 const char *const *names;
410 unsigned int i, max;
411 char *p = buf;
412 bool np = false;
413 int res;
414
415 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
416 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
417 ARRAY_SIZE(inetbr_names);
418 *p = '\0';
419 for (i = 0; i < max; ++i) {
420 if (!(mask & (1 << i)))
421 continue;
422 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
423 if (res > 0) {
424 size -= res;
425 p += res;
426 }
427 np = true;
428 }
429
430 return buf;
431}
432
/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is useable.
 */
446int xt_check_proc_name(const char *name, unsigned int size)
447{
448 if (name[0] == '\0')
449 return -EINVAL;
450
451 if (strnlen(name, size) == size)
452 return -ENAMETOOLONG;
453
454 if (strcmp(name, ".") == 0 ||
455 strcmp(name, "..") == 0 ||
456 strchr(name, '/'))
457 return -EINVAL;
458
459 return 0;
460}
461EXPORT_SYMBOL(xt_check_proc_name);
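
/*
 * For example, "recent-badguys" passes the checks above, while "", ".",
 * ".." and "a/b" fail with -EINVAL, and a name that fills the whole
 * buffer without a NUL terminator fails with -ENAMETOOLONG.
 */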
462
463int xt_check_match(struct xt_mtchk_param *par,
464 unsigned int size, u_int8_t proto, bool inv_proto)
465{
466 int ret;
467
468 if (XT_ALIGN(par->match->matchsize) != size &&
469 par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
474 pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
475 xt_prefix[par->family], par->match->name,
476 par->match->revision,
477 XT_ALIGN(par->match->matchsize), size);
478 return -EINVAL;
479 }
480 if (par->match->table != NULL &&
481 strcmp(par->match->table, par->table) != 0) {
482 pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
483 xt_prefix[par->family], par->match->name,
484 par->match->table, par->table);
485 return -EINVAL;
486 }
487 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
488 char used[64], allow[64];
489
490 pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
491 xt_prefix[par->family], par->match->name,
492 textify_hooks(used, sizeof(used),
493 par->hook_mask, par->family),
494 textify_hooks(allow, sizeof(allow),
495 par->match->hooks,
496 par->family));
497 return -EINVAL;
498 }
499 if (par->match->proto && (par->match->proto != proto || inv_proto)) {
500 pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
501 xt_prefix[par->family], par->match->name,
502 par->match->proto);
503 return -EINVAL;
504 }
505 if (par->match->checkentry != NULL) {
506 ret = par->match->checkentry(par);
507 if (ret < 0)
508 return ret;
509 else if (ret > 0)
			/* Flag up potential errors. */
511 return -EIO;
512 }
513 return 0;
514}
515EXPORT_SYMBOL_GPL(xt_check_match);
516
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
528static int xt_check_entry_match(const char *match, const char *target,
529 const size_t alignment)
530{
531 const struct xt_entry_match *pos;
532 int length = target - match;
533
534 if (length == 0)
535 return 0;
536
537 pos = (struct xt_entry_match *)match;
538 do {
539 if ((unsigned long)pos % alignment)
540 return -EINVAL;
541
542 if (length < (int)sizeof(struct xt_entry_match))
543 return -EINVAL;
544
545 if (pos->u.match_size < sizeof(struct xt_entry_match))
546 return -EINVAL;
547
548 if (pos->u.match_size > length)
549 return -EINVAL;
550
551 length -= pos->u.match_size;
552 pos = ((void *)((char *)(pos) + (pos)->u.match_size));
553 } while (length > 0);
554
555 return 0;
556}
557
/**
 * xt_check_table_hooks - check hook entry points are sane
 *
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
 *
 * Validates that the hook entry points and underflows are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
567int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
568{
569 const char *err = "unsorted underflow";
570 unsigned int i, max_uflow, max_entry;
571 bool check_hooks = false;
572
573 BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));
574
575 max_entry = 0;
576 max_uflow = 0;
577
578 for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
579 if (!(valid_hooks & (1 << i)))
580 continue;
581
582 if (info->hook_entry[i] == 0xFFFFFFFF)
583 return -EINVAL;
584 if (info->underflow[i] == 0xFFFFFFFF)
585 return -EINVAL;
586
587 if (check_hooks) {
588 if (max_uflow > info->underflow[i])
589 goto error;
590
591 if (max_uflow == info->underflow[i]) {
592 err = "duplicate underflow";
593 goto error;
594 }
595 if (max_entry > info->hook_entry[i]) {
596 err = "unsorted entry";
597 goto error;
598 }
599 if (max_entry == info->hook_entry[i]) {
600 err = "duplicate entry";
601 goto error;
602 }
603 }
604 max_entry = info->hook_entry[i];
605 max_uflow = info->underflow[i];
606 check_hooks = true;
607 }
608
609 return 0;
610error:
611 pr_err_ratelimited("%s at hook %d\n", err, i);
612 return -EINVAL;
613}
614EXPORT_SYMBOL(xt_check_table_hooks);
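
/*
 * Example of what the ordering checks above enforce: with INPUT, FORWARD
 * and OUTPUT enabled, hook_entry offsets such as {0, 152, 304} are
 * accepted, {0, 304, 152} is rejected as "unsorted entry" and
 * {0, 152, 152} as "duplicate entry"; the same rules apply to underflow.
 */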
615
616static bool verdict_ok(int verdict)
617{
618 if (verdict > 0)
619 return true;
620
621 if (verdict < 0) {
622 int v = -verdict - 1;
623
624 if (verdict == XT_RETURN)
625 return true;
626
627 switch (v) {
628 case NF_ACCEPT: return true;
629 case NF_DROP: return true;
630 case NF_QUEUE: return true;
631 default:
632 break;
633 }
634
635 return false;
636 }
637
638 return false;
639}
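
/*
 * Verdict encoding, as checked above: a positive verdict is a jump
 * offset within the rule blob, XT_RETURN pops back to the calling
 * chain, and an absolute verdict v is stored as -v - 1, so NF_DROP (0)
 * appears as -1 and NF_ACCEPT (1) as -2.
 */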
640
641static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
642 const char *msg, unsigned int msglen)
643{
644 return usersize == kernsize && strnlen(msg, msglen) < msglen;
645}
646
647#ifdef CONFIG_COMPAT
648int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
649{
650 struct xt_af *xp = &xt[af];
651
652 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
653
654 if (WARN_ON(!xp->compat_tab))
655 return -ENOMEM;
656
657 if (xp->cur >= xp->number)
658 return -EINVAL;
659
660 if (xp->cur)
661 delta += xp->compat_tab[xp->cur - 1].delta;
662 xp->compat_tab[xp->cur].offset = offset;
663 xp->compat_tab[xp->cur].delta = delta;
664 xp->cur++;
665 return 0;
666}
667EXPORT_SYMBOL_GPL(xt_compat_add_offset);
668
669void xt_compat_flush_offsets(u_int8_t af)
670{
671 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
672
673 if (xt[af].compat_tab) {
674 vfree(xt[af].compat_tab);
675 xt[af].compat_tab = NULL;
676 xt[af].number = 0;
677 xt[af].cur = 0;
678 }
679}
680EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
681
682int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
683{
684 struct compat_delta *tmp = xt[af].compat_tab;
685 int mid, left = 0, right = xt[af].cur - 1;
686
687 while (left <= right) {
688 mid = (left + right) >> 1;
689 if (offset > tmp[mid].offset)
690 left = mid + 1;
691 else if (offset < tmp[mid].offset)
692 right = mid - 1;
693 else
694 return mid ? tmp[mid - 1].delta : 0;
695 }
696 return left ? tmp[left - 1].delta : 0;
697}
698EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
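
/*
 * Worked example of the delta bookkeeping: if translating rule 0 from
 * compat layout grows it by 8 bytes and rule 1 by another 4, the table
 * holds cumulative deltas {8, 12}. A compat jump target at rule 1's
 * offset is then shifted by 8, and one past both rules by 12.
 */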
699
700int xt_compat_init_offsets(u8 af, unsigned int number)
701{
702 size_t mem;
703
704 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
705
706 if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
707 return -EINVAL;
708
709 if (WARN_ON(xt[af].compat_tab))
710 return -EINVAL;
711
712 mem = sizeof(struct compat_delta) * number;
713 if (mem > XT_MAX_TABLE_SIZE)
714 return -ENOMEM;
715
716 xt[af].compat_tab = vmalloc(mem);
717 if (!xt[af].compat_tab)
718 return -ENOMEM;
719
720 xt[af].number = number;
721 xt[af].cur = 0;
722
723 return 0;
724}
725EXPORT_SYMBOL(xt_compat_init_offsets);
726
727int xt_compat_match_offset(const struct xt_match *match)
728{
729 u_int16_t csize = match->compatsize ? : match->matchsize;
730 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
731}
732EXPORT_SYMBOL_GPL(xt_compat_match_offset);
733
734void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
735 unsigned int *size)
736{
737 const struct xt_match *match = m->u.kernel.match;
738 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
739 int pad, off = xt_compat_match_offset(match);
740 u_int16_t msize = cm->u.user.match_size;
741 char name[sizeof(m->u.user.name)];
742
743 m = *dstptr;
744 memcpy(m, cm, sizeof(*cm));
745 if (match->compat_from_user)
746 match->compat_from_user(m->data, cm->data);
747 else
748 memcpy(m->data, cm->data, msize - sizeof(*cm));
749 pad = XT_ALIGN(match->matchsize) - match->matchsize;
750 if (pad > 0)
751 memset(m->data + match->matchsize, 0, pad);
752
753 msize += off;
754 m->u.user.match_size = msize;
755 strlcpy(name, match->name, sizeof(name));
756 module_put(match->me);
757 strncpy(m->u.user.name, name, sizeof(m->u.user.name));
758
759 *size += off;
760 *dstptr += msize;
761}
762EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
763
764#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
765 xt_data_to_user(U->data, K->data, \
766 K->u.kernel.TYPE->usersize, \
767 C_SIZE, \
768 COMPAT_XT_ALIGN(C_SIZE))
769
770int xt_compat_match_to_user(const struct xt_entry_match *m,
771 void __user **dstptr, unsigned int *size)
772{
773 const struct xt_match *match = m->u.kernel.match;
774 struct compat_xt_entry_match __user *cm = *dstptr;
775 int off = xt_compat_match_offset(match);
776 u_int16_t msize = m->u.user.match_size - off;
777
778 if (XT_OBJ_TO_USER(cm, m, match, msize))
779 return -EFAULT;
780
781 if (match->compat_to_user) {
782 if (match->compat_to_user((void __user *)cm->data, m->data))
783 return -EFAULT;
784 } else {
785 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
786 return -EFAULT;
787 }
788
789 *size -= off;
790 *dstptr += msize;
791 return 0;
792}
793EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
794
795
796struct compat_xt_standard_target {
797 struct compat_xt_entry_target t;
798 compat_uint_t verdict;
799};
800
801struct compat_xt_error_target {
802 struct compat_xt_entry_target t;
803 char errorname[XT_FUNCTION_MAXNAMELEN];
804};
805
806int xt_compat_check_entry_offsets(const void *base, const char *elems,
807 unsigned int target_offset,
808 unsigned int next_offset)
809{
810 long size_of_base_struct = elems - (const char *)base;
811 const struct compat_xt_entry_target *t;
812 const char *e = base;
813
814 if (target_offset < size_of_base_struct)
815 return -EINVAL;
816
817 if (target_offset + sizeof(*t) > next_offset)
818 return -EINVAL;
819
820 t = (void *)(e + target_offset);
821 if (t->u.target_size < sizeof(*t))
822 return -EINVAL;
823
824 if (target_offset + t->u.target_size > next_offset)
825 return -EINVAL;
826
827 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
828 const struct compat_xt_standard_target *st = (const void *)t;
829
830 if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
831 return -EINVAL;
832
833 if (!verdict_ok(st->verdict))
834 return -EINVAL;
835 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
836 const struct compat_xt_error_target *et = (const void *)t;
837
838 if (!error_tg_ok(t->u.target_size, sizeof(*et),
839 et->errorname, sizeof(et->errorname)))
840 return -EINVAL;
841 }
842
	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
847 BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
848
849 return xt_check_entry_match(elems, base + target_offset,
850 __alignof__(struct compat_xt_entry_match));
851}
852EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed ruleset
 *   blob size.
 */
898int xt_check_entry_offsets(const void *base,
899 const char *elems,
900 unsigned int target_offset,
901 unsigned int next_offset)
902{
903 long size_of_base_struct = elems - (const char *)base;
904 const struct xt_entry_target *t;
905 const char *e = base;
906
	/* target start is within the ip/ip6/arpt_entry struct */
908 if (target_offset < size_of_base_struct)
909 return -EINVAL;
910
911 if (target_offset + sizeof(*t) > next_offset)
912 return -EINVAL;
913
914 t = (void *)(e + target_offset);
915 if (t->u.target_size < sizeof(*t))
916 return -EINVAL;
917
918 if (target_offset + t->u.target_size > next_offset)
919 return -EINVAL;
920
921 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
922 const struct xt_standard_target *st = (const void *)t;
923
924 if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
925 return -EINVAL;
926
927 if (!verdict_ok(st->verdict))
928 return -EINVAL;
929 } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
930 const struct xt_error_target *et = (const void *)t;
931
932 if (!error_tg_ok(t->u.target_size, sizeof(*et),
933 et->errorname, sizeof(et->errorname)))
934 return -EINVAL;
935 }
936
937 return xt_check_entry_match(elems, base + target_offset,
938 __alignof__(struct xt_entry_match));
939}
940EXPORT_SYMBOL(xt_check_entry_offsets);
941
/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or zeroed kvmalloc'd array
 */
949unsigned int *xt_alloc_entry_offsets(unsigned int size)
950{
951 if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
952 return NULL;
953
954 return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
956}
957EXPORT_SYMBOL(xt_alloc_entry_offsets);
958
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
966bool xt_find_jump_offset(const unsigned int *offsets,
967 unsigned int target, unsigned int size)
968{
969 int m, low = 0, hi = size;
970
971 while (hi > low) {
972 m = (low + hi) / 2u;
973
974 if (offsets[m] > target)
975 hi = m;
976 else if (offsets[m] < target)
977 low = m + 1;
978 else
979 return true;
980 }
981
982 return false;
983}
984EXPORT_SYMBOL(xt_find_jump_offset);
985
986int xt_check_target(struct xt_tgchk_param *par,
987 unsigned int size, u_int8_t proto, bool inv_proto)
988{
989 int ret;
990
991 if (XT_ALIGN(par->target->targetsize) != size) {
992 pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
993 xt_prefix[par->family], par->target->name,
994 par->target->revision,
995 XT_ALIGN(par->target->targetsize), size);
996 return -EINVAL;
997 }
998 if (par->target->table != NULL &&
999 strcmp(par->target->table, par->table) != 0) {
1000 pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
1001 xt_prefix[par->family], par->target->name,
1002 par->target->table, par->table);
1003 return -EINVAL;
1004 }
1005 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
1006 char used[64], allow[64];
1007
1008 pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
1009 xt_prefix[par->family], par->target->name,
1010 textify_hooks(used, sizeof(used),
1011 par->hook_mask, par->family),
1012 textify_hooks(allow, sizeof(allow),
1013 par->target->hooks,
1014 par->family));
1015 return -EINVAL;
1016 }
1017 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
1018 pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
1019 xt_prefix[par->family], par->target->name,
1020 par->target->proto);
1021 return -EINVAL;
1022 }
1023 if (par->target->checkentry != NULL) {
1024 ret = par->target->checkentry(par);
1025 if (ret < 0)
1026 return ret;
1027 else if (ret > 0)
1028
1029 return -EIO;
1030 }
1031 return 0;
1032}
1033EXPORT_SYMBOL_GPL(xt_check_target);
1034
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call was made by a 32bit task on a
 *	64bit kernel
 *
 * Copies the counter metadata from @user and stores it in @info.
 *
 * vmalloc memory to hold the counters is allocated as part of this call;
 * the blob of counters that follows the header is copied into it and
 * returned to the caller, who must vfree() it. On error an ERR_PTR is
 * returned instead.
 */
1056void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
1057 struct xt_counters_info *info, bool compat)
1058{
1059 void *mem;
1060 u64 size;
1061
1062#ifdef CONFIG_COMPAT
1063 if (compat) {
		/* structures only differ in size due to alignment */
1065 struct compat_xt_counters_info compat_tmp;
1066
1067 if (len <= sizeof(compat_tmp))
1068 return ERR_PTR(-EINVAL);
1069
1070 len -= sizeof(compat_tmp);
1071 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
1072 return ERR_PTR(-EFAULT);
1073
1074 memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
1075 info->num_counters = compat_tmp.num_counters;
1076 user += sizeof(compat_tmp);
1077 } else
1078#endif
1079 {
1080 if (len <= sizeof(*info))
1081 return ERR_PTR(-EINVAL);
1082
1083 len -= sizeof(*info);
1084 if (copy_from_user(info, user, sizeof(*info)) != 0)
1085 return ERR_PTR(-EFAULT);
1086
1087 user += sizeof(*info);
1088 }
1089 info->name[sizeof(info->name) - 1] = '\0';
1090
1091 size = sizeof(struct xt_counters);
1092 size *= info->num_counters;
1093
1094 if (size != (u64)len)
1095 return ERR_PTR(-EINVAL);
1096
1097 mem = vmalloc(len);
1098 if (!mem)
1099 return ERR_PTR(-ENOMEM);
1100
1101 if (copy_from_user(mem, user, len) == 0)
1102 return mem;
1103
1104 vfree(mem);
1105 return ERR_PTR(-EFAULT);
1106}
1107EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
1108
1109#ifdef CONFIG_COMPAT
1110int xt_compat_target_offset(const struct xt_target *target)
1111{
1112 u_int16_t csize = target->compatsize ? : target->targetsize;
1113 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1114}
1115EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1116
1117void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
1118 unsigned int *size)
1119{
1120 const struct xt_target *target = t->u.kernel.target;
1121 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
1122 int pad, off = xt_compat_target_offset(target);
1123 u_int16_t tsize = ct->u.user.target_size;
1124 char name[sizeof(t->u.user.name)];
1125
1126 t = *dstptr;
1127 memcpy(t, ct, sizeof(*ct));
1128 if (target->compat_from_user)
1129 target->compat_from_user(t->data, ct->data);
1130 else
1131 memcpy(t->data, ct->data, tsize - sizeof(*ct));
1132 pad = XT_ALIGN(target->targetsize) - target->targetsize;
1133 if (pad > 0)
1134 memset(t->data + target->targetsize, 0, pad);
1135
1136 tsize += off;
1137 t->u.user.target_size = tsize;
1138 strlcpy(name, target->name, sizeof(name));
1139 module_put(target->me);
1140 strncpy(t->u.user.name, name, sizeof(t->u.user.name));
1141
1142 *size += off;
1143 *dstptr += tsize;
1144}
1145EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1146
1147int xt_compat_target_to_user(const struct xt_entry_target *t,
1148 void __user **dstptr, unsigned int *size)
1149{
1150 const struct xt_target *target = t->u.kernel.target;
1151 struct compat_xt_entry_target __user *ct = *dstptr;
1152 int off = xt_compat_target_offset(target);
1153 u_int16_t tsize = t->u.user.target_size - off;
1154
1155 if (XT_OBJ_TO_USER(ct, t, target, tsize))
1156 return -EFAULT;
1157
1158 if (target->compat_to_user) {
1159 if (target->compat_to_user((void __user *)ct->data, t->data))
1160 return -EFAULT;
1161 } else {
1162 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
1163 return -EFAULT;
1164 }
1165
1166 *size -= off;
1167 *dstptr += tsize;
1168 return 0;
1169}
1170EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
1171#endif
1172
1173struct xt_table_info *xt_alloc_table_info(unsigned int size)
1174{
1175 struct xt_table_info *info = NULL;
1176 size_t sz = sizeof(*info) + size;
1177
1178 if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1179 return NULL;
1180
	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
	 * work reasonably well if sz is too large and bail out rather
	 * than shoot all processes down before realizing there is nothing
	 * more to reclaim.
	 */
1186 info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
1187 if (!info)
1188 return NULL;
1189
1190 memset(info, 0, sizeof(*info));
1191 info->size = size;
1192 return info;
1193}
1194EXPORT_SYMBOL(xt_alloc_table_info);
1195
1196void xt_free_table_info(struct xt_table_info *info)
1197{
1198 int cpu;
1199
1200 if (info->jumpstack != NULL) {
1201 for_each_possible_cpu(cpu)
1202 kvfree(info->jumpstack[cpu]);
1203 kvfree(info->jumpstack);
1204 }
1205
1206 kvfree(info);
1207}
1208EXPORT_SYMBOL(xt_free_table_info);
1209
/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
1211struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
1212 const char *name)
1213{
1214 struct xt_table *t, *found = NULL;
1215
1216 mutex_lock(&xt[af].mutex);
1217 list_for_each_entry(t, &net->xt.tables[af], list)
1218 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
1219 return t;
1220
1221 if (net == &init_net)
1222 goto out;
1223
	/* Table doesn't exist in this netns, re-try init */
1225 list_for_each_entry(t, &init_net.xt.tables[af], list) {
1226 int err;
1227
1228 if (strcmp(t->name, name))
1229 continue;
1230 if (!try_module_get(t->me))
1231 goto out;
1232 mutex_unlock(&xt[af].mutex);
1233 err = t->table_init(net);
1234 if (err < 0) {
1235 module_put(t->me);
1236 return ERR_PTR(err);
1237 }
1238
1239 found = t;
1240
1241 mutex_lock(&xt[af].mutex);
1242 break;
1243 }
1244
1245 if (!found)
1246 goto out;
1247
	/* and once again: */
1249 list_for_each_entry(t, &net->xt.tables[af], list)
1250 if (strcmp(t->name, name) == 0)
1251 return t;
1252
1253 module_put(found->me);
1254 out:
1255 mutex_unlock(&xt[af].mutex);
1256 return ERR_PTR(-ENOENT);
1257}
1258EXPORT_SYMBOL_GPL(xt_find_table_lock);
1259
1260struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1261 const char *name)
1262{
1263 struct xt_table *t = xt_find_table_lock(net, af, name);
1264
1265#ifdef CONFIG_MODULES
1266 if (IS_ERR(t)) {
1267 int err = request_module("%stable_%s", xt_prefix[af], name);
1268 if (err < 0)
1269 return ERR_PTR(err);
1270 t = xt_find_table_lock(net, af, name);
1271 }
1272#endif
1273
1274 return t;
1275}
1276EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1277
1278void xt_table_unlock(struct xt_table *table)
1279{
1280 mutex_unlock(&xt[table->af].mutex);
1281}
1282EXPORT_SYMBOL_GPL(xt_table_unlock);
1283
1284#ifdef CONFIG_COMPAT
1285void xt_compat_lock(u_int8_t af)
1286{
1287 mutex_lock(&xt[af].compat_mutex);
1288}
1289EXPORT_SYMBOL_GPL(xt_compat_lock);
1290
1291void xt_compat_unlock(u_int8_t af)
1292{
1293 mutex_unlock(&xt[af].compat_mutex);
1294}
1295EXPORT_SYMBOL_GPL(xt_compat_unlock);
1296#endif
1297
1298DEFINE_PER_CPU(seqcount_t, xt_recseq);
1299EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
1300
1301struct static_key xt_tee_enabled __read_mostly;
1302EXPORT_SYMBOL_GPL(xt_tee_enabled);
1303
1304static int xt_jumpstack_alloc(struct xt_table_info *i)
1305{
1306 unsigned int size;
1307 int cpu;
1308
1309 size = sizeof(void **) * nr_cpu_ids;
1310 if (size > PAGE_SIZE)
1311 i->jumpstack = kvzalloc(size, GFP_KERNEL);
1312 else
1313 i->jumpstack = kzalloc(size, GFP_KERNEL);
1314 if (i->jumpstack == NULL)
1315 return -ENOMEM;
1316
	/* ruleset without jumps -- no stack needed */
1318 if (i->stacksize == 0)
1319 return 0;
1320
	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * use the TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
1331 size = sizeof(void *) * i->stacksize * 2u;
1332 for_each_possible_cpu(cpu) {
1333 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
1334 cpu_to_node(cpu));
1335 if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
1341 return -ENOMEM;
1342 }
1343
1344 return 0;
1345}
1346
1347struct xt_counters *xt_counters_alloc(unsigned int counters)
1348{
1349 struct xt_counters *mem;
1350
1351 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1352 return NULL;
1353
1354 counters *= sizeof(*mem);
1355 if (counters > XT_MAX_TABLE_SIZE)
1356 return NULL;
1357
1358 return vzalloc(counters);
1359}
1360EXPORT_SYMBOL(xt_counters_alloc);
1361
1362struct xt_table_info *
1363xt_replace_table(struct xt_table *table,
1364 unsigned int num_counters,
1365 struct xt_table_info *newinfo,
1366 int *error)
1367{
1368 struct xt_table_info *private;
1369 unsigned int cpu;
1370 int ret;
1371
1372 ret = xt_jumpstack_alloc(newinfo);
1373 if (ret < 0) {
1374 *error = ret;
1375 return NULL;
1376 }
1377
	/* Do the substitution. */
1379 local_bh_disable();
1380 private = table->private;
1381
	/* Check inside lock: is the old number correct? */
1383 if (num_counters != private->number) {
1384 pr_debug("num_counters != table->private->number (%u/%u)\n",
1385 num_counters, private->number);
1386 local_bh_enable();
1387 *error = -EAGAIN;
1388 return NULL;
1389 }
1390
1391 newinfo->initial_entries = private->initial_entries;
1392
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
1396 smp_wmb();
1397 table->private = newinfo;
1398
	/* make sure all cpus see new ->private value */
1400 smp_wmb();
1401
	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
1406 local_bh_enable();
1407
	/* ... so wait for even xt_recseq on all cpus */
1409 for_each_possible_cpu(cpu) {
1410 seqcount_t *s = &per_cpu(xt_recseq, cpu);
1411 u32 seq = raw_read_seqcount(s);
1412
1413 if (seq & 1) {
1414 do {
1415 cond_resched();
1416 cpu_relax();
1417 } while (seq == raw_read_seqcount(s));
1418 }
1419 }
1420
1421#ifdef CONFIG_AUDIT
1422 if (audit_enabled) {
1423 audit_log(audit_context(), GFP_KERNEL,
1424 AUDIT_NETFILTER_CFG,
1425 "table=%s family=%u entries=%u",
1426 table->name, table->af, private->number);
1427 }
1428#endif
1429
1430 return private;
1431}
1432EXPORT_SYMBOL_GPL(xt_replace_table);
1433
1434struct xt_table *xt_register_table(struct net *net,
1435 const struct xt_table *input_table,
1436 struct xt_table_info *bootstrap,
1437 struct xt_table_info *newinfo)
1438{
1439 int ret;
1440 struct xt_table_info *private;
1441 struct xt_table *t, *table;
1442
	/* Don't add one object to multiple lists. */
1444 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1445 if (!table) {
1446 ret = -ENOMEM;
1447 goto out;
1448 }
1449
1450 mutex_lock(&xt[table->af].mutex);
1451
1452 list_for_each_entry(t, &net->xt.tables[table->af], list) {
1453 if (strcmp(t->name, table->name) == 0) {
1454 ret = -EEXIST;
1455 goto unlock;
1456 }
1457 }
1458
	/* Simplifies replace_table code. */
1460 table->private = bootstrap;
1461
1462 if (!xt_replace_table(table, 0, newinfo, &ret))
1463 goto unlock;
1464
1465 private = table->private;
1466 pr_debug("table->private->number = %u\n", private->number);
1467
	/* save number of initial entries */
1469 private->initial_entries = private->number;
1470
1471 list_add(&table->list, &net->xt.tables[table->af]);
1472 mutex_unlock(&xt[table->af].mutex);
1473 return table;
1474
1475unlock:
1476 mutex_unlock(&xt[table->af].mutex);
1477 kfree(table);
1478out:
1479 return ERR_PTR(ret);
1480}
1481EXPORT_SYMBOL_GPL(xt_register_table);
1482
1483void *xt_unregister_table(struct xt_table *table)
1484{
1485 struct xt_table_info *private;
1486
1487 mutex_lock(&xt[table->af].mutex);
1488 private = table->private;
1489 list_del(&table->list);
1490 mutex_unlock(&xt[table->af].mutex);
1491 kfree(table);
1492
1493 return private;
1494}
1495EXPORT_SYMBOL_GPL(xt_unregister_table);
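
/*
 * The returned xt_table_info is no longer reachable through the table;
 * callers are expected to release it with xt_free_table_info() once no
 * packet-processing path can still be using it.
 */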
1496
1497#ifdef CONFIG_PROC_FS
1498static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1499{
1500 struct net *net = seq_file_net(seq);
1501 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1502
1503 mutex_lock(&xt[af].mutex);
1504 return seq_list_start(&net->xt.tables[af], *pos);
1505}
1506
1507static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1508{
1509 struct net *net = seq_file_net(seq);
1510 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1511
1512 return seq_list_next(v, &net->xt.tables[af], pos);
1513}
1514
1515static void xt_table_seq_stop(struct seq_file *seq, void *v)
1516{
1517 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1518
1519 mutex_unlock(&xt[af].mutex);
1520}
1521
1522static int xt_table_seq_show(struct seq_file *seq, void *v)
1523{
1524 struct xt_table *table = list_entry(v, struct xt_table, list);
1525
1526 if (*table->name)
1527 seq_printf(seq, "%s\n", table->name);
1528 return 0;
1529}
1530
1531static const struct seq_operations xt_table_seq_ops = {
1532 .start = xt_table_seq_start,
1533 .next = xt_table_seq_next,
1534 .stop = xt_table_seq_stop,
1535 .show = xt_table_seq_show,
1536};
1537
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
1542struct nf_mttg_trav {
1543 struct list_head *head, *curr;
1544 uint8_t class;
1545};
1546
1547enum {
1548 MTTG_TRAV_INIT,
1549 MTTG_TRAV_NFP_UNSPEC,
1550 MTTG_TRAV_NFP_SPEC,
1551 MTTG_TRAV_DONE,
1552};
1553
1554static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
1555 bool is_target)
1556{
1557 static const uint8_t next_class[] = {
1558 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
1559 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
1560 };
1561 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1562 struct nf_mttg_trav *trav = seq->private;
1563
1564 switch (trav->class) {
1565 case MTTG_TRAV_INIT:
1566 trav->class = MTTG_TRAV_NFP_UNSPEC;
1567 mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
1568 trav->head = trav->curr = is_target ?
1569 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
1570 break;
1571 case MTTG_TRAV_NFP_UNSPEC:
1572 trav->curr = trav->curr->next;
1573 if (trav->curr != trav->head)
1574 break;
1575 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1576 mutex_lock(&xt[nfproto].mutex);
1577 trav->head = trav->curr = is_target ?
1578 &xt[nfproto].target : &xt[nfproto].match;
1579 trav->class = next_class[trav->class];
1580 break;
1581 case MTTG_TRAV_NFP_SPEC:
1582 trav->curr = trav->curr->next;
1583 if (trav->curr != trav->head)
1584 break;
		/* fall through */
1586 default:
1587 return NULL;
1588 }
1589
1590 if (ppos != NULL)
1591 ++*ppos;
1592 return trav;
1593}
1594
1595static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1596 bool is_target)
1597{
1598 struct nf_mttg_trav *trav = seq->private;
1599 unsigned int j;
1600
1601 trav->class = MTTG_TRAV_INIT;
1602 for (j = 0; j < *pos; ++j)
1603 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1604 return NULL;
1605 return trav;
1606}
1607
1608static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1609{
1610 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1611 struct nf_mttg_trav *trav = seq->private;
1612
1613 switch (trav->class) {
1614 case MTTG_TRAV_NFP_UNSPEC:
1615 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1616 break;
1617 case MTTG_TRAV_NFP_SPEC:
1618 mutex_unlock(&xt[nfproto].mutex);
1619 break;
1620 }
1621}
1622
1623static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
1624{
1625 return xt_mttg_seq_start(seq, pos, false);
1626}
1627
1628static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1629{
1630 return xt_mttg_seq_next(seq, v, ppos, false);
1631}
1632
1633static int xt_match_seq_show(struct seq_file *seq, void *v)
1634{
1635 const struct nf_mttg_trav *trav = seq->private;
1636 const struct xt_match *match;
1637
1638 switch (trav->class) {
1639 case MTTG_TRAV_NFP_UNSPEC:
1640 case MTTG_TRAV_NFP_SPEC:
1641 if (trav->curr == trav->head)
1642 return 0;
1643 match = list_entry(trav->curr, struct xt_match, list);
1644 if (*match->name)
1645 seq_printf(seq, "%s\n", match->name);
1646 }
1647 return 0;
1648}
1649
1650static const struct seq_operations xt_match_seq_ops = {
1651 .start = xt_match_seq_start,
1652 .next = xt_match_seq_next,
1653 .stop = xt_mttg_seq_stop,
1654 .show = xt_match_seq_show,
1655};
1656
1657static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
1658{
1659 return xt_mttg_seq_start(seq, pos, true);
1660}
1661
1662static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
1663{
1664 return xt_mttg_seq_next(seq, v, ppos, true);
1665}
1666
1667static int xt_target_seq_show(struct seq_file *seq, void *v)
1668{
1669 const struct nf_mttg_trav *trav = seq->private;
1670 const struct xt_target *target;
1671
1672 switch (trav->class) {
1673 case MTTG_TRAV_NFP_UNSPEC:
1674 case MTTG_TRAV_NFP_SPEC:
1675 if (trav->curr == trav->head)
1676 return 0;
1677 target = list_entry(trav->curr, struct xt_target, list);
1678 if (*target->name)
1679 seq_printf(seq, "%s\n", target->name);
1680 }
1681 return 0;
1682}
1683
1684static const struct seq_operations xt_target_seq_ops = {
1685 .start = xt_target_seq_start,
1686 .next = xt_target_seq_next,
1687 .stop = xt_mttg_seq_stop,
1688 .show = xt_target_seq_show,
1689};
1690
1691#define FORMAT_TABLES "_tables_names"
1692#define FORMAT_MATCHES "_tables_matches"
1693#define FORMAT_TARGETS "_tables_targets"
1694
1695#endif
1696
/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
1705struct nf_hook_ops *
1706xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1707{
1708 unsigned int hook_mask = table->valid_hooks;
1709 uint8_t i, num_hooks = hweight32(hook_mask);
1710 uint8_t hooknum;
1711 struct nf_hook_ops *ops;
1712
1713 if (!num_hooks)
1714 return ERR_PTR(-EINVAL);
1715
1716 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1717 if (ops == NULL)
1718 return ERR_PTR(-ENOMEM);
1719
1720 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1721 hook_mask >>= 1, ++hooknum) {
1722 if (!(hook_mask & 1))
1723 continue;
1724 ops[i].hook = fn;
1725 ops[i].pf = table->af;
1726 ops[i].hooknum = hooknum;
1727 ops[i].priority = table->priority;
1728 ++i;
1729 }
1730
1731 return ops;
1732}
1733EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
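
/*
 * Usage sketch (hypothetical table module; names are illustrative): the
 * returned array is handed to nf_register_net_hooks() and kfree()d when
 * the table goes away:
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 */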
1734
1735int xt_proto_init(struct net *net, u_int8_t af)
1736{
1737#ifdef CONFIG_PROC_FS
1738 char buf[XT_FUNCTION_MAXNAMELEN];
1739 struct proc_dir_entry *proc;
1740 kuid_t root_uid;
1741 kgid_t root_gid;
1742#endif
1743
1744 if (af >= ARRAY_SIZE(xt_prefix))
1745 return -EINVAL;
1746
1747
1748#ifdef CONFIG_PROC_FS
1749 root_uid = make_kuid(net->user_ns, 0);
1750 root_gid = make_kgid(net->user_ns, 0);
1751
1752 strlcpy(buf, xt_prefix[af], sizeof(buf));
1753 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1754 proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1755 sizeof(struct seq_net_private),
1756 (void *)(unsigned long)af);
1757 if (!proc)
1758 goto out;
1759 if (uid_valid(root_uid) && gid_valid(root_gid))
1760 proc_set_user(proc, root_uid, root_gid);
1761
1762 strlcpy(buf, xt_prefix[af], sizeof(buf));
1763 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1764 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1765 &xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1766 (void *)(unsigned long)af);
1767 if (!proc)
1768 goto out_remove_tables;
1769 if (uid_valid(root_uid) && gid_valid(root_gid))
1770 proc_set_user(proc, root_uid, root_gid);
1771
1772 strlcpy(buf, xt_prefix[af], sizeof(buf));
1773 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1774 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1775 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1776 (void *)(unsigned long)af);
1777 if (!proc)
1778 goto out_remove_matches;
1779 if (uid_valid(root_uid) && gid_valid(root_gid))
1780 proc_set_user(proc, root_uid, root_gid);
1781#endif
1782
1783 return 0;
1784
1785#ifdef CONFIG_PROC_FS
1786out_remove_matches:
1787 strlcpy(buf, xt_prefix[af], sizeof(buf));
1788 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1789 remove_proc_entry(buf, net->proc_net);
1790
1791out_remove_tables:
1792 strlcpy(buf, xt_prefix[af], sizeof(buf));
1793 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1794 remove_proc_entry(buf, net->proc_net);
1795out:
1796 return -1;
1797#endif
1798}
1799EXPORT_SYMBOL_GPL(xt_proto_init);
1800
1801void xt_proto_fini(struct net *net, u_int8_t af)
1802{
1803#ifdef CONFIG_PROC_FS
1804 char buf[XT_FUNCTION_MAXNAMELEN];
1805
1806 strlcpy(buf, xt_prefix[af], sizeof(buf));
1807 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1808 remove_proc_entry(buf, net->proc_net);
1809
1810 strlcpy(buf, xt_prefix[af], sizeof(buf));
1811 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1812 remove_proc_entry(buf, net->proc_net);
1813
1814 strlcpy(buf, xt_prefix[af], sizeof(buf));
1815 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1816 remove_proc_entry(buf, net->proc_net);
1817#endif
1818}
1819EXPORT_SYMBOL_GPL(xt_proto_fini);
1820
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.  Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
1843bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
1844 struct xt_counters *counter)
1845{
1846 BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
1847
1848 if (nr_cpu_ids <= 1)
1849 return true;
1850
1851 if (!state->mem) {
1852 state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
1853 XT_PCPU_BLOCK_SIZE);
1854 if (!state->mem)
1855 return false;
1856 }
1857 counter->pcnt = (__force unsigned long)(state->mem + state->off);
1858 state->off += sizeof(*counter);
1859 if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
1860 state->mem = NULL;
1861 state->off = 0;
1862 }
1863 return true;
1864}
1865EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1866
1867void xt_percpu_counter_free(struct xt_counters *counters)
1868{
1869 unsigned long pcnt = counters->pcnt;
1870
1871 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1872 free_percpu((void __percpu *)pcnt);
1873}
1874EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
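
/*
 * Note on the mask check above: only the counter at offset 0 of each
 * XT_PCPU_BLOCK_SIZE block owns the percpu allocation, so freeing is
 * keyed on (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0; the other counters
 * in the block are released along with it.
 */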
1875
1876static int __net_init xt_net_init(struct net *net)
1877{
1878 int i;
1879
1880 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1881 INIT_LIST_HEAD(&net->xt.tables[i]);
1882 return 0;
1883}
1884
1885static void __net_exit xt_net_exit(struct net *net)
1886{
1887 int i;
1888
1889 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1890 WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1891}
1892
1893static struct pernet_operations xt_net_ops = {
1894 .init = xt_net_init,
1895 .exit = xt_net_exit,
1896};
1897
1898static int __init xt_init(void)
1899{
1900 unsigned int i;
1901 int rv;
1902
1903 for_each_possible_cpu(i) {
1904 seqcount_init(&per_cpu(xt_recseq, i));
1905 }
1906
1907 xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1908 if (!xt)
1909 return -ENOMEM;
1910
1911 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1912 mutex_init(&xt[i].mutex);
1913#ifdef CONFIG_COMPAT
1914 mutex_init(&xt[i].compat_mutex);
1915 xt[i].compat_tab = NULL;
1916#endif
1917 INIT_LIST_HEAD(&xt[i].target);
1918 INIT_LIST_HEAD(&xt[i].match);
1919 }
1920 rv = register_pernet_subsys(&xt_net_ops);
1921 if (rv < 0)
1922 kfree(xt);
1923 return rv;
1924}
1925
1926static void __exit xt_fini(void)
1927{
1928 unregister_pernet_subsys(&xt_net_ops);
1929 kfree(xt);
1930}
1931
1932module_init(xt_init);
1933module_exit(xt_fini);