/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

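/* Upper bounds used below: percpu counters are carved out of 4 KiB
 * blocks, and no table blob or offset array may exceed 512 MiB.
 */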
#define XT_PCPU_BLOCK_SIZE 4096
#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found it, but wrong revision */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found it, but wrong revision */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);


static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{
	if (put_user(size, psize))
		return -EFAULT;
	if (copy_to_user(pname, name, strlen(name) + 1))
		return -EFAULT;
	if (put_user(rev, prev))
		return -EFAULT;

	return 0;
}

#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)

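/* Copy an extension's payload to userspace, zero-filling the trailing
 * alignment pad so no uninitialized kernel memory is exposed.
 */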
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))

int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);

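/* Scan one family's extension list for @name, recording the highest
 * revision seen in *bestp and whether @revision itself exists; falls
 * back to the NFPROTO_UNSPEC list when the family-specific scan fails.
 */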
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
398
399static char *
400textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
401{
402 static const char *const inetbr_names[] = {
403 "PREROUTING", "INPUT", "FORWARD",
404 "OUTPUT", "POSTROUTING", "BROUTING",
405 };
406 static const char *const arp_names[] = {
407 "INPUT", "FORWARD", "OUTPUT",
408 };
409 const char *const *names;
410 unsigned int i, max;
411 char *p = buf;
412 bool np = false;
413 int res;
414
415 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
416 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
417 ARRAY_SIZE(inetbr_names);
418 *p = '\0';
419 for (i = 0; i < max; ++i) {
420 if (!(mask & (1 << i)))
421 continue;
422 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
423 if (res > 0) {
424 size -= res;
425 p += res;
426 }
427 np = true;
428 }
429
430 return buf;
431}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is useable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->match->name,
				   par->match->revision,
				   XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
				    xt_prefix[par->family], par->match->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->match->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

/** xt_check_table_hooks - check hook entry points are sane
 *
 * @info: xt_table_info to check
 * @valid_hooks: hook entry points that we can enter from
 *
 * Validates that the hook entry and underflows points are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
	const char *err = "unsorted underflow";
	unsigned int i, max_uflow, max_entry;
	bool check_hooks = false;

	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));

	max_entry = 0;
	max_uflow = 0;

	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
		if (!(valid_hooks & (1 << i)))
			continue;

		if (info->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (info->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;

		if (check_hooks) {
			if (max_uflow > info->underflow[i])
				goto error;

			if (max_uflow == info->underflow[i]) {
				err = "duplicate underflow";
				goto error;
			}
			if (max_entry > info->hook_entry[i]) {
				err = "unsorted entry";
				goto error;
			}
			if (max_entry == info->hook_entry[i]) {
				err = "duplicate entry";
				goto error;
			}
		}
		max_entry = info->hook_entry[i];
		max_uflow = info->underflow[i];
		check_hooks = true;
	}

	return 0;
error:
	pr_err_ratelimited("%s at hook %d\n", err, i);
	return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);

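/* A verdict is either a jump offset into the rule blob (positive) or
 * the negative encoding of an absolute verdict: -verdict - 1 must be
 * a valid NF_* control code, with XT_RETURN accepted separately.
 */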
static bool verdict_ok(int verdict)
{
	if (verdict > 0)
		return true;

	if (verdict < 0) {
		int v = -verdict - 1;

		if (verdict == XT_RETURN)
			return true;

		switch (v) {
		case NF_ACCEPT: return true;
		case NF_DROP: return true;
		case NF_QUEUE: return true;
		default:
			break;
		}

		return false;
	}

	return false;
}

static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{
	return usersize == kernsize && strnlen(msg, msglen) < msglen;
}

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

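/* Binary-search the sorted offset/delta table for the accumulated size
 * delta that applies to a rule starting at @offset; used to translate
 * jump targets between compat and native rule layouts.
 */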
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

struct compat_xt_error_target {
	struct compat_xt_entry_target t;
	char errorname[XT_FUNCTION_MAXNAMELEN];
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct compat_xt_standard_target *st = (const void *)t;

		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct compat_xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   memory
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct,
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed next_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct xt_standard_target *st = (const void *)t;

		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or zeroed kvmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
		return NULL;

	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);

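/* Sanity checks applied to a target while a table is validated: payload
 * size, table-name and hook-mask restrictions, protocol match, then the
 * extension's own checkentry() callback.
 */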
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->target->name,
				   par->target->revision,
				   XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
				    xt_prefix[par->family], par->target->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->target->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmalloc is used to store counter data.
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		user += sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);

#ifdef CONFIG_COMPAT
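/* Size difference between the native and compat layouts of a target,
 * including the differing XT_ALIGN/COMPAT_XT_ALIGN padding.
 */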
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

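/* Allocate the blob that holds a table's rules; bounded by
 * XT_MAX_TABLE_SIZE and charged to the caller via GFP_KERNEL_ACCOUNT.
 */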
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		int err;

		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me))
			goto out;
		mutex_unlock(&xt[af].mutex);
		err = t->table_init(net);
		if (err < 0) {
			module_put(t->me);
			return ERR_PTR(err);
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
					    const char *name)
{
	struct xt_table *t = xt_find_table_lock(net, af, name);

#ifdef CONFIG_MODULES
	if (IS_ERR(t)) {
		int err = request_module("%stable_%s", xt_prefix[af], name);
		if (err < 0)
			return ERR_PTR(err);
		t = xt_find_table_lock(net, af, name);
	}
#endif

	return t;
}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

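/* Allocate the per-cpu jump stacks used to unwind XT_RETURN during
 * ruleset traversal.
 */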
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * this place.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

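/* Bounded, zeroed allocation for the counter array reported back to
 * userspace on a table replace; rejects counts that could overflow or
 * exceed the table size cap.
 */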
struct xt_counters *xt_counters_alloc(unsigned int counters)
{
	struct xt_counters *mem;

	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
		return NULL;

	counters *= sizeof(*mem);
	if (counters > XT_MAX_TABLE_SIZE)
		return NULL;

	return vzalloc(counters);
}
EXPORT_SYMBOL(xt_counters_alloc);

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	unsigned int cpu;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/* make sure all cpus see new ->private value */
	smp_wmb();

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries...
	 */
	local_bh_enable();

	/* ... so wait for even xt_recseq on all cpus */
	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}

	audit_log_nfcfg(table->name, table->af, private->number,
			!private->number ? AUDIT_XT_OP_REGISTER :
					   AUDIT_XT_OP_REPLACE);
	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	audit_log_nfcfg(table->name, table->af, private->number,
			AUDIT_XT_OP_UNREGISTER);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
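/* Seq_file glue for /proc/net/<prefix>_tables_names: list the tables
 * registered in the current netns, under the per-family mutex.
 */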
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

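/* Advance the traversal: the NFPROTO_UNSPEC list is walked first, then
 * the family-specific one, taking and dropping the per-family mutex as
 * the class changes.
 */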
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[nfproto].target : &xt[nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fall through */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table module
 * will use to hand packets to the function.
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);

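/* Create the per-netns /proc entries ("<prefix>_tables_names",
 * "..._tables_matches", "..._tables_targets") for one protocol family;
 * they are owned by the netns-local root user and group when those map.
 */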
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
			sizeof(struct seq_net_private),
			(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_target_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.  Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

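/* Only the counter at the start of a percpu block owns the allocation,
 * so free only when the block-relative offset is zero.
 */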
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static void __net_exit xt_net_exit(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);