1
2
3
4
5
6
7
8
9
10
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/socket.h>
16#include <linux/net.h>
17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
19#include <linux/string.h>
20#include <linux/vmalloc.h>
21#include <linux/mutex.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/audit.h>
25#include <linux/user_namespace.h>
26#include <net/net_namespace.h>
27
28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_arp.h>
30#include <linux/netfilter_ipv4/ip_tables.h>
31#include <linux/netfilter_ipv6/ip6_tables.h>
32#include <linux/netfilter_arp/arp_tables.h>
33
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

/* Block size for per-cpu allocations (presumably used by counter code
 * elsewhere in this file — not referenced in this chunk). */
#define XT_PCPU_BLOCK_SIZE 4096
/* Hard cap (512 MiB) applied to table, offset and counter allocations. */
#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)

/* One entry of the compat translation table: maps a rule offset in the
 * 32-bit (compat) blob to the accumulated size delta versus the native
 * layout (deltas are stored cumulatively, see xt_compat_add_offset()). */
struct compat_delta {
	unsigned int offset; /* offset of the rule within the compat blob */
	int delta;           /* cumulative native-minus-compat size delta */
};
45
/* Per-address-family registry; one instance per NFPROTO_* value. */
struct xt_af {
	struct mutex mutex;          /* protects the match and target lists */
	struct list_head match;      /* registered xt_match extensions */
	struct list_head target;     /* registered xt_target extensions */
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;   /* serializes use of the fields below */
	struct compat_delta *compat_tab; /* sorted offset -> delta table */
	unsigned int number;         /* entries allocated in compat_tab */
	unsigned int cur;            /* entries currently filled in */
#endif
};

/* Array indexed by NFPROTO_*; allocated during module init (not in view). */
static struct xt_af *xt;
59
/* Per-family name prefix, used to build module names ("ipt_foo",
 * "ip6table_filter", ...) and log message prefixes ("ip_tables: ..."). */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
67
68
69int xt_register_target(struct xt_target *target)
70{
71 u_int8_t af = target->family;
72
73 mutex_lock(&xt[af].mutex);
74 list_add(&target->list, &xt[af].target);
75 mutex_unlock(&xt[af].mutex);
76 return 0;
77}
78EXPORT_SYMBOL(xt_register_target);
79
80void
81xt_unregister_target(struct xt_target *target)
82{
83 u_int8_t af = target->family;
84
85 mutex_lock(&xt[af].mutex);
86 list_del(&target->list);
87 mutex_unlock(&xt[af].mutex);
88}
89EXPORT_SYMBOL(xt_unregister_target);
90
91int
92xt_register_targets(struct xt_target *target, unsigned int n)
93{
94 unsigned int i;
95 int err = 0;
96
97 for (i = 0; i < n; i++) {
98 err = xt_register_target(&target[i]);
99 if (err)
100 goto err;
101 }
102 return err;
103
104err:
105 if (i > 0)
106 xt_unregister_targets(target, i);
107 return err;
108}
109EXPORT_SYMBOL(xt_register_targets);
110
111void
112xt_unregister_targets(struct xt_target *target, unsigned int n)
113{
114 while (n-- > 0)
115 xt_unregister_target(&target[n]);
116}
117EXPORT_SYMBOL(xt_unregister_targets);
118
119int xt_register_match(struct xt_match *match)
120{
121 u_int8_t af = match->family;
122
123 mutex_lock(&xt[af].mutex);
124 list_add(&match->list, &xt[af].match);
125 mutex_unlock(&xt[af].mutex);
126 return 0;
127}
128EXPORT_SYMBOL(xt_register_match);
129
130void
131xt_unregister_match(struct xt_match *match)
132{
133 u_int8_t af = match->family;
134
135 mutex_lock(&xt[af].mutex);
136 list_del(&match->list);
137 mutex_unlock(&xt[af].mutex);
138}
139EXPORT_SYMBOL(xt_unregister_match);
140
141int
142xt_register_matches(struct xt_match *match, unsigned int n)
143{
144 unsigned int i;
145 int err = 0;
146
147 for (i = 0; i < n; i++) {
148 err = xt_register_match(&match[i]);
149 if (err)
150 goto err;
151 }
152 return err;
153
154err:
155 if (i > 0)
156 xt_unregister_matches(match, i);
157 return err;
158}
159EXPORT_SYMBOL(xt_register_matches);
160
161void
162xt_unregister_matches(struct xt_match *match, unsigned int n)
163{
164 while (n-- > 0)
165 xt_unregister_match(&match[n]);
166}
167EXPORT_SYMBOL(xt_unregister_matches);
168
169
170
171
172
173
174
175
176
177struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
178{
179 struct xt_match *m;
180 int err = -ENOENT;
181
182 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
183 return ERR_PTR(-EINVAL);
184
185 mutex_lock(&xt[af].mutex);
186 list_for_each_entry(m, &xt[af].match, list) {
187 if (strcmp(m->name, name) == 0) {
188 if (m->revision == revision) {
189 if (try_module_get(m->me)) {
190 mutex_unlock(&xt[af].mutex);
191 return m;
192 }
193 } else
194 err = -EPROTOTYPE;
195 }
196 }
197 mutex_unlock(&xt[af].mutex);
198
199 if (af != NFPROTO_UNSPEC)
200
201 return xt_find_match(NFPROTO_UNSPEC, name, revision);
202
203 return ERR_PTR(err);
204}
205EXPORT_SYMBOL(xt_find_match);
206
207struct xt_match *
208xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
209{
210 struct xt_match *match;
211
212 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
213 return ERR_PTR(-EINVAL);
214
215 match = xt_find_match(nfproto, name, revision);
216 if (IS_ERR(match)) {
217 request_module("%st_%s", xt_prefix[nfproto], name);
218 match = xt_find_match(nfproto, name, revision);
219 }
220
221 return match;
222}
223EXPORT_SYMBOL_GPL(xt_request_find_match);
224
225
226static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
227{
228 struct xt_target *t;
229 int err = -ENOENT;
230
231 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
232 return ERR_PTR(-EINVAL);
233
234 mutex_lock(&xt[af].mutex);
235 list_for_each_entry(t, &xt[af].target, list) {
236 if (strcmp(t->name, name) == 0) {
237 if (t->revision == revision) {
238 if (try_module_get(t->me)) {
239 mutex_unlock(&xt[af].mutex);
240 return t;
241 }
242 } else
243 err = -EPROTOTYPE;
244 }
245 }
246 mutex_unlock(&xt[af].mutex);
247
248 if (af != NFPROTO_UNSPEC)
249
250 return xt_find_target(NFPROTO_UNSPEC, name, revision);
251
252 return ERR_PTR(err);
253}
254
255struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
256{
257 struct xt_target *target;
258
259 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
260 return ERR_PTR(-EINVAL);
261
262 target = xt_find_target(af, name, revision);
263 if (IS_ERR(target)) {
264 request_module("%st_%s", xt_prefix[af], name);
265 target = xt_find_target(af, name, revision);
266 }
267
268 return target;
269}
270EXPORT_SYMBOL_GPL(xt_request_find_target);
271
272
273static int xt_obj_to_user(u16 __user *psize, u16 size,
274 void __user *pname, const char *name,
275 u8 __user *prev, u8 rev)
276{
277 if (put_user(size, psize))
278 return -EFAULT;
279 if (copy_to_user(pname, name, strlen(name) + 1))
280 return -EFAULT;
281 if (put_user(rev, prev))
282 return -EFAULT;
283
284 return 0;
285}
286
/* Copy a kernel-side match/target header (size, name, revision) to the
 * user-layout struct U.  C_SIZE overrides the size field when non-zero
 * (used by the compat paths to report the compat size). */
#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)
291
292int xt_data_to_user(void __user *dst, const void *src,
293 int usersize, int size, int aligned_size)
294{
295 usersize = usersize ? : size;
296 if (copy_to_user(dst, src, usersize))
297 return -EFAULT;
298 if (usersize != aligned_size &&
299 clear_user(dst + usersize, aligned_size - usersize))
300 return -EFAULT;
301
302 return 0;
303}
304EXPORT_SYMBOL_GPL(xt_data_to_user);
305
/* Copy a match/target payload to userspace using the extension's declared
 * usersize/size, padding up to the XT_ALIGN'ed size. */
#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
311
/* Copy one match entry (header + payload) to userspace.
 * NOTE: the || collapses any -EFAULT to 1; callers only test non-zero. */
int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);
319
/* Copy one target entry (header + payload) to userspace.
 * NOTE: the || collapses any -EFAULT to 1; callers only test non-zero. */
int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
327
328static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
329{
330 const struct xt_match *m;
331 int have_rev = 0;
332
333 mutex_lock(&xt[af].mutex);
334 list_for_each_entry(m, &xt[af].match, list) {
335 if (strcmp(m->name, name) == 0) {
336 if (m->revision > *bestp)
337 *bestp = m->revision;
338 if (m->revision == revision)
339 have_rev = 1;
340 }
341 }
342 mutex_unlock(&xt[af].mutex);
343
344 if (af != NFPROTO_UNSPEC && !have_rev)
345 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
346
347 return have_rev;
348}
349
350static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
351{
352 const struct xt_target *t;
353 int have_rev = 0;
354
355 mutex_lock(&xt[af].mutex);
356 list_for_each_entry(t, &xt[af].target, list) {
357 if (strcmp(t->name, name) == 0) {
358 if (t->revision > *bestp)
359 *bestp = t->revision;
360 if (t->revision == revision)
361 have_rev = 1;
362 }
363 }
364 mutex_unlock(&xt[af].mutex);
365
366 if (af != NFPROTO_UNSPEC && !have_rev)
367 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
368
369 return have_rev;
370}
371
372
373int xt_find_revision(u8 af, const char *name, u8 revision, int target,
374 int *err)
375{
376 int have_rev, best = -1;
377
378 if (target == 1)
379 have_rev = target_revfn(af, name, revision, &best);
380 else
381 have_rev = match_revfn(af, name, revision, &best);
382
383
384 if (best == -1) {
385 *err = -ENOENT;
386 return 0;
387 }
388
389 *err = best;
390 if (!have_rev)
391 *err = -EPROTONOSUPPORT;
392 return 1;
393}
394EXPORT_SYMBOL_GPL(xt_find_revision);
395
396static char *
397textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
398{
399 static const char *const inetbr_names[] = {
400 "PREROUTING", "INPUT", "FORWARD",
401 "OUTPUT", "POSTROUTING", "BROUTING",
402 };
403 static const char *const arp_names[] = {
404 "INPUT", "FORWARD", "OUTPUT",
405 };
406 const char *const *names;
407 unsigned int i, max;
408 char *p = buf;
409 bool np = false;
410 int res;
411
412 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
413 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
414 ARRAY_SIZE(inetbr_names);
415 *p = '\0';
416 for (i = 0; i < max; ++i) {
417 if (!(mask & (1 << i)))
418 continue;
419 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
420 if (res > 0) {
421 size -= res;
422 p += res;
423 }
424 np = true;
425 }
426
427 return buf;
428}
429
430
431
432
433
434
435
436
437
438
439
440
441
442
/*
 * Validate a userspace-supplied name that will become a procfs entry.
 * Rejects empty names (-EINVAL), unterminated names (-ENAMETOOLONG) and
 * names that could escape or clash in procfs: ".", ".." or anything
 * containing a '/' (-EINVAL).  Returns 0 when the name is acceptable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (*name == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strchr(name, '/') ||
	    strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0)
		return -EINVAL;

	return 0;
}
458EXPORT_SYMBOL(xt_check_proc_name);
459
/**
 * xt_check_match - run sanity checks on a match before it is used
 * @par:	match-check parameters (family, table, hook mask, ...)
 * @size:	payload size supplied by userspace for this match
 * @proto:	protocol number the rule matches on
 * @inv_proto:	true if the rule's protocol test is inverted
 *
 * Verifies the user-supplied size, table, hook and protocol constraints
 * against what the extension declares, then invokes the extension's own
 * ->checkentry() hook.  Returns 0 on success or a negative errno.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * matchsize == -1 opts out of the centralized size check —
		 * presumably for extensions with variable-size data
		 * (TODO confirm which extension relies on this).
		 */
		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->match->name,
				   par->match->revision,
				   XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	/* Match restricted to one table? */
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->table, par->table);
		return -EINVAL;
	}
	/* Match restricted to certain hooks? */
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
				    xt_prefix[par->family], par->match->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->match->hooks,
						  par->family));
		return -EINVAL;
	}
	/* Match restricted to one protocol (and non-inverted use)? */
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
513
514
515
516
517
518
519
520
521
522
523
524
525static int xt_check_entry_match(const char *match, const char *target,
526 const size_t alignment)
527{
528 const struct xt_entry_match *pos;
529 int length = target - match;
530
531 if (length == 0)
532 return 0;
533
534 pos = (struct xt_entry_match *)match;
535 do {
536 if ((unsigned long)pos % alignment)
537 return -EINVAL;
538
539 if (length < (int)sizeof(struct xt_entry_match))
540 return -EINVAL;
541
542 if (pos->u.match_size < sizeof(struct xt_entry_match))
543 return -EINVAL;
544
545 if (pos->u.match_size > length)
546 return -EINVAL;
547
548 length -= pos->u.match_size;
549 pos = ((void *)((char *)(pos) + (pos)->u.match_size));
550 } while (length > 0);
551
552 return 0;
553}
554
555
556
557
558
559
560
561
562
563
/**
 * xt_check_table_hooks - sanity-check hook entry and underflow offsets
 * @info:	table info whose hook_entry[]/underflow[] arrays to check
 * @valid_hooks:	bitmask of hooks the table may be entered from
 *
 * For every valid hook, the offsets must have been set (0xFFFFFFFF means
 * unset) and both arrays must be strictly increasing across hooks — no
 * duplicates, no out-of-order entries.  Returns 0 or -EINVAL.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
	const char *err = "unsorted underflow";
	unsigned int i, max_uflow, max_entry;
	bool check_hooks = false;

	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));

	max_entry = 0;
	max_uflow = 0;

	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
		if (!(valid_hooks & (1 << i)))
			continue;

		/* 0xFFFFFFFF marks an offset that was never filled in */
		if (info->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (info->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;

		/* only compare against a previous hook once we have one */
		if (check_hooks) {
			if (max_uflow > info->underflow[i])
				goto error;

			if (max_uflow == info->underflow[i]) {
				err = "duplicate underflow";
				goto error;
			}
			if (max_entry > info->hook_entry[i]) {
				err = "unsorted entry";
				goto error;
			}
			if (max_entry == info->hook_entry[i]) {
				err = "duplicate entry";
				goto error;
			}
		}
		max_entry = info->hook_entry[i];
		max_uflow = info->underflow[i];
		check_hooks = true;
	}

	return 0;
error:
	pr_err_ratelimited("%s at hook %d\n", err, i);
	return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);
612
613static bool verdict_ok(int verdict)
614{
615 if (verdict > 0)
616 return true;
617
618 if (verdict < 0) {
619 int v = -verdict - 1;
620
621 if (verdict == XT_RETURN)
622 return true;
623
624 switch (v) {
625 case NF_ACCEPT: return true;
626 case NF_DROP: return true;
627 case NF_QUEUE: return true;
628 default:
629 break;
630 }
631
632 return false;
633 }
634
635 return false;
636}
637
/* An ERROR target is valid when its user size matches the kernel layout
 * exactly and its message is NUL-terminated within the buffer. */
static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{
	if (usersize != kernsize)
		return false;

	return strnlen(msg, msglen) < msglen;
}
643
#ifdef CONFIG_COMPAT
/*
 * Record the size delta of one rule at @offset in the compat blob.
 * Deltas are stored cumulatively (each entry includes all earlier ones),
 * so a later lookup needs a single table read.  Caller must hold the
 * per-family compat_mutex and must have called xt_compat_init_offsets()
 * to size the table first.
 */
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	/* table must have been allocated by xt_compat_init_offsets() */
	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	/* more entries than the caller announced up front */
	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta; /* make cumulative */
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
665
/* Release the compat offset table for @af and reset its bookkeeping.
 * Caller must hold the per-family compat_mutex. */
void xt_compat_flush_offsets(u_int8_t af)
{
	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
678
/*
 * Return the cumulative size delta to apply to a jump that lands at
 * @offset in the compat blob: binary-search the sorted offset table and
 * yield the delta accumulated by all rules strictly before @offset.
 * (On an exact hit, the matched rule's own delta is excluded — hence
 * tmp[mid - 1]; with no earlier rule the delta is 0.)
 */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	/* not found: use the delta of the last entry below @offset */
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
696
/*
 * Allocate the compat offset table for @af, sized for @number rules.
 * Caller must hold the per-family compat_mutex; pair with
 * xt_compat_flush_offsets().  Returns 0, -EINVAL or -ENOMEM.
 */
int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	/* reject zero and anything whose byte size would overflow an int */
	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	/* a leftover table means a missing flush — refuse to leak it */
	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
723
724int xt_compat_match_offset(const struct xt_match *match)
725{
726 u_int16_t csize = match->compatsize ? : match->matchsize;
727 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
728}
729EXPORT_SYMBOL_GPL(xt_compat_match_offset);
730
/*
 * Expand one compat match entry into native layout at *dstptr, advancing
 * *dstptr and growing *size by the layout delta.  NOTE: the destination
 * may alias/overlap the source, so the copy order below matters.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));

	msize += off;
	m->u.user.match_size = msize;
	/* stash the name locally before dropping the module reference —
	 * presumably the extension (and its name string) may go away
	 * after module_put(); confirm against module lifetime rules */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
757
/* Like XT_DATA_TO_USER, but with a caller-supplied compat payload size
 * and compat alignment for the zero-padding. */
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))
763
/*
 * Convert one native match entry to compat layout at *dstptr, advancing
 * *dstptr by the compat size and shrinking *size by the layout delta.
 * Returns 0 or -EFAULT.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	/* compat size = native size minus the layout delta */
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension provides its own payload conversion */
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
788
789
/* Compat layout of a standard target: header plus a 32-bit verdict. */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

/* Compat layout of an ERROR target: header plus a fixed-size message. */
struct compat_xt_error_target {
	struct compat_xt_entry_target t;
	char errorname[XT_FUNCTION_MAXNAMELEN];
};
799
/*
 * Compat counterpart of xt_check_entry_offsets(): validate the
 * target_offset/next_offset pair of one rule in a 32-bit blob, the
 * embedded target's self-declared size, and — for the standard and ERROR
 * targets — their exact expected layouts.  Finishes by walking the match
 * region with compat alignment.  Returns 0 or -EINVAL.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* target must start after the family-specific rule header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* at least the target header must fit before the next rule */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct compat_xt_standard_target *st = (const void *)t;

		/* standard target must exactly fill the rule */
		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct compat_xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	/* compat and native match headers must have identical size for the
	 * shared walker below to be valid for both layouts */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
/**
 * xt_check_entry_offsets - validate one rule's internal layout
 * @base:	start of the rule (family-specific header)
 * @elems:	start of the match area within the rule
 * @target_offset:	claimed offset of the target from @base
 * @next_offset:	claimed offset of the next rule from @base
 *
 * Checks that the target lies after the rule header, that the target
 * header and its self-declared size fit before the next rule, that the
 * standard and ERROR targets have their exact expected layouts, and that
 * the match region between @elems and the target is well-formed.
 * Returns 0 or -EINVAL.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the rule's base header: error */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* at least the target header must fit before the next rule */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct xt_standard_target *st = (const void *)t;

		/* standard target must exactly fill the rule */
		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
935
936
937
938
939
940
941
942
/*
 * Allocate a zeroed array of @size rule offsets (used for jump-target
 * validation).  Returns NULL if @size would exceed XT_MAX_TABLE_SIZE in
 * bytes or if memory is unavailable; the guard also keeps the
 * size * sizeof(unsigned int) product from overflowing.
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
		return NULL;

	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);

}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
952
953
954
955
956
957
958
959
/*
 * Binary-search the sorted @offsets array (length @size) for @target.
 * Returns true iff @target is a valid rule start offset.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	unsigned int lo = 0, hi = size;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offsets[mid] == target)
			return true;
		if (offsets[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}

	return false;
}
978EXPORT_SYMBOL(xt_find_jump_offset);
979
/**
 * xt_check_target - run sanity checks on a target before it is used
 * @par:	target-check parameters (family, table, hook mask, ...)
 * @size:	payload size supplied by userspace for this target
 * @proto:	protocol number the rule matches on
 * @inv_proto:	true if the rule's protocol test is inverted
 *
 * Mirrors xt_check_match() for targets: validates size, table, hook and
 * protocol constraints, then runs the extension's ->checkentry() hook.
 * Returns 0 on success or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->target->name,
				   par->target->revision,
				   XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	/* Target restricted to one table? */
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->table, par->table);
		return -EINVAL;
	}
	/* Target restricted to certain hooks? */
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
				    xt_prefix[par->family], par->target->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->target->hooks,
						  par->family));
		return -EINVAL;
	}
	/* Target restricted to one protocol (and non-inverted use)? */
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * xt_copy_counters - copy a counters header and blob in from userspace
 * @arg:	source sockptr (native or compat layout)
 * @len:	total length supplied by userspace (header + counters)
 * @info:	filled in with the (native-layout) header
 *
 * Reads the xt_counters_info header — converting from the compat layout
 * when running in a compat syscall — then validates that @len exactly
 * covers num_counters xt_counters entries and copies them into a
 * vmalloc'ed buffer.  Returns the buffer, or an ERR_PTR on failure
 * (caller vfree()s the result).
 */
void *xt_copy_counters(sockptr_t arg, unsigned int len,
		       struct xt_counters_info *info)
{
	size_t offset;
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		/* header has the 32-bit layout: convert field by field */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		/* one byte short — NUL termination is forced below */
		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		offset = sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		offset = sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	/* compute expected blob size in 64 bits so it cannot overflow
	 * (assumes num_counters is a 32-bit field — see xt_counters_info) */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	/* userspace must supply exactly the advertised amount */
	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters);
1102
#ifdef CONFIG_COMPAT
/* Size difference between the native (aligned) target payload and its
 * compat layout; compatsize, when declared, overrides targetsize. */
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1110
/*
 * Expand one compat target entry into native layout at *dstptr, advancing
 * *dstptr and growing *size by the layout delta.  NOTE: the destination
 * may alias/overlap the source, so the copy order below matters.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));

	tsize += off;
	t->u.user.target_size = tsize;
	/* stash the name locally before dropping the module reference —
	 * presumably the extension (and its name string) may go away
	 * after module_put(); confirm against module lifetime rules */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1137
/*
 * Convert one native target entry to compat layout at *dstptr, advancing
 * *dstptr by the compat size and shrinking *size by the layout delta.
 * Returns 0 or -EFAULT.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	/* compat size = native size minus the layout delta */
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		/* extension provides its own payload conversion */
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
1163
1164struct xt_table_info *xt_alloc_table_info(unsigned int size)
1165{
1166 struct xt_table_info *info = NULL;
1167 size_t sz = sizeof(*info) + size;
1168
1169 if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
1170 return NULL;
1171
1172 info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
1173 if (!info)
1174 return NULL;
1175
1176 memset(info, 0, sizeof(*info));
1177 info->size = size;
1178 return info;
1179}
1180EXPORT_SYMBOL(xt_alloc_table_info);
1181
/* Free a table info including its per-cpu jumpstacks, if any were
 * allocated by xt_jumpstack_alloc(). */
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
1195
1196
/*
 * Find a table by name in @net's per-family list, returning it with the
 * per-family mutex held and a module reference taken.  If the table is
 * absent but init_net has one with that name, use it as a template:
 * temporarily drop the mutex, instantiate the table in @net via
 * ->table_init(), retake the mutex and look it up again (the re-lookup is
 * needed because another task may have raced us).  Returns the locked
 * table or an ERR_PTR (mutex not held on error).
 */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;	/* mutex intentionally kept held */

	/* init_net is its own template source — nothing more to try */
	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, check init_net for a template */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		int err;

		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me))
			goto out;
		/* drop the mutex: table_init() registers the new table and
		 * takes this same mutex itself */
		mutex_unlock(&xt[af].mutex);
		err = t->table_init(net);
		if (err < 0) {
			module_put(t->me);
			return ERR_PTR(err);
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again (we released the mutex, so re-check the list): */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;	/* ref from try_module_get() above */

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
1245
1246struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
1247 const char *name)
1248{
1249 struct xt_table *t = xt_find_table_lock(net, af, name);
1250
1251#ifdef CONFIG_MODULES
1252 if (IS_ERR(t)) {
1253 int err = request_module("%stable_%s", xt_prefix[af], name);
1254 if (err < 0)
1255 return ERR_PTR(err);
1256 t = xt_find_table_lock(net, af, name);
1257 }
1258#endif
1259
1260 return t;
1261}
1262EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1263
/* Drop the per-family mutex taken by xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1269
#ifdef CONFIG_COMPAT
/* Acquire the per-family compat translation mutex (protects the
 * compat_tab state in struct xt_af). */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

/* Release the mutex taken by xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
1283
/* Per-cpu sequence counter; xt_replace_table() reads it to wait out any
 * in-flight rule traversal still using the old table (odd value = a
 * traversal is in progress on that cpu). */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static key — presumably enabled while any TEE target is registered;
 * toggled outside this chunk (confirm against the TEE target code). */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1289
/*
 * Allocate the per-cpu jump stacks used by rule traversal.  Returns 0 or
 * -ENOMEM; on failure the caller is expected to free via
 * xt_free_table_info(), which handles partially-filled jumpstack arrays.
 */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	/* pointer array: one stack pointer per possible cpu */
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* no jumps in this table: pointer array alone suffices */
	if (i->stacksize == 0)
		return 0;

	/* Each stack is sized for twice the declared depth — presumably so
	 * a re-entrant traversal (e.g. via the TEE target, cf.
	 * xt_tee_enabled above) can nest once; confirm against the
	 * traversal code in ip_tables.c/ip6_tables.c. */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/* freeing of partially-allocated stacks is left to
			 * xt_free_table_info() */
			return -ENOMEM;
	}

	return 0;
}
1332
1333struct xt_counters *xt_counters_alloc(unsigned int counters)
1334{
1335 struct xt_counters *mem;
1336
1337 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1338 return NULL;
1339
1340 counters *= sizeof(*mem);
1341 if (counters > XT_MAX_TABLE_SIZE)
1342 return NULL;
1343
1344 return vzalloc(counters);
1345}
1346EXPORT_SYMBOL(xt_counters_alloc);
1347
/*
 * Atomically swap @table's ruleset for @newinfo and wait until no cpu is
 * still traversing the old one.  @num_counters must match the old table's
 * entry count (-EAGAIN otherwise — userspace retries).  Returns the old
 * table info (caller frees it and harvests its counters), or NULL with
 * *error set.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	unsigned int cpu;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution with softirqs off so no traversal starts on
	 * this cpu mid-swap. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;

	/* make sure newinfo's contents are committed to memory before the
	 * pointer swap makes them reachable by other cpus */
	smp_wmb();
	table->private = newinfo;

	/* make the new ->private value visible to all cpus */
	smp_mb();

	local_bh_enable();

	/* Other cpus may still be walking the old table: wait until every
	 * cpu's xt_recseq is even (no traversal) or has advanced past the
	 * value sampled here. */
	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {	/* odd: traversal in progress on that cpu */
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}

	/* an empty old table means this is the initial registration */
	audit_log_nfcfg(table->name, table->af, private->number,
			!private->number ? AUDIT_XT_OP_REGISTER :
					   AUDIT_XT_OP_REPLACE,
			GFP_KERNEL);
	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1414
1415struct xt_table *xt_register_table(struct net *net,
1416 const struct xt_table *input_table,
1417 struct xt_table_info *bootstrap,
1418 struct xt_table_info *newinfo)
1419{
1420 int ret;
1421 struct xt_table_info *private;
1422 struct xt_table *t, *table;
1423
1424
1425 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
1426 if (!table) {
1427 ret = -ENOMEM;
1428 goto out;
1429 }
1430
1431 mutex_lock(&xt[table->af].mutex);
1432
1433 list_for_each_entry(t, &net->xt.tables[table->af], list) {
1434 if (strcmp(t->name, table->name) == 0) {
1435 ret = -EEXIST;
1436 goto unlock;
1437 }
1438 }
1439
1440
1441 table->private = bootstrap;
1442
1443 if (!xt_replace_table(table, 0, newinfo, &ret))
1444 goto unlock;
1445
1446 private = table->private;
1447 pr_debug("table->private->number = %u\n", private->number);
1448
1449
1450 private->initial_entries = private->number;
1451
1452 list_add(&table->list, &net->xt.tables[table->af]);
1453 mutex_unlock(&xt[table->af].mutex);
1454 return table;
1455
1456unlock:
1457 mutex_unlock(&xt[table->af].mutex);
1458 kfree(table);
1459out:
1460 return ERR_PTR(ret);
1461}
1462EXPORT_SYMBOL_GPL(xt_register_table);
1463
1464void *xt_unregister_table(struct xt_table *table)
1465{
1466 struct xt_table_info *private;
1467
1468 mutex_lock(&xt[table->af].mutex);
1469 private = table->private;
1470 list_del(&table->list);
1471 mutex_unlock(&xt[table->af].mutex);
1472 audit_log_nfcfg(table->name, table->af, private->number,
1473 AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1474 kfree(table);
1475
1476 return private;
1477}
1478EXPORT_SYMBOL_GPL(xt_unregister_table);
1479
1480#ifdef CONFIG_PROC_FS
1481static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1482{
1483 struct net *net = seq_file_net(seq);
1484 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1485
1486 mutex_lock(&xt[af].mutex);
1487 return seq_list_start(&net->xt.tables[af], *pos);
1488}
1489
1490static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1491{
1492 struct net *net = seq_file_net(seq);
1493 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1494
1495 return seq_list_next(v, &net->xt.tables[af], pos);
1496}
1497
1498static void xt_table_seq_stop(struct seq_file *seq, void *v)
1499{
1500 u_int8_t af = (unsigned long)PDE_DATA(file_inode(seq->file));
1501
1502 mutex_unlock(&xt[af].mutex);
1503}
1504
1505static int xt_table_seq_show(struct seq_file *seq, void *v)
1506{
1507 struct xt_table *table = list_entry(v, struct xt_table, list);
1508
1509 if (*table->name)
1510 seq_printf(seq, "%s\n", table->name);
1511 return 0;
1512}
1513
/* Operations for /proc/net/<prefix>_tables_names. */
static const struct seq_operations xt_table_seq_ops = {
	.start = xt_table_seq_start,
	.next = xt_table_seq_next,
	.stop = xt_table_seq_stop,
	.show = xt_table_seq_show,
};
1520
1521
1522
1523
1524
/* Traversal cursor shared by the match/target seq files. */
struct nf_mttg_trav {
	struct list_head *head, *curr;	/* current list and position in it */
	uint8_t class;			/* MTTG_TRAV_* phase */
};
1529
/* Walk phases: INIT -> NFPROTO_UNSPEC list -> family-specific list -> DONE. */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
1536
/*
 * Shared ->next for the match and target seq files.  Entries live on two
 * lists: the NFPROTO_UNSPEC list (walked first) and the per-family list.
 * trav->class records which list is being walked and therefore which
 * xt[].mutex is currently held; the lock is handed over when the walk
 * crosses from the UNSPEC list to the family-specific one.  The matching
 * unlock of the final mutex happens in xt_mttg_seq_stop().
 */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
	};
	uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	/* ppos is NULL when replayed from xt_mttg_seq_start(). */
	if (ppos != NULL)
		++(*ppos);

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list exhausted: swap locks, move to family list. */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[nfproto].target : &xt[nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		fallthrough;
	default:
		return NULL;
	}
	return trav;
}
1577
1578static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1579 bool is_target)
1580{
1581 struct nf_mttg_trav *trav = seq->private;
1582 unsigned int j;
1583
1584 trav->class = MTTG_TRAV_INIT;
1585 for (j = 0; j < *pos; ++j)
1586 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1587 return NULL;
1588 return trav;
1589}
1590
1591static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
1592{
1593 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
1594 struct nf_mttg_trav *trav = seq->private;
1595
1596 switch (trav->class) {
1597 case MTTG_TRAV_NFP_UNSPEC:
1598 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
1599 break;
1600 case MTTG_TRAV_NFP_SPEC:
1601 mutex_unlock(&xt[nfproto].mutex);
1602 break;
1603 }
1604}
1605
/* ->start for /proc/net/<prefix>_tables_matches (is_target = false). */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}
1610
/* ->next for the match list (is_target = false). */
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}
1615
1616static int xt_match_seq_show(struct seq_file *seq, void *v)
1617{
1618 const struct nf_mttg_trav *trav = seq->private;
1619 const struct xt_match *match;
1620
1621 switch (trav->class) {
1622 case MTTG_TRAV_NFP_UNSPEC:
1623 case MTTG_TRAV_NFP_SPEC:
1624 if (trav->curr == trav->head)
1625 return 0;
1626 match = list_entry(trav->curr, struct xt_match, list);
1627 if (*match->name)
1628 seq_printf(seq, "%s\n", match->name);
1629 }
1630 return 0;
1631}
1632
/* Operations for /proc/net/<prefix>_tables_matches. */
static const struct seq_operations xt_match_seq_ops = {
	.start = xt_match_seq_start,
	.next = xt_match_seq_next,
	.stop = xt_mttg_seq_stop,
	.show = xt_match_seq_show,
};
1639
/* ->start for /proc/net/<prefix>_tables_targets (is_target = true). */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}
1644
/* ->next for the target list (is_target = true). */
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}
1649
1650static int xt_target_seq_show(struct seq_file *seq, void *v)
1651{
1652 const struct nf_mttg_trav *trav = seq->private;
1653 const struct xt_target *target;
1654
1655 switch (trav->class) {
1656 case MTTG_TRAV_NFP_UNSPEC:
1657 case MTTG_TRAV_NFP_SPEC:
1658 if (trav->curr == trav->head)
1659 return 0;
1660 target = list_entry(trav->curr, struct xt_target, list);
1661 if (*target->name)
1662 seq_printf(seq, "%s\n", target->name);
1663 }
1664 return 0;
1665}
1666
/* Operations for /proc/net/<prefix>_tables_targets. */
static const struct seq_operations xt_target_seq_ops = {
	.start = xt_target_seq_start,
	.next = xt_target_seq_next,
	.stop = xt_mttg_seq_stop,
	.show = xt_target_seq_show,
};
1673
/* Suffixes appended to xt_prefix[af] to build the /proc/net file names. */
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
1677
1678#endif
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688struct nf_hook_ops *
1689xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1690{
1691 unsigned int hook_mask = table->valid_hooks;
1692 uint8_t i, num_hooks = hweight32(hook_mask);
1693 uint8_t hooknum;
1694 struct nf_hook_ops *ops;
1695
1696 if (!num_hooks)
1697 return ERR_PTR(-EINVAL);
1698
1699 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1700 if (ops == NULL)
1701 return ERR_PTR(-ENOMEM);
1702
1703 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1704 hook_mask >>= 1, ++hooknum) {
1705 if (!(hook_mask & 1))
1706 continue;
1707 ops[i].hook = fn;
1708 ops[i].pf = table->af;
1709 ops[i].hooknum = hooknum;
1710 ops[i].priority = table->priority;
1711 ++i;
1712 }
1713
1714 return ops;
1715}
1716EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
1717
/**
 * xt_proto_init - create the per-netns /proc files for one protocol family
 * @net: network namespace being initialised
 * @af:  NFPROTO_* family whose proc entries to create
 *
 * Creates "<prefix>_tables_names", "<prefix>_tables_matches" and
 * "<prefix>_tables_targets" under /proc/net, assigning ownership to the
 * namespace-mapped root uid/gid when those map validly.
 *
 * Returns 0 on success, -EINVAL for an out-of-range family, or -1 when a
 * proc entry could not be created (callers only test for < 0); earlier
 * entries are unwound on failure.
 */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	/* Map uid/gid 0 into this namespace's user_ns for proc ownership. */
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
			sizeof(struct seq_net_private),
			(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_match_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_seq_private(buf, 0440, net->proc_net,
			&xt_target_seq_ops, sizeof(struct nf_mttg_trav),
			(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	/* Unwind in reverse creation order. */
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
1782EXPORT_SYMBOL_GPL(xt_proto_init);
1783
1784void xt_proto_fini(struct net *net, u_int8_t af)
1785{
1786#ifdef CONFIG_PROC_FS
1787 char buf[XT_FUNCTION_MAXNAMELEN];
1788
1789 strlcpy(buf, xt_prefix[af], sizeof(buf));
1790 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1791 remove_proc_entry(buf, net->proc_net);
1792
1793 strlcpy(buf, xt_prefix[af], sizeof(buf));
1794 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1795 remove_proc_entry(buf, net->proc_net);
1796
1797 strlcpy(buf, xt_prefix[af], sizeof(buf));
1798 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1799 remove_proc_entry(buf, net->proc_net);
1800#endif
1801}
1802EXPORT_SYMBOL_GPL(xt_proto_fini);
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
/**
 * xt_percpu_counter_alloc - hand out one percpu counter slot from a block
 * @state: carving state shared across consecutive calls for one ruleset
 * @counter: counter whose ->pcnt receives the percpu slot address
 *
 * On SMP, counters are carved out of XT_PCPU_BLOCK_SIZE-sized percpu
 * blocks that are also XT_PCPU_BLOCK_SIZE-aligned, so individual rule
 * counters avoid a full percpu allocation each.  The alignment means only
 * the first slot of each block sits on an XT_PCPU_BLOCK_SIZE boundary,
 * which xt_percpu_counter_free() uses to decide when to free the block.
 *
 * Returns false only if a fresh block was needed and allocation failed.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	/* UP: the counter is used in place; nothing to allocate. */
	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		/* Start a new block, aligned to its own size (see above). */
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		/* Block exhausted; the next call allocates a fresh one. */
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
1848EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1849
1850void xt_percpu_counter_free(struct xt_counters *counters)
1851{
1852 unsigned long pcnt = counters->pcnt;
1853
1854 if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
1855 free_percpu((void __percpu *)pcnt);
1856}
1857EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1858
1859static int __net_init xt_net_init(struct net *net)
1860{
1861 int i;
1862
1863 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1864 INIT_LIST_HEAD(&net->xt.tables[i]);
1865 return 0;
1866}
1867
1868static void __net_exit xt_net_exit(struct net *net)
1869{
1870 int i;
1871
1872 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1873 WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1874}
1875
/* Pernet hooks: per-namespace table-list setup/teardown. */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
};
1880
1881static int __init xt_init(void)
1882{
1883 unsigned int i;
1884 int rv;
1885
1886 for_each_possible_cpu(i) {
1887 seqcount_init(&per_cpu(xt_recseq, i));
1888 }
1889
1890 xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
1891 if (!xt)
1892 return -ENOMEM;
1893
1894 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
1895 mutex_init(&xt[i].mutex);
1896#ifdef CONFIG_COMPAT
1897 mutex_init(&xt[i].compat_mutex);
1898 xt[i].compat_tab = NULL;
1899#endif
1900 INIT_LIST_HEAD(&xt[i].target);
1901 INIT_LIST_HEAD(&xt[i].match);
1902 }
1903 rv = register_pernet_subsys(&xt_net_ops);
1904 if (rv < 0)
1905 kfree(xt);
1906 return rv;
1907}
1908
/* Module exit: unregister pernet hooks and free the per-family array. */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1914
1915module_init(xt_init);
1916module_exit(xt_fini);
1917
1918