#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>
#include <linux/btf_ids.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define CREATE_TRACE_POINTS
#include RH_KABI_HIDE_INCLUDE(<trace/events/netlink.h>)

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];

static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
	"nlk_cb_mutex-ROUTE",
	"nlk_cb_mutex-1",
	"nlk_cb_mutex-USERSOCK",
	"nlk_cb_mutex-FIREWALL",
	"nlk_cb_mutex-SOCK_DIAG",
	"nlk_cb_mutex-NFLOG",
	"nlk_cb_mutex-XFRM",
	"nlk_cb_mutex-SELINUX",
	"nlk_cb_mutex-ISCSI",
	"nlk_cb_mutex-AUDIT",
	"nlk_cb_mutex-FIB_LOOKUP",
	"nlk_cb_mutex-CONNECTOR",
	"nlk_cb_mutex-NETFILTER",
	"nlk_cb_mutex-IP6_FW",
	"nlk_cb_mutex-DNRTMSG",
	"nlk_cb_mutex-KOBJECT_UEVENT",
	"nlk_cb_mutex-GENERIC",
	"nlk_cb_mutex-17",
	"nlk_cb_mutex-SCSITRANSPORT",
	"nlk_cb_mutex-ECRYPTFS",
	"nlk_cb_mutex-RDMA",
	"nlk_cb_mutex-CRYPTO",
	"nlk_cb_mutex-SMC",
	"nlk_cb_mutex-23",
	"nlk_cb_mutex-24",
	"nlk_cb_mutex-25",
	"nlk_cb_mutex-26",
	"nlk_cb_mutex-27",
	"nlk_cb_mutex-28",
	"nlk_cb_mutex-29",
	"nlk_cb_mutex-30",
	"nlk_cb_mutex-31",
	"nlk_cb_mutex-MAX_LINKS"
};

static int netlink_dump(struct sock *sk);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired - either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static BLOCKING_NOTIFIER_HEAD(netlink_chain);

static const struct rhashtable_params netlink_rhashtable_params;

void do_trace_netlink_extack(const char *msg)
{
	trace_netlink_extack(msg);
}
EXPORT_SYMBOL(do_trace_netlink_extack);

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
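
/* Netlink skbs may carry their payload in vmalloc memory (see
 * netlink_alloc_large_skb()), which cannot be shared via skb_clone().
 * Copy such skbs into a freshly allocated skb before handing them to a
 * tap device, preserving the netlink control-block fields that matter.
 */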
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	skb_put_data(new, skb->data, len);
	return new;
}

static unsigned int netlink_tap_net_id;

struct netlink_tap_net {
	struct list_head netlink_tap_all;
	struct mutex netlink_tap_lock;
};

int netlink_add_tap(struct netlink_tap *nt)
{
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	mutex_lock(&nn->netlink_tap_lock);
	list_add_rcu(&nt->list, &nn->netlink_tap_all);
	mutex_unlock(&nn->netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
	bool found = false;
	struct netlink_tap *tmp;

	mutex_lock(&nn->netlink_tap_lock);

	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	mutex_unlock(&nn->netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static __net_init int netlink_tap_init_net(struct net *net)
{
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	INIT_LIST_HEAD(&nn->netlink_tap_all);
	mutex_init(&nn->netlink_tap_lock);
	return 0;
}

static struct pernet_operations netlink_tap_net_ops = {
	.init = netlink_tap_init_net,
	.id   = &netlink_tap_net_id,
	.size = sizeof(struct netlink_tap_net),
};

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return 0;

	dev_hold(dev);

	if (is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
{
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	rcu_read_lock();

	if (unlikely(!list_empty(&nn->netlink_tap_all)))
		__netlink_deliver_tap(skb, nn);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(sock_net(dst), skb);
}

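/* Receive-queue overflow: mark the socket congested and, unless the
 * receiver opted out via NETLINK_NO_ENOBUFS, raise ENOBUFS once; always
 * account the drop.
 */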
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static void netlink_skb_destructor(struct sk_buff *skb)
{
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

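/* Charge the skb against the destination socket's receive memory; the
 * destructor undoes the accounting when the skb is freed.
 */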
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);
		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

static void netlink_sock_destruct_work(struct work_struct *work)
{
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
						work);

	sk_free(&nlk->sk);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this problem gracefully.
 */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

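/* Recompute the per-group listener bitmasks for the protocol table of
 * @sk. Must be called with the table grabbed via netlink_table_grab().
 */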
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the held nl_table_lock,
	 * so no additional synchronization of the masks is needed here
	 */
}

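/* Hash the socket under @portid. nlk->bound is published last, behind a
 * write barrier, so lockless readers in bind/connect/sendmsg can rely on
 * portid being valid once they observe a bound socket.
 */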
static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	nlk_sk(sk)->bound = portid;

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
		lockdep_set_class_and_name(nlk->cb_mutex,
					   nlk_cb_mutex_keys + protocol,
					   nlk_cb_mutex_key_strings[protocol]);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;
	protocol = array_index_nospec(protocol, MAX_LINKS);

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
	struct sock *sk = &nlk->sk;

	kfree(nlk->groups);
	nlk->groups = NULL;

	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (nlk->cb_running && nlk->cb.done) {
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
		schedule_work(&nlk->work);
		return;
	}

	sk_free(sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid && nlk->bound) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		blocking_notifier_call_chain(&netlink_chain,
					     NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

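/* Pick a free portid automatically: try the current tgid first, then
 * search the negative portid space on collision.
 */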
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had when the netlink socket was created and the sender of the
 * message has the capability @cap in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had when the netlink socket was created and the sender of the
 * message has the capability @cap in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had when the netlink socket was created and the sender of the
 * message has the capability @cap in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had when the netlink socket was created and the sender of the
 * message has the capability @cap over the network namespace of
 * the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
	       ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, unsigned long groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err = 0;
	unsigned long groups;
	bool bound;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;
	groups = nladdr->nl_groups;

	/* Only superuser is allowed to listen multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->ngroups < BITS_PER_LONG)
		groups &= (1UL << nlk->ngroups) - 1;

	bound = nlk->bound;
	if (bound) {
		/* Ensure nlk->portid is up-to-date. */
		smp_rmb();

		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	}

	if (nlk->netlink_bind && groups) {
		int group;

		/* nl_groups is a u32, so cap the maximum groups we can bind */
		for (group = 0; group < BITS_PER_TYPE(u32); group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	netlink_lock_table();
	if (!bound) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
			goto unlock;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		goto unlock;
	netlink_unlock_table();

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;

unlock:
	netlink_unlock_table();
	return err;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if (alen < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	if (!nlk->bound)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		netlink_lock_table();
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
		netlink_unlock_table();
	}
	return sizeof(*nladdr);
}

static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	/* No ioctls are supported on netlink sockets.
	 */
	return -ENOIOCTLCMD;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

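/* For large unicast messages, fall back to a vmalloc()ed data area;
 * netlink_skb_destructor() knows how to free it and tap delivery copies
 * it via netlink_to_full_skb().
 */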
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. The skb is
 * not sent to the destination here; it is only charged against the
 * receiver's memory. If there is no room, the caller may sleep for up to
 * *timeo.
 *
 * Returns 0 on success (socket reference retained for the send), 1 when
 * the caller should retry delivery, or a negative errno. The socket
 * reference is dropped on retry and on error.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(sock_net(sk), skb);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

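/* Shrink an over-sized skb back to its payload before queueing it, so
 * that truesize-based receive-buffer accounting stays accurate.
 */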
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	pskb_expand_head(skb, 0, -delta,
			 (allocation & ~__GFP_DIRECT_RECLAIM) |
			 __GFP_NOWARN | __GFP_NORETRY);
	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

bool netlink_strict_get_check(struct sk_buff *skb)
{
	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);

	return nlk->flags & NETLINK_F_STRICT_CHK;
}
EXPORT_SYMBOL_GPL(netlink_strict_get_check);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

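/* Per-call state for one multicast send; skb2 is the clone that is
 * actually delivered to each matching socket.
 */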
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		goto out;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
		NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
out:
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && gfpflags_allow_blocking(allocation))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		err = 0;
		break;
	case NETLINK_CAP_ACK:
		if (val)
			nlk->flags |= NETLINK_F_CAP_ACK;
		else
			nlk->flags &= ~NETLINK_F_CAP_ACK;
		err = 0;
		break;
	case NETLINK_EXT_ACK:
		if (val)
			nlk->flags |= NETLINK_F_EXT_ACK;
		else
			nlk->flags &= ~NETLINK_F_EXT_ACK;
		err = 0;
		break;
	case NETLINK_GET_STRICT_CHK:
		if (val)
			nlk->flags |= NETLINK_F_STRICT_CHK;
		else
			nlk->flags &= ~NETLINK_F_STRICT_CHK;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		err = 0;
		netlink_lock_table();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_unlock_table();
		break;
	}
	case NETLINK_CAP_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_EXT_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
		if (put_user(len, optlen) || put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_GET_STRICT_CHK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
		if (put_user(len, optlen) || put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
}

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
			goto out;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->bound) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	} else {
		/* Ensure nlk is hashed and visible. */
		smp_rmb();
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= scm.creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		refcount_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);

out:
	scm_destroy(&scm);
	return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     SKB_WITH_OVERHEAD(32768));

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad	= 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

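/* Kernel-side netlink sockets must install their own sk_data_ready (or
 * an input callback); reaching this default handler is a bug.
 */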
static void netlink_data_ready(struct sock *sk)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

void
netlink_kernel_release(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ext_ack extack = {};
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;
	int err = -ENOBUFS;
	int alloc_min_size;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	cb = &nlk->cb;
	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
	alloc_min_size = max_t(int, alloc_min_size, cb->min_dump_alloc_rh_old);

	if (alloc_min_size < nlk->max_recvmsg_len) {
		alloc_size = nlk->max_recvmsg_len;
		skb = alloc_skb(alloc_size,
				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
				__GFP_NOWARN | __GFP_NORETRY);
	}
	if (!skb) {
		alloc_size = alloc_min_size;
		skb = alloc_skb(alloc_size, GFP_KERNEL);
	}
	if (!skb)
		goto errout_skb;

	/* Trim skb to allocated size. User is expected to provide buffer as
	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
	 * netlink_recvmsg())). dump will pack as many smaller messages as
	 * could fit within the allocated skb. skb is typically allocated
	 * with larger space than required (could be as much as near 2x the
	 * requested size with align to next power of 2 approach). Allowing
	 * dump to use the excess space makes it difficult for a user to have
	 * a reasonable static buffer based on the expected largest dump of a
	 * single netdev. The outcome is MSG_TRUNC error.
	 */
	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
	netlink_skb_set_owner_r(skb, sk);

	if (nlk->dump_done_errno > 0) {
		cb->extack = &extack;
		nlk->dump_done_errno = cb->dump(skb, cb);
		cb->extack = NULL;
	}

	if (nlk->dump_done_errno > 0 ||
	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
			       sizeof(nlk->dump_done_errno),
			       NLM_F_MULTI | cb->answer_flags);
	if (WARN_ON(!nlh))
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
	       sizeof(nlk->dump_done_errno));

	if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) {
		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
			nlmsg_end(skb, nlh);
	}

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	module = cb->module;
	skb = cb->skb;
	mutex_unlock(nlk->cb_mutex);
	module_put(module);
	consume_skb(skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}

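/* Start a dump: stash the callback state on the destination socket and
 * deliver the first chunk immediately via netlink_dump().
 */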
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_sock *nlk, *nlk2;
	struct netlink_callback *cb;
	struct sock *sk;
	int ret;

	refcount_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->min_dump_alloc_rh_old = control->min_dump_alloc;
	cb->skb = skb;

	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
	cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);

	if (control->start) {
		ret = control->start(cb);
		if (ret)
			goto error_put;
	}

	nlk->cb_running = true;
	nlk->dump_done_errno = INT_MAX;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);

	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_put:
	module_put(control->module);
error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
		 const struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	size_t tlvlen = 0;
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
	unsigned int flags = 0;
	bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message, and get extra error data if
	 * requested.
	 */
	if (nlk_has_extack && extack && extack->_msg)
		tlvlen += nla_total_size(strlen(extack->_msg) + 1);

	if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
		payload += nlmsg_len(nlh);
	else
		flags |= NLM_F_CAPPED;
	if (err && nlk_has_extack && extack && extack->bad_attr)
		tlvlen += nla_total_size(sizeof(u32));
	if (nlk_has_extack && extack && extack->cookie_len)
		tlvlen += nla_total_size(extack->cookie_len);

	if (tlvlen)
		flags |= NLM_F_ACK_TLVS;

	skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
	if (!skb) {
		NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
		NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk);
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, flags);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));

	if (nlk_has_extack && extack) {
		if (extack->_msg) {
			WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
					       extack->_msg));
		}
		if (err && extack->bad_attr &&
		    !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
			     (u8 *)extack->bad_attr >= in_skb->data +
						       in_skb->len))
			WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
					    (u8 *)extack->bad_attr -
					    (u8 *)nlh));
		if (extack->cookie_len)
			WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
					extack->cookie_len, extack->cookie));
	}

	nlmsg_end(skb, rep);

	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
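
/* Producer-side sketch of how extack strings reach netlink_ack() (hedged;
 * the doit handler shown is hypothetical). Handlers set fields on the
 * netlink_ext_ack they are passed, typically via the NL_SET_ERR_MSG()
 * helper, and the TLVs are appended above when NETLINK_F_EXT_ACK is set
 * on the requesting socket:
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		NL_SET_ERR_MSG(extack, "required attribute missing");
 *		return -EINVAL;
 *	}
 */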

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *,
						   struct netlink_ext_ack *))
{
	struct netlink_ext_ack extack;
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh, &extack);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
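
/* Typical use of netlink_rcv_skb() (hedged; "my_rcv_msg" is hypothetical):
 * a kernel socket's input callback hands each datagram to this walker,
 * which validates framing, dispatches NLM_F_REQUEST messages, and emits
 * ACKs. rtnetlink_rcv() in rtnetlink.c follows the same pattern:
 *
 *	static void my_netlink_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 */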

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			refcount_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
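
/* Usage sketch for nlmsg_notify() (hedged; the group constant is
 * illustrative): a subsystem broadcasts an event to a multicast group and,
 * when the request carried NLM_F_ECHO, also unicasts it back to the
 * requester:
 *
 *	err = nlmsg_notify(sk, skb, NETLINK_CB(req_skb).portid, MY_GRP,
 *			   nlmsg_report(nlh), GFP_KERNEL);
 *
 * nlmsg_report() returns nonzero when NLM_F_ECHO was set, which selects
 * the report path above.
 */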

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static void netlink_walk_start(struct nl_seq_iter *iter)
{
	rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
	rhashtable_walk_start(&iter->hti);
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			netlink_walk_start(iter);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;

	iter->link = 0;

	netlink_walk_start(iter);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_native_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}

static int netlink_native_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk               Eth Pid        Groups   "
			 "Rmem     Wmem     Dump  Locks    Drops    Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   refcount_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__netlink {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct netlink_sock *, sk);
};

DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)

static int netlink_prog_seq_show(struct bpf_prog *prog,
				 struct bpf_iter_meta *meta,
				 void *v)
{
	struct bpf_iter__netlink ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.sk = nlk_sk((struct sock *)v);
	return bpf_iter_run_prog(prog, &ctx);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (!prog)
		return netlink_native_seq_show(seq, v);

	if (v != SEQ_START_TOKEN)
		return netlink_prog_seq_show(prog, &meta, v);

	return 0;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)netlink_prog_seq_show(prog, &meta, v);
	}

	netlink_native_seq_stop(seq, v);
}
#else
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	return netlink_native_seq_show(seq, v);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	netlink_native_seq_stop(seq, v);
}
#endif

static const struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next = netlink_seq_next,
	.stop = netlink_seq_stop,
	.show = netlink_seq_show,
};
#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
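
/* Sketch of a netlink notifier user (hedged; "my_netlink_event" and
 * "cleanup_state_for" are hypothetical). Subscribers on netlink_chain are
 * called with events such as NETLINK_URELEASE when a netlink socket is
 * released, carrying a struct netlink_notify:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE)
 *			cleanup_state_for(n->portid);
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *	...
 *	netlink_register_notifier(&my_nb);
 */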

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	netlink_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner = THIS_MODULE,
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
			     sizeof(struct nl_seq_iter)))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
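
/* Userspace counterpart sketch (hedged, illustrative): NETLINK_USERSOCK is
 * registered above with NL_CFG_F_NONROOT_SEND, so unprivileged processes
 * can exchange unicast datagrams over it:
 *
 *	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_USERSOCK);
 */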

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};
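
/* Note on the table keying above: sockets are hashed and compared on the
 * (net, portid) pair packed into struct netlink_compare_arg, so a lookup
 * is simply rhashtable_lookup_fast() with the same key construction (see
 * __netlink_lookup() earlier in this file).
 */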

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
BTF_ID_LIST(btf_netlink_sock_id)
BTF_ID(struct, netlink_sock)

static const struct bpf_iter_seq_info netlink_seq_info = {
	.seq_ops		= &netlink_seq_ops,
	.init_seq_private	= bpf_iter_init_seq_net,
	.fini_seq_private	= bpf_iter_fini_seq_net,
	.seq_priv_size		= sizeof(struct nl_seq_iter),
};

static struct bpf_iter_reg netlink_reg_info = {
	.target			= "netlink",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__netlink, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &netlink_seq_info,
};

static int __init bpf_iter_register(void)
{
	netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
	return bpf_iter_reg_target(&netlink_reg_info);
}
#endif

static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	err = bpf_iter_register();
	if (err)
		goto out;
#endif

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	register_pernet_subsys(&netlink_tap_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);