80#define pr_fmt(fmt) "UDP: " fmt
81
82#include <asm/uaccess.h>
83#include <asm/ioctls.h>
84#include <linux/bootmem.h>
85#include <linux/highmem.h>
86#include <linux/swap.h>
87#include <linux/types.h>
88#include <linux/fcntl.h>
89#include <linux/module.h>
90#include <linux/socket.h>
91#include <linux/sockios.h>
92#include <linux/igmp.h>
93#include <linux/in.h>
94#include <linux/errno.h>
95#include <linux/timer.h>
96#include <linux/mm.h>
97#include <linux/inet.h>
98#include <linux/netdevice.h>
99#include <linux/slab.h>
100#include <net/tcp_states.h>
101#include <linux/skbuff.h>
103#include <linux/proc_fs.h>
104#include <linux/seq_file.h>
105#include <net/net_namespace.h>
106#include <net/icmp.h>
107#include <net/inet_hashtables.h>
108#include <net/route.h>
109#include <net/checksum.h>
110#include <net/xfrm.h>
111#include <trace/events/udp.h>
112#include <linux/static_key.h>
113#include <trace/events/skb.h>
114#include <net/busy_poll.h>
115#include "udp_impl.h"
116
117struct udp_table udp_table __read_mostly;
118EXPORT_SYMBOL(udp_table);
119
120long sysctl_udp_mem[3] __read_mostly;
121EXPORT_SYMBOL(sysctl_udp_mem);
122
123int sysctl_udp_rmem_min __read_mostly;
124EXPORT_SYMBOL(sysctl_udp_rmem_min);
125
126int sysctl_udp_wmem_min __read_mostly;
127EXPORT_SYMBOL(sysctl_udp_wmem_min);
128
129atomic_long_t udp_memory_allocated;
130EXPORT_SYMBOL(udp_memory_allocated);
131
132#define MAX_UDP_PORTS 65536
133#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
134
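/* Scan the primary (port-only) hash chain for @num and report whether another
 * socket already owns the port in a conflicting way (same netns, compatible
 * device binding and, per @saddr_comp, local address).  When @bitmap is given,
 * every conflicting port found on the chain is marked in the bitmap instead of
 * returning at the first hit.
 */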
135static int udp_lib_lport_inuse(struct net *net, __u16 num,
136 const struct udp_hslot *hslot,
137 unsigned long *bitmap,
138 struct sock *sk,
139 int (*saddr_comp)(const struct sock *sk1,
140 const struct sock *sk2),
141 unsigned int log)
142{
143 struct sock *sk2;
144 struct hlist_nulls_node *node;
145 kuid_t uid = sock_i_uid(sk);
146
147 sk_nulls_for_each(sk2, node, &hslot->head) {
148 if (net_eq(sock_net(sk2), net) &&
149 sk2 != sk &&
150 (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
151 (!sk2->sk_reuse || !sk->sk_reuse) &&
152 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
153 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
154 (!sk2->sk_reuseport || !sk->sk_reuseport ||
155 !uid_eq(uid, sock_i_uid(sk2))) &&
156 saddr_comp(sk, sk2)) {
157 if (!bitmap)
158 return 1;
159 __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
160 }
161 }
162 return 0;
163}
164
/*
 * Secondary hash (port+address) variant of the conflict check above.
 * Takes hslot2->lock itself.
 */
169static int udp_lib_lport_inuse2(struct net *net, __u16 num,
170 struct udp_hslot *hslot2,
171 struct sock *sk,
172 int (*saddr_comp)(const struct sock *sk1,
173 const struct sock *sk2))
174{
175 struct sock *sk2;
176 struct hlist_nulls_node *node;
177 kuid_t uid = sock_i_uid(sk);
178 int res = 0;
179
180 spin_lock(&hslot2->lock);
181 udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
182 if (net_eq(sock_net(sk2), net) &&
183 sk2 != sk &&
184 (udp_sk(sk2)->udp_port_hash == num) &&
185 (!sk2->sk_reuse || !sk->sk_reuse) &&
186 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
187 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
188 (!sk2->sk_reuseport || !sk->sk_reuseport ||
189 !uid_eq(uid, sock_i_uid(sk2))) &&
190 saddr_comp(sk, sk2)) {
191 res = 1;
192 break;
193 }
194 }
195 spin_unlock(&hslot2->lock);
196 return res;
197}
198
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
208int udp_lib_get_port(struct sock *sk, unsigned short snum,
209 int (*saddr_comp)(const struct sock *sk1,
210 const struct sock *sk2),
211 unsigned int hash2_nulladdr)
212{
213 struct udp_hslot *hslot, *hslot2;
214 struct udp_table *udptable = sk->sk_prot->h.udp_table;
215 int error = 1;
216 struct net *net = sock_net(sk);
217
218 if (!snum) {
219 int low, high, remaining;
220 unsigned int rand;
221 unsigned short first, last;
222 DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
223
224 inet_get_local_port_range(net, &low, &high);
225 remaining = (high - low) + 1;
226
227 rand = prandom_u32();
228 first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
232 rand = (rand | 1) * (udptable->mask + 1);
233 last = first + udptable->mask + 1;
234 do {
235 hslot = udp_hashslot(udptable, net, first);
236 bitmap_zero(bitmap, PORTS_PER_CHAIN);
237 spin_lock_bh(&hslot->lock);
238 udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
239 saddr_comp, udptable->log);
240
241 snum = first;
		/*
		 * Iterate on all possible values of snum for this hash.
		 * Using steps of an odd multiple of UDP_HTABLE_SIZE
		 * gives us randomization and full range coverage.
		 */
247 do {
248 if (low <= snum && snum <= high &&
249 !test_bit(snum >> udptable->log, bitmap) &&
250 !inet_is_local_reserved_port(net, snum))
251 goto found;
252 snum += rand;
253 } while (snum != first);
254 spin_unlock_bh(&hslot->lock);
255 } while (++first != last);
256 goto fail;
257 } else {
258 hslot = udp_hashslot(udptable, net, snum);
259 spin_lock_bh(&hslot->lock);
260 if (hslot->count > 10) {
261 int exist;
262 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
263
264 slot2 &= udptable->mask;
265 hash2_nulladdr &= udptable->mask;
266
267 hslot2 = udp_hashslot2(udptable, slot2);
268 if (hslot->count < hslot2->count)
269 goto scan_primary_hash;
270
271 exist = udp_lib_lport_inuse2(net, snum, hslot2,
272 sk, saddr_comp);
273 if (!exist && (hash2_nulladdr != slot2)) {
274 hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
275 exist = udp_lib_lport_inuse2(net, snum, hslot2,
276 sk, saddr_comp);
277 }
278 if (exist)
279 goto fail_unlock;
280 else
281 goto found;
282 }
283scan_primary_hash:
284 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
285 saddr_comp, 0))
286 goto fail_unlock;
287 }
288found:
289 inet_sk(sk)->inet_num = snum;
290 udp_sk(sk)->udp_port_hash = snum;
291 udp_sk(sk)->udp_portaddr_hash ^= snum;
292 if (sk_unhashed(sk)) {
293 sk_nulls_add_node_rcu(sk, &hslot->head);
294 hslot->count++;
295 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
296
297 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
298 spin_lock(&hslot2->lock);
299 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
300 &hslot2->head);
301 hslot2->count++;
302 spin_unlock(&hslot2->lock);
303 }
304 error = 0;
305fail_unlock:
306 spin_unlock_bh(&hslot->lock);
307fail:
308 return error;
309}
310EXPORT_SYMBOL(udp_lib_get_port);
311
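/* Two IPv4 sockets conflict on a local address if either one is bound to the
 * wildcard address or both are bound to the same address.
 */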
312static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
313{
314 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
315
316 return (!ipv6_only_sock(sk2) &&
317 (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
318 inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
319}
320
321static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
322 unsigned int port)
323{
324 return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
325}
326
327int udp_v4_get_port(struct sock *sk, unsigned short snum)
328{
329 unsigned int hash2_nulladdr =
330 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
331 unsigned int hash2_partial =
332 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
333
	/* precompute partial secondary hash */
335 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
336 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
337}
338
339static inline int compute_score(struct sock *sk, struct net *net,
340 __be32 saddr, unsigned short hnum, __be16 sport,
341 __be32 daddr, __be16 dport, int dif)
342{
343 int score;
344 struct inet_sock *inet;
345
346 if (!net_eq(sock_net(sk), net) ||
347 udp_sk(sk)->udp_port_hash != hnum ||
348 ipv6_only_sock(sk))
349 return -1;
350
351 score = (sk->sk_family == PF_INET) ? 2 : 1;
352 inet = inet_sk(sk);
353
354 if (inet->inet_rcv_saddr) {
355 if (inet->inet_rcv_saddr != daddr)
356 return -1;
357 score += 4;
358 }
359
360 if (inet->inet_daddr) {
361 if (inet->inet_daddr != saddr)
362 return -1;
363 score += 4;
364 }
365
366 if (inet->inet_dport) {
367 if (inet->inet_dport != sport)
368 return -1;
369 score += 4;
370 }
371
372 if (sk->sk_bound_dev_if) {
373 if (sk->sk_bound_dev_if != dif)
374 return -1;
375 score += 4;
376 }
377
378 return score;
379}
380
/*
 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
 */
384static inline int compute_score2(struct sock *sk, struct net *net,
385 __be32 saddr, __be16 sport,
386 __be32 daddr, unsigned int hnum, int dif)
387{
388 int score;
389 struct inet_sock *inet;
390
391 if (!net_eq(sock_net(sk), net) ||
392 ipv6_only_sock(sk))
393 return -1;
394
395 inet = inet_sk(sk);
396
397 if (inet->inet_rcv_saddr != daddr ||
398 inet->inet_num != hnum)
399 return -1;
400
401 score = (sk->sk_family == PF_INET) ? 2 : 1;
402
403 if (inet->inet_daddr) {
404 if (inet->inet_daddr != saddr)
405 return -1;
406 score += 4;
407 }
408
409 if (inet->inet_dport) {
410 if (inet->inet_dport != sport)
411 return -1;
412 score += 4;
413 }
414
415 if (sk->sk_bound_dev_if) {
416 if (sk->sk_bound_dev_if != dif)
417 return -1;
418 score += 4;
419 }
420
421 return score;
422}
423
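/* Keyed 4-tuple hash (per-boot secret plus netns mix), used when choosing
 * among SO_REUSEPORT sockets that score equally.
 */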
424static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
425 const __u16 lport, const __be32 faddr,
426 const __be16 fport)
427{
428 static u32 udp_ehash_secret __read_mostly;
429
430 net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
431
432 return __inet_ehashfn(laddr, lport, faddr, fport,
433 udp_ehash_secret + net_hash_mix(net));
434}
435
/* called with rcu_read_lock() held */
438static struct sock *udp4_lib_lookup2(struct net *net,
439 __be32 saddr, __be16 sport,
440 __be32 daddr, unsigned int hnum, int dif,
441 struct udp_hslot *hslot2, unsigned int slot2)
442{
443 struct sock *sk, *result;
444 struct hlist_nulls_node *node;
445 int score, badness, matches = 0, reuseport = 0;
446 u32 hash = 0;
447
448begin:
449 result = NULL;
450 badness = 0;
451 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
452 score = compute_score2(sk, net, saddr, sport,
453 daddr, hnum, dif);
454 if (score > badness) {
455 result = sk;
456 badness = score;
457 reuseport = sk->sk_reuseport;
458 if (reuseport) {
459 hash = udp_ehashfn(net, daddr, hnum,
460 saddr, sport);
461 matches = 1;
462 }
463 } else if (score == badness && reuseport) {
464 matches++;
465 if (reciprocal_scale(hash, matches) == 0)
466 result = sk;
467 hash = next_pseudo_random32(hash);
468 }
469 }
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
475 if (get_nulls_value(node) != slot2)
476 goto begin;
477 if (result) {
478 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
479 result = NULL;
480 else if (unlikely(compute_score2(result, net, saddr, sport,
481 daddr, hnum, dif) < badness)) {
482 sock_put(result);
483 goto begin;
484 }
485 }
486 return result;
487}
488
/*
 * Look up the receiving socket for a UDP datagram.  If the primary chain
 * for this port is long, use the secondary (port+address) hash instead.
 */
492struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
493 __be16 sport, __be32 daddr, __be16 dport,
494 int dif, struct udp_table *udptable)
495{
496 struct sock *sk, *result;
497 struct hlist_nulls_node *node;
498 unsigned short hnum = ntohs(dport);
499 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
500 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
501 int score, badness, matches = 0, reuseport = 0;
502 u32 hash = 0;
503
504 rcu_read_lock();
505 if (hslot->count > 10) {
506 hash2 = udp4_portaddr_hash(net, daddr, hnum);
507 slot2 = hash2 & udptable->mask;
508 hslot2 = &udptable->hash2[slot2];
509 if (hslot->count < hslot2->count)
510 goto begin;
511
512 result = udp4_lib_lookup2(net, saddr, sport,
513 daddr, hnum, dif,
514 hslot2, slot2);
515 if (!result) {
516 hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
517 slot2 = hash2 & udptable->mask;
518 hslot2 = &udptable->hash2[slot2];
519 if (hslot->count < hslot2->count)
520 goto begin;
521
522 result = udp4_lib_lookup2(net, saddr, sport,
523 htonl(INADDR_ANY), hnum, dif,
524 hslot2, slot2);
525 }
526 rcu_read_unlock();
527 return result;
528 }
529begin:
530 result = NULL;
531 badness = 0;
532 sk_nulls_for_each_rcu(sk, node, &hslot->head) {
533 score = compute_score(sk, net, saddr, hnum, sport,
534 daddr, dport, dif);
535 if (score > badness) {
536 result = sk;
537 badness = score;
538 reuseport = sk->sk_reuseport;
539 if (reuseport) {
540 hash = udp_ehashfn(net, daddr, hnum,
541 saddr, sport);
542 matches = 1;
543 }
544 } else if (score == badness && reuseport) {
545 matches++;
546 if (reciprocal_scale(hash, matches) == 0)
547 result = sk;
548 hash = next_pseudo_random32(hash);
549 }
550 }
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
556 if (get_nulls_value(node) != slot)
557 goto begin;
558
559 if (result) {
560 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
561 result = NULL;
562 else if (unlikely(compute_score(result, net, saddr, hnum, sport,
563 daddr, dport, dif) < badness)) {
564 sock_put(result);
565 goto begin;
566 }
567 }
568 rcu_read_unlock();
569 return result;
570}
571EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
572
573static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
574 __be16 sport, __be16 dport,
575 struct udp_table *udptable)
576{
577 const struct iphdr *iph = ip_hdr(skb);
578
579 return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
580 iph->daddr, dport, inet_iif(skb),
581 udptable);
582}
583
584struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
585 __be32 daddr, __be16 dport, int dif)
586{
587 return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
588}
589EXPORT_SYMBOL_GPL(udp4_lib_lookup);
590
591static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
592 __be16 loc_port, __be32 loc_addr,
593 __be16 rmt_port, __be32 rmt_addr,
594 int dif, unsigned short hnum)
595{
596 struct inet_sock *inet = inet_sk(sk);
597
598 if (!net_eq(sock_net(sk), net) ||
599 udp_sk(sk)->udp_port_hash != hnum ||
600 (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
601 (inet->inet_dport != rmt_port && inet->inet_dport) ||
602 (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
603 ipv6_only_sock(sk) ||
604 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
605 return false;
606 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
607 return false;
608 return true;
609}
610
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | code.  After adjustment
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
622void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
623{
624 struct inet_sock *inet;
625 const struct iphdr *iph = (const struct iphdr *)skb->data;
626 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
627 const int type = icmp_hdr(skb)->type;
628 const int code = icmp_hdr(skb)->code;
629 struct sock *sk;
630 int harderr;
631 int err;
632 struct net *net = dev_net(skb->dev);
633
634 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
635 iph->saddr, uh->source, skb->dev->ifindex, udptable);
636 if (sk == NULL) {
637 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
638 return;
639 }
640
641 err = 0;
642 harderr = 0;
643 inet = inet_sk(sk);
644
645 switch (type) {
646 default:
647 case ICMP_TIME_EXCEEDED:
648 err = EHOSTUNREACH;
649 break;
650 case ICMP_SOURCE_QUENCH:
651 goto out;
652 case ICMP_PARAMETERPROB:
653 err = EPROTO;
654 harderr = 1;
655 break;
656 case ICMP_DEST_UNREACH:
657 if (code == ICMP_FRAG_NEEDED) {
658 ipv4_sk_update_pmtu(skb, sk, info);
659 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
660 err = EMSGSIZE;
661 harderr = 1;
662 break;
663 }
664 goto out;
665 }
666 err = EHOSTUNREACH;
667 if (code <= NR_ICMP_UNREACH) {
668 harderr = icmp_err_convert[code].fatal;
669 err = icmp_err_convert[code].errno;
670 }
671 break;
672 case ICMP_REDIRECT:
673 ipv4_sk_redirect(skb, sk);
674 goto out;
675 }
676
	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as
	 *	per 4.1.3.3.
	 */
681 if (!inet->recverr) {
682 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
683 goto out;
684 } else
685 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
686
687 sk->sk_err = err;
688 sk->sk_error_report(sk);
689out:
690 sock_put(sk);
691}
692
693void udp_err(struct sk_buff *skb, u32 info)
694{
695 __udp4_lib_err(skb, info, &udp_table);
696}
697
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
701void udp_flush_pending_frames(struct sock *sk)
702{
703 struct udp_sock *up = udp_sk(sk);
704
705 if (up->pending) {
706 up->len = 0;
707 up->pending = 0;
708 ip_flush_pending_frames(sk);
709 }
710}
711EXPORT_SYMBOL(udp_flush_pending_frames);
712
/**
 * udp4_hwcsum  -  handle outgoing HW checksumming
 * @skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 * @src:	source IP address
 * @dst:	destination IP address
 */
720void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
721{
722 struct udphdr *uh = udp_hdr(skb);
723 int offset = skb_transport_offset(skb);
724 int len = skb->len - offset;
725 int hlen = len;
726 __wsum csum = 0;
727
728 if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
732 skb->csum_start = skb_transport_header(skb) - skb->head;
733 skb->csum_offset = offsetof(struct udphdr, check);
734 uh->check = ~csum_tcpudp_magic(src, dst, len,
735 IPPROTO_UDP, 0);
736 } else {
737 struct sk_buff *frags;
738
		/* HW checksumming won't work, as there are two or more
		 * fragments on the socket, so the csums of all sk_buffs
		 * must be added together here.
		 */
744 skb_walk_frags(skb, frags) {
745 csum = csum_add(csum, frags->csum);
746 hlen -= frags->len;
747 }
748
749 csum = skb_checksum(skb, offset, hlen, csum);
750 skb->ip_summed = CHECKSUM_NONE;
751
752 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
753 if (uh->check == 0)
754 uh->check = CSUM_MANGLED_0;
755 }
756}
757EXPORT_SYMBOL_GPL(udp4_hwcsum);
758
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
762void udp_set_csum(bool nocheck, struct sk_buff *skb,
763 __be32 saddr, __be32 daddr, int len)
764{
765 struct udphdr *uh = udp_hdr(skb);
766
767 if (nocheck)
768 uh->check = 0;
769 else if (skb_is_gso(skb))
770 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
771 else if (skb_dst(skb) && skb_dst(skb)->dev &&
772 (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
773
774 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
775
776 skb->ip_summed = CHECKSUM_PARTIAL;
777 skb->csum_start = skb_transport_header(skb) - skb->head;
778 skb->csum_offset = offsetof(struct udphdr, check);
779 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
780 } else {
781 __wsum csum;
782
783 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
784
785 uh->check = 0;
786 csum = skb_checksum(skb, 0, len, 0);
787 uh->check = udp_v4_check(len, saddr, daddr, csum);
788 if (uh->check == 0)
789 uh->check = CSUM_MANGLED_0;
790
791 skb->ip_summed = CHECKSUM_UNNECESSARY;
792 }
793}
794EXPORT_SYMBOL(udp_set_csum);
795
796static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
797{
798 struct sock *sk = skb->sk;
799 struct inet_sock *inet = inet_sk(sk);
800 struct udphdr *uh;
801 int err = 0;
802 int is_udplite = IS_UDPLITE(sk);
803 int offset = skb_transport_offset(skb);
804 int len = skb->len - offset;
805 __wsum csum = 0;
806
	/*
	 * Create a UDP header
	 */
810 uh = udp_hdr(skb);
811 uh->source = inet->inet_sport;
812 uh->dest = fl4->fl4_dport;
813 uh->len = htons(len);
814 uh->check = 0;
815
816 if (is_udplite)
817 csum = udplite_csum(skb);
818
819 else if (sk->sk_no_check_tx) {
820
821 skb->ip_summed = CHECKSUM_NONE;
822 goto send;
823
824 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
825
826 udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
827 goto send;
828
829 } else
830 csum = udp_csum(skb);
831
832
833 uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
834 sk->sk_protocol, csum);
835 if (uh->check == 0)
836 uh->check = CSUM_MANGLED_0;
837
838send:
839 err = ip_send_skb(sock_net(sk), skb);
840 if (err) {
841 if (err == -ENOBUFS && !inet->recverr) {
842 UDP_INC_STATS_USER(sock_net(sk),
843 UDP_MIB_SNDBUFERRORS, is_udplite);
844 err = 0;
845 }
846 } else
847 UDP_INC_STATS_USER(sock_net(sk),
848 UDP_MIB_OUTDATAGRAMS, is_udplite);
849 return err;
850}
851
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
855int udp_push_pending_frames(struct sock *sk)
856{
857 struct udp_sock *up = udp_sk(sk);
858 struct inet_sock *inet = inet_sk(sk);
859 struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
860 struct sk_buff *skb;
861 int err = 0;
862
863 skb = ip_finish_skb(sk, fl4);
864 if (!skb)
865 goto out;
866
867 err = udp_send_skb(skb, fl4);
868
869out:
870 up->len = 0;
871 up->pending = 0;
872 return err;
873}
874EXPORT_SYMBOL(udp_push_pending_frames);
875
876int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
877 size_t len)
878{
879 struct inet_sock *inet = inet_sk(sk);
880 struct udp_sock *up = udp_sk(sk);
881 struct flowi4 fl4_stack;
882 struct flowi4 *fl4;
883 int ulen = len;
884 struct ipcm_cookie ipc;
885 struct rtable *rt = NULL;
886 int free = 0;
887 int connected = 0;
888 __be32 daddr, faddr, saddr;
889 __be16 dport;
890 u8 tos;
891 int err, is_udplite = IS_UDPLITE(sk);
892 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
893 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
894 struct sk_buff *skb;
895 struct ip_options_data opt_copy;
896
897 if (len > 0xFFFF)
898 return -EMSGSIZE;
899
	/*
	 *	Check the flags.
	 */
903
904 if (msg->msg_flags & MSG_OOB)
905 return -EOPNOTSUPP;
906
907 ipc.opt = NULL;
908 ipc.tx_flags = 0;
909 ipc.ttl = 0;
910 ipc.tos = -1;
911
912 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
913
914 fl4 = &inet->cork.fl.u.ip4;
915 if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
920 lock_sock(sk);
921 if (likely(up->pending)) {
922 if (unlikely(up->pending != AF_INET)) {
923 release_sock(sk);
924 return -EINVAL;
925 }
926 goto do_append_data;
927 }
928 release_sock(sk);
929 }
930 ulen += sizeof(struct udphdr);
931
	/*
	 *	Get and verify the address.
	 */
935 if (msg->msg_name) {
936 DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
937 if (msg->msg_namelen < sizeof(*usin))
938 return -EINVAL;
939 if (usin->sin_family != AF_INET) {
940 if (usin->sin_family != AF_UNSPEC)
941 return -EAFNOSUPPORT;
942 }
943
944 daddr = usin->sin_addr.s_addr;
945 dport = usin->sin_port;
946 if (dport == 0)
947 return -EINVAL;
948 } else {
949 if (sk->sk_state != TCP_ESTABLISHED)
950 return -EDESTADDRREQ;
951 daddr = inet->inet_daddr;
952 dport = inet->inet_dport;
		/* Open fast path for connected socket.
		 * Route will not be used, if at least one option is set.
		 */
956 connected = 1;
957 }
958 ipc.addr = inet->inet_saddr;
959
960 ipc.oif = sk->sk_bound_dev_if;
961
962 sock_tx_timestamp(sk, &ipc.tx_flags);
963
964 if (msg->msg_controllen) {
965 err = ip_cmsg_send(sock_net(sk), msg, &ipc,
966 sk->sk_family == AF_INET6);
967 if (err)
968 return err;
969 if (ipc.opt)
970 free = 1;
971 connected = 0;
972 }
973 if (!ipc.opt) {
974 struct ip_options_rcu *inet_opt;
975
976 rcu_read_lock();
977 inet_opt = rcu_dereference(inet->inet_opt);
978 if (inet_opt) {
979 memcpy(&opt_copy, inet_opt,
980 sizeof(*inet_opt) + inet_opt->opt.optlen);
981 ipc.opt = &opt_copy.opt;
982 }
983 rcu_read_unlock();
984 }
985
986 saddr = ipc.addr;
987 ipc.addr = faddr = daddr;
988
989 if (ipc.opt && ipc.opt->opt.srr) {
990 if (!daddr)
991 return -EINVAL;
992 faddr = ipc.opt->opt.faddr;
993 connected = 0;
994 }
995 tos = get_rttos(&ipc, inet);
996 if (sock_flag(sk, SOCK_LOCALROUTE) ||
997 (msg->msg_flags & MSG_DONTROUTE) ||
998 (ipc.opt && ipc.opt->opt.is_strictroute)) {
999 tos |= RTO_ONLINK;
1000 connected = 0;
1001 }
1002
1003 if (ipv4_is_multicast(daddr)) {
1004 if (!ipc.oif)
1005 ipc.oif = inet->mc_index;
1006 if (!saddr)
1007 saddr = inet->mc_addr;
1008 connected = 0;
1009 } else if (!ipc.oif)
1010 ipc.oif = inet->uc_index;
1011
1012 if (connected)
1013 rt = (struct rtable *)sk_dst_check(sk, 0);
1014
1015 if (rt == NULL) {
1016 struct net *net = sock_net(sk);
1017
1018 fl4 = &fl4_stack;
1019 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
1020 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1021 inet_sk_flowi_flags(sk),
1022 faddr, saddr, dport, inet->inet_sport);
1023
1024 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
1025 rt = ip_route_output_flow(net, fl4, sk);
1026 if (IS_ERR(rt)) {
1027 err = PTR_ERR(rt);
1028 rt = NULL;
1029 if (err == -ENETUNREACH)
1030 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1031 goto out;
1032 }
1033
1034 err = -EACCES;
1035 if ((rt->rt_flags & RTCF_BROADCAST) &&
1036 !sock_flag(sk, SOCK_BROADCAST))
1037 goto out;
1038 if (connected)
1039 sk_dst_set(sk, dst_clone(&rt->dst));
1040 }
1041
1042 if (msg->msg_flags&MSG_CONFIRM)
1043 goto do_confirm;
1044back_from_confirm:
1045
1046 saddr = fl4->saddr;
1047 if (!ipc.addr)
1048 daddr = ipc.addr = fl4->daddr;
1049
	/* Lockless fast path for the non-corking case. */
1051 if (!corkreq) {
1052 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
1053 sizeof(struct udphdr), &ipc, &rt,
1054 msg->msg_flags);
1055 err = PTR_ERR(skb);
1056 if (!IS_ERR_OR_NULL(skb))
1057 err = udp_send_skb(skb, fl4);
1058 goto out;
1059 }
1060
1061 lock_sock(sk);
1062 if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it, */
		/* ... which is an evident application bug. */
1065 release_sock(sk);
1066
1067 net_dbg_ratelimited("cork app bug 2\n");
1068 err = -EINVAL;
1069 goto out;
1070 }
1071
	/*
	 *	Now cork the socket to pend data.
	 */
1074 fl4 = &inet->cork.fl.u.ip4;
1075 fl4->daddr = daddr;
1076 fl4->saddr = saddr;
1077 fl4->fl4_dport = dport;
1078 fl4->fl4_sport = inet->inet_sport;
1079 up->pending = AF_INET;
1080
1081do_append_data:
1082 up->len += ulen;
1083 err = ip_append_data(sk, fl4, getfrag, msg, ulen,
1084 sizeof(struct udphdr), &ipc, &rt,
1085 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1086 if (err)
1087 udp_flush_pending_frames(sk);
1088 else if (!corkreq)
1089 err = udp_push_pending_frames(sk);
1090 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1091 up->pending = 0;
1092 release_sock(sk);
1093
1094out:
1095 ip_rt_put(rt);
1096 if (free)
1097 kfree(ipc.opt);
1098 if (!err)
1099 return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
1107 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1108 UDP_INC_STATS_USER(sock_net(sk),
1109 UDP_MIB_SNDBUFERRORS, is_udplite);
1110 }
1111 return err;
1112
1113do_confirm:
1114 dst_confirm(&rt->dst);
1115 if (!(msg->msg_flags&MSG_PROBE) || len)
1116 goto back_from_confirm;
1117 err = 0;
1118 goto out;
1119}
1120EXPORT_SYMBOL(udp_sendmsg);
1121
1122int udp_sendpage(struct sock *sk, struct page *page, int offset,
1123 size_t size, int flags)
1124{
1125 struct inet_sock *inet = inet_sk(sk);
1126 struct udp_sock *up = udp_sk(sk);
1127 int ret;
1128
1129 if (flags & MSG_SENDPAGE_NOTLAST)
1130 flags |= MSG_MORE;
1131
1132 if (!up->pending) {
1133 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
1134
		/* Call udp_sendmsg to specify the destination address, which
		 * the sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
1139 ret = udp_sendmsg(NULL, sk, &msg, 0);
1140 if (ret < 0)
1141 return ret;
1142 }
1143
1144 lock_sock(sk);
1145
1146 if (unlikely(!up->pending)) {
1147 release_sock(sk);
1148
1149 net_dbg_ratelimited("udp cork app bug 3\n");
1150 return -EINVAL;
1151 }
1152
1153 ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1154 page, offset, size, flags);
1155 if (ret == -EOPNOTSUPP) {
1156 release_sock(sk);
1157 return sock_no_sendpage(sk->sk_socket, page, offset,
1158 size, flags);
1159 }
1160 if (ret < 0) {
1161 udp_flush_pending_frames(sk);
1162 goto out;
1163 }
1164
1165 up->len += size;
1166 if (!(up->corkflag || (flags&MSG_MORE)))
1167 ret = udp_push_pending_frames(sk);
1168 if (!ret)
1169 ret = size;
1170out:
1171 release_sock(sk);
1172 return ret;
1173}
1174
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or 0 if none is found.
 */
1183static unsigned int first_packet_length(struct sock *sk)
1184{
1185 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
1186 struct sk_buff *skb;
1187 unsigned int res;
1188
1189 __skb_queue_head_init(&list_kill);
1190
1191 spin_lock_bh(&rcvq->lock);
1192 while ((skb = skb_peek(rcvq)) != NULL &&
1193 udp_lib_checksum_complete(skb)) {
1194 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
1195 IS_UDPLITE(sk));
1196 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1197 IS_UDPLITE(sk));
1198 atomic_inc(&sk->sk_drops);
1199 __skb_unlink(skb, rcvq);
1200 __skb_queue_tail(&list_kill, skb);
1201 }
1202 res = skb ? skb->len : 0;
1203 spin_unlock_bh(&rcvq->lock);
1204
1205 if (!skb_queue_empty(&list_kill)) {
1206 bool slow = lock_sock_fast(sk);
1207
1208 __skb_queue_purge(&list_kill);
1209 sk_mem_reclaim_partial(sk);
1210 unlock_sock_fast(sk, slow);
1211 }
1212 return res;
1213}
1214
/*
 *	IOCTL requests applicable to the UDP protocol
 */
1219int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1220{
1221 switch (cmd) {
1222 case SIOCOUTQ:
1223 {
1224 int amount = sk_wmem_alloc_get(sk);
1225
1226 return put_user(amount, (int __user *)arg);
1227 }
1228
1229 case SIOCINQ:
1230 {
1231 unsigned int amount = first_packet_length(sk);
1232
1233 if (amount)
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
1239 amount -= sizeof(struct udphdr);
1240
1241 return put_user(amount, (int __user *)arg);
1242 }
1243
1244 default:
1245 return -ENOIOCTLCMD;
1246 }
1247
1248 return 0;
1249}
1250EXPORT_SYMBOL(udp_ioctl);
1251
1252
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
1257int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1258 size_t len, int noblock, int flags, int *addr_len)
1259{
1260 struct inet_sock *inet = inet_sk(sk);
1261 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
1262 struct sk_buff *skb;
1263 unsigned int ulen, copied;
1264 int peeked, off = 0;
1265 int err;
1266 int is_udplite = IS_UDPLITE(sk);
1267 bool slow;
1268
1269 if (flags & MSG_ERRQUEUE)
1270 return ip_recv_error(sk, msg, len, addr_len);
1271
1272try_again:
1273 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
1274 &peeked, &off, &err);
1275 if (!skb)
1276 goto out;
1277
1278 ulen = skb->len - sizeof(struct udphdr);
1279 copied = len;
1280 if (copied > ulen)
1281 copied = ulen;
1282 else if (copied < ulen)
1283 msg->msg_flags |= MSG_TRUNC;
1284
	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

1291 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
1292 if (udp_lib_checksum_complete(skb))
1293 goto csum_copy_err;
1294 }
1295
1296 if (skb_csum_unnecessary(skb))
1297 err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
1298 msg, copied);
1299 else {
1300 err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
1301 msg);
1302
1303 if (err == -EINVAL)
1304 goto csum_copy_err;
1305 }
1306
1307 if (unlikely(err)) {
1308 trace_kfree_skb(skb, udp_recvmsg);
1309 if (!peeked) {
1310 atomic_inc(&sk->sk_drops);
1311 UDP_INC_STATS_USER(sock_net(sk),
1312 UDP_MIB_INERRORS, is_udplite);
1313 }
1314 goto out_free;
1315 }
1316
1317 if (!peeked)
1318 UDP_INC_STATS_USER(sock_net(sk),
1319 UDP_MIB_INDATAGRAMS, is_udplite);
1320
1321 sock_recv_ts_and_drops(msg, sk, skb);
1322
	/* Copy the address. */
1324 if (sin) {
1325 sin->sin_family = AF_INET;
1326 sin->sin_port = udp_hdr(skb)->source;
1327 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
1328 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1329 *addr_len = sizeof(*sin);
1330 }
1331 if (inet->cmsg_flags)
1332 ip_cmsg_recv(msg, skb);
1333
1334 err = copied;
1335 if (flags & MSG_TRUNC)
1336 err = ulen;
1337
1338out_free:
1339 skb_free_datagram_locked(sk, skb);
1340out:
1341 return err;
1342
1343csum_copy_err:
1344 slow = lock_sock_fast(sk);
1345 if (!skb_kill_datagram(sk, skb, flags)) {
1346 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
1347 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1348 }
1349 unlock_sock_fast(sk, slow);
1350
1351 if (noblock)
1352 return -EAGAIN;
1353
	/* starting over for a new packet */
1355 msg->msg_flags &= ~MSG_TRUNC;
1356 goto try_again;
1357}
1358
1359
1360int udp_disconnect(struct sock *sk, int flags)
1361{
1362 struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

1367 sk->sk_state = TCP_CLOSE;
1368 inet->inet_daddr = 0;
1369 inet->inet_dport = 0;
1370 sock_rps_reset_rxhash(sk);
1371 sk->sk_bound_dev_if = 0;
1372 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1373 inet_reset_saddr(sk);
1374
1375 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
1376 sk->sk_prot->unhash(sk);
1377 inet->inet_sport = 0;
1378 }
1379 sk_dst_reset(sk);
1380 return 0;
1381}
1382EXPORT_SYMBOL(udp_disconnect);
1383
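/* Remove the socket from both the primary (port) and secondary (port+address)
 * hash chains, taking the relevant bucket locks itself.
 */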
1384void udp_lib_unhash(struct sock *sk)
1385{
1386 if (sk_hashed(sk)) {
1387 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1388 struct udp_hslot *hslot, *hslot2;
1389
1390 hslot = udp_hashslot(udptable, sock_net(sk),
1391 udp_sk(sk)->udp_port_hash);
1392 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1393
1394 spin_lock_bh(&hslot->lock);
1395 if (sk_nulls_del_node_init_rcu(sk)) {
1396 hslot->count--;
1397 inet_sk(sk)->inet_num = 0;
1398 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
1399
1400 spin_lock(&hslot2->lock);
1401 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1402 hslot2->count--;
1403 spin_unlock(&hslot2->lock);
1404 }
1405 spin_unlock_bh(&hslot->lock);
1406 }
1407}
1408EXPORT_SYMBOL(udp_lib_unhash);
1409
/*
 * Rehash the socket when its secondary (port+address) hash changes,
 * e.g. because the bound local address changed.
 */
1413void udp_lib_rehash(struct sock *sk, u16 newhash)
1414{
1415 if (sk_hashed(sk)) {
1416 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1417 struct udp_hslot *hslot, *hslot2, *nhslot2;
1418
1419 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1420 nhslot2 = udp_hashslot2(udptable, newhash);
1421 udp_sk(sk)->udp_portaddr_hash = newhash;
1422 if (hslot2 != nhslot2) {
1423 hslot = udp_hashslot(udptable, sock_net(sk),
1424 udp_sk(sk)->udp_port_hash);
1425
1426 spin_lock_bh(&hslot->lock);
1427
1428 spin_lock(&hslot2->lock);
1429 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1430 hslot2->count--;
1431 spin_unlock(&hslot2->lock);
1432
1433 spin_lock(&nhslot2->lock);
1434 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
1435 &nhslot2->head);
1436 nhslot2->count++;
1437 spin_unlock(&nhslot2->lock);
1438
1439 spin_unlock_bh(&hslot->lock);
1440 }
1441 }
1442}
1443EXPORT_SYMBOL(udp_lib_rehash);
1444
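/* IPv4 variant: recompute the secondary hash from the bound address and port. */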
1445static void udp_v4_rehash(struct sock *sk)
1446{
1447 u16 new_hash = udp4_portaddr_hash(sock_net(sk),
1448 inet_sk(sk)->inet_rcv_saddr,
1449 inet_sk(sk)->inet_num);
1450 udp_lib_rehash(sk, new_hash);
1451}
1452
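/* Fast-path receive: update receive steering state for connected sockets,
 * then charge the skb to the socket receive queue.
 */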
1453static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1454{
1455 int rc;
1456
1457 if (inet_sk(sk)->inet_daddr) {
1458 sock_rps_save_rxhash(sk, skb);
1459 sk_mark_napi_id(sk, skb);
1460 sk_incoming_cpu_update(sk);
1461 }
1462
1463 rc = sock_queue_rcv_skb(sk, skb);
1464 if (rc < 0) {
1465 int is_udplite = IS_UDPLITE(sk);
1466
		/* Note that an ENOMEM error is charged twice */
1468 if (rc == -ENOMEM)
1469 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1470 is_udplite);
1471 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1472 kfree_skb(skb);
1473 trace_udp_fail_queue_rcv_skb(rc, sk);
1474 return -1;
1475 }
1476
1477 return 0;
1478
1479}
1480
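/* Static key keeps the encapsulation hook free of cost until first enabled. */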
1481static struct static_key udp_encap_needed __read_mostly;
1482void udp_encap_enable(void)
1483{
1484 if (!static_key_enabled(&udp_encap_needed))
1485 static_key_slow_inc(&udp_encap_needed);
1486}
1487EXPORT_SYMBOL(udp_encap_enable);
1488
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
1497int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1498{
1499 struct udp_sock *up = udp_sk(sk);
1500 int rc;
1501 int is_udplite = IS_UDPLITE(sk);
1502
	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
1506 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1507 goto drop;
1508 nf_reset(skb);
1509
1510 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1511 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
1525 encap_rcv = ACCESS_ONCE(up->encap_rcv);
1526 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
1527 int ret;
1528
			/* Verify checksum before giving to encap */
1530 if (udp_lib_checksum_complete(skb))
1531 goto csum_error;
1532
1533 ret = encap_rcv(sk, skb);
1534 if (ret <= 0) {
1535 UDP_INC_STATS_BH(sock_net(sk),
1536 UDP_MIB_INDATAGRAMS,
1537 is_udplite);
1538 return -ret;
1539 }
1540 }

		/* FALLTHROUGH -- it's a UDP Packet */
1543 }
1544
	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
1548 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 (sec 3.3) recommends that the receiving application
		 * should at least be able to block delivery of packets with
		 * coverage values less than a value it provided.
		 */
1561 if (up->pcrlen == 0) {
1562 net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
1563 UDP_SKB_CB(skb)->cscov, skb->len);
1564 goto drop;
1565 }
		/* The next case involves violating the minimum coverage
		 * requested by the receiver: drop packets whose checksum
		 * coverage is smaller than the coverage the application
		 * asked for via UDPLITE_RECV_CSCOV.
		 */
1571
1572 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
1573 net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
1574 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1575 goto drop;
1576 }
1577 }
1578
1579 if (rcu_access_pointer(sk->sk_filter) &&
1580 udp_lib_checksum_complete(skb))
1581 goto csum_error;
1582
1583
1584 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
1585 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1586 is_udplite);
1587 goto drop;
1588 }
1589
1590 rc = 0;
1591
1592 ipv4_pktinfo_prepare(sk, skb);
1593 bh_lock_sock(sk);
1594 if (!sock_owned_by_user(sk))
1595 rc = __udp_queue_rcv_skb(sk, skb);
1596 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
1597 bh_unlock_sock(sk);
1598 goto drop;
1599 }
1600 bh_unlock_sock(sk);
1601
1602 return rc;
1603
1604csum_error:
1605 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
1606drop:
1607 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1608 atomic_inc(&sk->sk_drops);
1609 kfree_skb(skb);
1610 return -1;
1611}
1612
1613
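/* Deliver the skb to every socket gathered on the stack, cloning it for all
 * but the @final entry, and dropping the reference taken when it was queued.
 */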
1614static void flush_stack(struct sock **stack, unsigned int count,
1615 struct sk_buff *skb, unsigned int final)
1616{
1617 unsigned int i;
1618 struct sk_buff *skb1 = NULL;
1619 struct sock *sk;
1620
1621 for (i = 0; i < count; i++) {
1622 sk = stack[i];
1623 if (likely(skb1 == NULL))
1624 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
1625
1626 if (!skb1) {
1627 atomic_inc(&sk->sk_drops);
1628 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1629 IS_UDPLITE(sk));
1630 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
1631 IS_UDPLITE(sk));
1632 }
1633
1634 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
1635 skb1 = NULL;
1636
1637 sock_put(sk);
1638 }
1639 if (unlikely(skb1))
1640 kfree_skb(skb1);
1641}
1642
1643
/* For TCP sockets, sk_rx_dst is protected by the socket lock.
 * For UDP, we use xchg() to guard against concurrent changes.
 */
1646static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1647{
1648 struct dst_entry *old;
1649
1650 dst_hold(dst);
1651 old = xchg(&sk->sk_rx_dst, dst);
1652 dst_release(old);
1653}
1654
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
1660static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1661 struct udphdr *uh,
1662 __be32 saddr, __be32 daddr,
1663 struct udp_table *udptable,
1664 int proto)
1665{
1666 struct sock *sk, *stack[256 / sizeof(struct sock *)];
1667 struct hlist_nulls_node *node;
1668 unsigned short hnum = ntohs(uh->dest);
1669 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
1670 int dif = skb->dev->ifindex;
1671 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
1672 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
1673 bool inner_flushed = false;
1674
1675 if (use_hash2) {
1676 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
1677 udp_table.mask;
1678 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
1679start_lookup:
1680 hslot = &udp_table.hash2[hash2];
1681 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
1682 }
1683
1684 spin_lock(&hslot->lock);
1685 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
1686 if (__udp_is_mcast_sock(net, sk,
1687 uh->dest, daddr,
1688 uh->source, saddr,
1689 dif, hnum)) {
1690 if (unlikely(count == ARRAY_SIZE(stack))) {
1691 flush_stack(stack, count, skb, ~0);
1692 inner_flushed = true;
1693 count = 0;
1694 }
1695 stack[count++] = sk;
1696 sock_hold(sk);
1697 }
1698 }
1699
1700 spin_unlock(&hslot->lock);
1701
	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
1703 if (use_hash2 && hash2 != hash2_any) {
1704 hash2 = hash2_any;
1705 goto start_lookup;
1706 }

	/*
	 * do the slow work with no lock held
	 */
1711 if (count) {
1712 flush_stack(stack, count, skb, count - 1);
1713 } else {
1714 if (!inner_flushed)
1715 UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
1716 proto == IPPROTO_UDPLITE);
1717 consume_skb(skb);
1718 }
1719 return 0;
1720}
1721
1722
/* Initialize the UDP checksum state.  If this exits with zero (success),
 * CHECKSUM_UNNECESSARY means no more checks are required.  Otherwise,
 * checksum completion requires checksumming the packet body, including
 * the UDP header, and folding it into skb->csum.
 */
1727static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1728 int proto)
1729{
1730 int err;
1731
1732 UDP_SKB_CB(skb)->partial_cov = 0;
1733 UDP_SKB_CB(skb)->cscov = skb->len;
1734
1735 if (proto == IPPROTO_UDPLITE) {
1736 err = udplite_checksum_init(skb, uh);
1737 if (err)
1738 return err;
1739 }
1740
1741 return skb_checksum_init_zero_check(skb, proto, uh->check,
1742 inet_compute_pseudo);
1743}
1744
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

1749int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1750 int proto)
1751{
1752 struct sock *sk;
1753 struct udphdr *uh;
1754 unsigned short ulen;
1755 struct rtable *rt = skb_rtable(skb);
1756 __be32 saddr, daddr;
1757 struct net *net = dev_net(skb->dev);
1758
	/*
	 *  Validate the packet.
	 */
1762 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1763 goto drop;
1764
1765 uh = udp_hdr(skb);
1766 ulen = ntohs(uh->len);
1767 saddr = ip_hdr(skb)->saddr;
1768 daddr = ip_hdr(skb)->daddr;
1769
1770 if (ulen > skb->len)
1771 goto short_packet;
1772
1773 if (proto == IPPROTO_UDP) {
1774
1775 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
1776 goto short_packet;
1777 uh = udp_hdr(skb);
1778 }
1779
1780 if (udp4_csum_init(skb, uh, proto))
1781 goto csum_error;
1782
1783 sk = skb_steal_sock(skb);
1784 if (sk) {
1785 struct dst_entry *dst = skb_dst(skb);
1786 int ret;
1787
1788 if (unlikely(sk->sk_rx_dst != dst))
1789 udp_sk_rx_dst_set(sk, dst);
1790
1791 ret = udp_queue_rcv_skb(sk, skb);
1792 sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
1796 if (ret > 0)
1797 return -ret;
1798 return 0;
1799 }
1800
1801 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1802 return __udp4_lib_mcast_deliver(net, skb, uh,
1803 saddr, daddr, udptable, proto);
1804
1805 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1806 if (sk != NULL) {
1807 int ret;
1808
1809 if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
1810 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
1811 inet_compute_pseudo);
1812
1813 ret = udp_queue_rcv_skb(sk, skb);
1814 sock_put(sk);
1815
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
1819 if (ret > 0)
1820 return -ret;
1821 return 0;
1822 }
1823
1824 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1825 goto drop;
1826 nf_reset(skb);
1827
	/* No socket. Drop packet silently, if checksum is wrong */
1829 if (udp_lib_checksum_complete(skb))
1830 goto csum_error;
1831
1832 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1833 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1834
	/*
	 * We got a UDP packet for a port we do not listen on;
	 * the ICMP error was sent above, so just drop it here.
	 */
1839 kfree_skb(skb);
1840 return 0;
1841
1842short_packet:
1843 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
1844 proto == IPPROTO_UDPLITE ? "Lite" : "",
1845 &saddr, ntohs(uh->source),
1846 ulen, skb->len,
1847 &daddr, ntohs(uh->dest));
1848 goto drop;
1849
1850csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
1855 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
1856 proto == IPPROTO_UDPLITE ? "Lite" : "",
1857 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
1858 ulen);
1859 UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1860drop:
1861 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1862 kfree_skb(skb);
1863 return 0;
1864}
1865
1866
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
1869static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
1870 __be16 loc_port, __be32 loc_addr,
1871 __be16 rmt_port, __be32 rmt_addr,
1872 int dif)
1873{
1874 struct sock *sk, *result;
1875 struct hlist_nulls_node *node;
1876 unsigned short hnum = ntohs(loc_port);
1877 unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
1878 struct udp_hslot *hslot = &udp_table.hash[slot];
1879
	/* Do not bother scanning a too big list */
1881 if (hslot->count > 10)
1882 return NULL;
1883
1884 rcu_read_lock();
1885begin:
1886 count = 0;
1887 result = NULL;
1888 sk_nulls_for_each_rcu(sk, node, &hslot->head) {
1889 if (__udp_is_mcast_sock(net, sk,
1890 loc_port, loc_addr,
1891 rmt_port, rmt_addr,
1892 dif, hnum)) {
1893 result = sk;
1894 ++count;
1895 }
1896 }
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
1902 if (get_nulls_value(node) != slot)
1903 goto begin;
1904
1905 if (result) {
1906 if (count != 1 ||
1907 unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
1908 result = NULL;
1909 else if (unlikely(!__udp_is_mcast_sock(net, result,
1910 loc_port, loc_addr,
1911 rmt_port, rmt_addr,
1912 dif, hnum))) {
1913 sock_put(result);
1914 result = NULL;
1915 }
1916 }
1917 rcu_read_unlock();
1918 return result;
1919}
1920
1921
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
1925static struct sock *__udp4_lib_demux_lookup(struct net *net,
1926 __be16 loc_port, __be32 loc_addr,
1927 __be16 rmt_port, __be32 rmt_addr,
1928 int dif)
1929{
1930 struct sock *sk, *result;
1931 struct hlist_nulls_node *node;
1932 unsigned short hnum = ntohs(loc_port);
1933 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
1934 unsigned int slot2 = hash2 & udp_table.mask;
1935 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1936 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
1937 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1938
1939 rcu_read_lock();
1940 result = NULL;
1941 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
1942 if (INET_MATCH(sk, net, acookie,
1943 rmt_addr, loc_addr, ports, dif))
1944 result = sk;
		/* Only check first socket in chain */
1946 break;
1947 }
1948
1949 if (result) {
1950 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
1951 result = NULL;
1952 else if (unlikely(!INET_MATCH(sk, net, acookie,
1953 rmt_addr, loc_addr,
1954 ports, dif))) {
1955 sock_put(result);
1956 result = NULL;
1957 }
1958 }
1959 rcu_read_unlock();
1960 return result;
1961}
1962
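/* Early demux: try to attach the owning socket (and, when possible, its
 * cached input route) to the skb before the routing decision is made.
 */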
1963void udp_v4_early_demux(struct sk_buff *skb)
1964{
1965 struct net *net = dev_net(skb->dev);
1966 const struct iphdr *iph;
1967 const struct udphdr *uh;
1968 struct sock *sk;
1969 struct dst_entry *dst;
1970 int dif = skb->dev->ifindex;
1971
1972
1973 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
1974 return;
1975
1976 iph = ip_hdr(skb);
1977 uh = udp_hdr(skb);
1978
1979 if (skb->pkt_type == PACKET_BROADCAST ||
1980 skb->pkt_type == PACKET_MULTICAST)
1981 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
1982 uh->source, iph->saddr, dif);
1983 else if (skb->pkt_type == PACKET_HOST)
1984 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
1985 uh->source, iph->saddr, dif);
1986 else
1987 return;
1988
1989 if (!sk)
1990 return;
1991
1992 skb->sk = sk;
1993 skb->destructor = sock_efree;
1994 dst = sk->sk_rx_dst;
1995
1996 if (dst)
1997 dst = dst_check(dst, 0);
1998 if (dst)
1999 skb_dst_set_noref(skb, dst);
2000}
2001
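/* Protocol handler entry point for plain UDP. */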
2002int udp_rcv(struct sk_buff *skb)
2003{
2004 return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
2005}
2006
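/* Flush any corked data and give the encapsulation owner a chance to clean
 * up before the socket goes away.
 */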
2007void udp_destroy_sock(struct sock *sk)
2008{
2009 struct udp_sock *up = udp_sk(sk);
2010 bool slow = lock_sock_fast(sk);
2011 udp_flush_pending_frames(sk);
2012 unlock_sock_fast(sk, slow);
2013 if (static_key_false(&udp_encap_needed) && up->encap_type) {
2014 void (*encap_destroy)(struct sock *sk);
2015 encap_destroy = ACCESS_ONCE(up->encap_destroy);
2016 if (encap_destroy)
2017 encap_destroy(sk);
2018 }
2019}
2020
2021
/*
 *	Socket option code for UDP
 */
2024int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2025 char __user *optval, unsigned int optlen,
2026 int (*push_pending_frames)(struct sock *))
2027{
2028 struct udp_sock *up = udp_sk(sk);
2029 int val, valbool;
2030 int err = 0;
2031 int is_udplite = IS_UDPLITE(sk);
2032
2033 if (optlen < sizeof(int))
2034 return -EINVAL;
2035
2036 if (get_user(val, (int __user *)optval))
2037 return -EFAULT;
2038
2039 valbool = val ? 1 : 0;
2040
2041 switch (optname) {
2042 case UDP_CORK:
2043 if (val != 0) {
2044 up->corkflag = 1;
2045 } else {
2046 up->corkflag = 0;
2047 lock_sock(sk);
2048 push_pending_frames(sk);
2049 release_sock(sk);
2050 }
2051 break;
2052
2053 case UDP_ENCAP:
2054 switch (val) {
2055 case 0:
2056 case UDP_ENCAP_ESPINUDP:
2057 case UDP_ENCAP_ESPINUDP_NON_IKE:
2058 up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
2060 case UDP_ENCAP_L2TPINUDP:
2061 up->encap_type = val;
2062 udp_encap_enable();
2063 break;
2064 default:
2065 err = -ENOPROTOOPT;
2066 break;
2067 }
2068 break;
2069
2070 case UDP_NO_CHECK6_TX:
2071 up->no_check6_tx = valbool;
2072 break;
2073
2074 case UDP_NO_CHECK6_RX:
2075 up->no_check6_rx = valbool;
2076 break;
2077
	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
2083 case UDPLITE_SEND_CSCOV:
2084 if (!is_udplite)
2085 return -ENOPROTOOPT;
2086 if (val != 0 && val < 8)
2087 val = 8;
2088 else if (val > USHRT_MAX)
2089 val = USHRT_MAX;
2090 up->pcslen = val;
2091 up->pcflag |= UDPLITE_SEND_CC;
2092 break;
2093
	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
2097 case UDPLITE_RECV_CSCOV:
2098 if (!is_udplite)
2099 return -ENOPROTOOPT;
2100 if (val != 0 && val < 8)
2101 val = 8;
2102 else if (val > USHRT_MAX)
2103 val = USHRT_MAX;
2104 up->pcrlen = val;
2105 up->pcflag |= UDPLITE_RECV_CC;
2106 break;
2107
2108 default:
2109 err = -ENOPROTOOPT;
2110 break;
2111 }
2112
2113 return err;
2114}
2115EXPORT_SYMBOL(udp_lib_setsockopt);
2116
2117int udp_setsockopt(struct sock *sk, int level, int optname,
2118 char __user *optval, unsigned int optlen)
2119{
2120 if (level == SOL_UDP || level == SOL_UDPLITE)
2121 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
2122 udp_push_pending_frames);
2123 return ip_setsockopt(sk, level, optname, optval, optlen);
2124}
2125
2126#ifdef CONFIG_COMPAT
2127int compat_udp_setsockopt(struct sock *sk, int level, int optname,
2128 char __user *optval, unsigned int optlen)
2129{
2130 if (level == SOL_UDP || level == SOL_UDPLITE)
2131 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
2132 udp_push_pending_frames);
2133 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
2134}
2135#endif
2136
2137int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2138 char __user *optval, int __user *optlen)
2139{
2140 struct udp_sock *up = udp_sk(sk);
2141 int val, len;
2142
2143 if (get_user(len, optlen))
2144 return -EFAULT;
2145
2146 len = min_t(unsigned int, len, sizeof(int));
2147
2148 if (len < 0)
2149 return -EINVAL;
2150
2151 switch (optname) {
2152 case UDP_CORK:
2153 val = up->corkflag;
2154 break;
2155
2156 case UDP_ENCAP:
2157 val = up->encap_type;
2158 break;
2159
2160 case UDP_NO_CHECK6_TX:
2161 val = up->no_check6_tx;
2162 break;
2163
2164 case UDP_NO_CHECK6_RX:
2165 val = up->no_check6_rx;
2166 break;
2167
	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
2170 case UDPLITE_SEND_CSCOV:
2171 val = up->pcslen;
2172 break;
2173
2174 case UDPLITE_RECV_CSCOV:
2175 val = up->pcrlen;
2176 break;
2177
2178 default:
2179 return -ENOPROTOOPT;
2180 }
2181
2182 if (put_user(len, optlen))
2183 return -EFAULT;
2184 if (copy_to_user(optval, &val, len))
2185 return -EFAULT;
2186 return 0;
2187}
2188EXPORT_SYMBOL(udp_lib_getsockopt);
2189
2190int udp_getsockopt(struct sock *sk, int level, int optname,
2191 char __user *optval, int __user *optlen)
2192{
2193 if (level == SOL_UDP || level == SOL_UDPLITE)
2194 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2195 return ip_getsockopt(sk, level, optname, optval, optlen);
2196}
2197
2198#ifdef CONFIG_COMPAT
2199int compat_udp_getsockopt(struct sock *sk, int level, int optname,
2200 char __user *optval, int __user *optlen)
2201{
2202 if (level == SOL_UDP || level == SOL_UDPLITE)
2203 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
2204 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
2205}
2206#endif
2207
/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select indicating data available but then block when reading it.
 *	Add special case code to work around these arguably broken
 *	applications.
 */
2220unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
2221{
2222 unsigned int mask = datagram_poll(file, sock, wait);
2223 struct sock *sk = sock->sk;
2224
2225 sock_rps_record_flow(sk);
2226
	/* Check for false positives due to checksum errors */
2228 if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
2229 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
2230 mask &= ~(POLLIN | POLLRDNORM);
2231
2232 return mask;
2233
2234}
2235EXPORT_SYMBOL(udp_poll);
2236
2237struct proto udp_prot = {
2238 .name = "UDP",
2239 .owner = THIS_MODULE,
2240 .close = udp_lib_close,
2241 .connect = ip4_datagram_connect,
2242 .disconnect = udp_disconnect,
2243 .ioctl = udp_ioctl,
2244 .destroy = udp_destroy_sock,
2245 .setsockopt = udp_setsockopt,
2246 .getsockopt = udp_getsockopt,
2247 .sendmsg = udp_sendmsg,
2248 .recvmsg = udp_recvmsg,
2249 .sendpage = udp_sendpage,
2250 .backlog_rcv = __udp_queue_rcv_skb,
2251 .release_cb = ip4_datagram_release_cb,
2252 .hash = udp_lib_hash,
2253 .unhash = udp_lib_unhash,
2254 .rehash = udp_v4_rehash,
2255 .get_port = udp_v4_get_port,
2256 .memory_allocated = &udp_memory_allocated,
2257 .sysctl_mem = sysctl_udp_mem,
2258 .sysctl_wmem = &sysctl_udp_wmem_min,
2259 .sysctl_rmem = &sysctl_udp_rmem_min,
2260 .obj_size = sizeof(struct udp_sock),
2261 .slab_flags = SLAB_DESTROY_BY_RCU,
2262 .h.udp_table = &udp_table,
2263#ifdef CONFIG_COMPAT
2264 .compat_setsockopt = compat_udp_setsockopt,
2265 .compat_getsockopt = compat_udp_getsockopt,
2266#endif
2267 .clear_sk = sk_prot_clear_portaddr_nulls,
2268};
2269EXPORT_SYMBOL(udp_prot);
2270
2271
2272#ifdef CONFIG_PROC_FS
2273
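/* seq_file iterator helpers for /proc/net/udp and /proc/net/udplite. */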
2274static struct sock *udp_get_first(struct seq_file *seq, int start)
2275{
2276 struct sock *sk;
2277 struct udp_iter_state *state = seq->private;
2278 struct net *net = seq_file_net(seq);
2279
2280 for (state->bucket = start; state->bucket <= state->udp_table->mask;
2281 ++state->bucket) {
2282 struct hlist_nulls_node *node;
2283 struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
2284
2285 if (hlist_nulls_empty(&hslot->head))
2286 continue;
2287
2288 spin_lock_bh(&hslot->lock);
2289 sk_nulls_for_each(sk, node, &hslot->head) {
2290 if (!net_eq(sock_net(sk), net))
2291 continue;
2292 if (sk->sk_family == state->family)
2293 goto found;
2294 }
2295 spin_unlock_bh(&hslot->lock);
2296 }
2297 sk = NULL;
2298found:
2299 return sk;
2300}
2301
2302static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
2303{
2304 struct udp_iter_state *state = seq->private;
2305 struct net *net = seq_file_net(seq);
2306
2307 do {
2308 sk = sk_nulls_next(sk);
2309 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
2310
2311 if (!sk) {
2312 if (state->bucket <= state->udp_table->mask)
2313 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2314 return udp_get_first(seq, state->bucket + 1);
2315 }
2316 return sk;
2317}
2318
2319static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
2320{
2321 struct sock *sk = udp_get_first(seq, 0);
2322
2323 if (sk)
2324 while (pos && (sk = udp_get_next(seq, sk)) != NULL)
2325 --pos;
2326 return pos ? NULL : sk;
2327}
2328
2329static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
2330{
2331 struct udp_iter_state *state = seq->private;
2332 state->bucket = MAX_UDP_PORTS;
2333
2334 return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
2335}
2336
2337static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2338{
2339 struct sock *sk;
2340
2341 if (v == SEQ_START_TOKEN)
2342 sk = udp_get_idx(seq, 0);
2343 else
2344 sk = udp_get_next(seq, v);
2345
2346 ++*pos;
2347 return sk;
2348}
2349
2350static void udp_seq_stop(struct seq_file *seq, void *v)
2351{
2352 struct udp_iter_state *state = seq->private;
2353
2354 if (state->bucket <= state->udp_table->mask)
2355 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2356}
2357
2358int udp_seq_open(struct inode *inode, struct file *file)
2359{
2360 struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
2361 struct udp_iter_state *s;
2362 int err;
2363
2364 err = seq_open_net(inode, file, &afinfo->seq_ops,
2365 sizeof(struct udp_iter_state));
2366 if (err < 0)
2367 return err;
2368
2369 s = ((struct seq_file *)file->private_data)->private;
2370 s->family = afinfo->family;
2371 s->udp_table = afinfo->udp_table;
2372 return err;
2373}
2374EXPORT_SYMBOL(udp_seq_open);
2375
2376
2377int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
2378{
2379 struct proc_dir_entry *p;
2380 int rc = 0;
2381
2382 afinfo->seq_ops.start = udp_seq_start;
2383 afinfo->seq_ops.next = udp_seq_next;
2384 afinfo->seq_ops.stop = udp_seq_stop;
2385
2386 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2387 afinfo->seq_fops, afinfo);
2388 if (!p)
2389 rc = -ENOMEM;
2390 return rc;
2391}
2392EXPORT_SYMBOL(udp_proc_register);
2393
2394void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
2395{
2396 remove_proc_entry(afinfo->name, net->proc_net);
2397}
2398EXPORT_SYMBOL(udp_proc_unregister);
2399
2400
2401static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2402 int bucket)
2403{
2404 struct inet_sock *inet = inet_sk(sp);
2405 __be32 dest = inet->inet_daddr;
2406 __be32 src = inet->inet_rcv_saddr;
2407 __u16 destp = ntohs(inet->inet_dport);
2408 __u16 srcp = ntohs(inet->inet_sport);
2409
2410 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2411 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
2412 bucket, src, srcp, dest, destp, sp->sk_state,
2413 sk_wmem_alloc_get(sp),
2414 sk_rmem_alloc_get(sp),
2415 0, 0L, 0,
2416 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
2417 0, sock_i_ino(sp),
2418 atomic_read(&sp->sk_refcnt), sp,
2419 atomic_read(&sp->sk_drops));
2420}
2421
2422int udp4_seq_show(struct seq_file *seq, void *v)
2423{
2424 seq_setwidth(seq, 127);
2425 if (v == SEQ_START_TOKEN)
2426 seq_puts(seq, " sl local_address rem_address st tx_queue "
2427 "rx_queue tr tm->when retrnsmt uid timeout "
2428 "inode ref pointer drops");
2429 else {
2430 struct udp_iter_state *state = seq->private;
2431
2432 udp4_format_sock(v, seq, state->bucket);
2433 }
2434 seq_pad(seq, '\n');
2435 return 0;
2436}
2437
2438static const struct file_operations udp_afinfo_seq_fops = {
2439 .owner = THIS_MODULE,
2440 .open = udp_seq_open,
2441 .read = seq_read,
2442 .llseek = seq_lseek,
2443 .release = seq_release_net
2444};
2445
2446
2447static struct udp_seq_afinfo udp4_seq_afinfo = {
2448 .name = "udp",
2449 .family = AF_INET,
2450 .udp_table = &udp_table,
2451 .seq_fops = &udp_afinfo_seq_fops,
2452 .seq_ops = {
2453 .show = udp4_seq_show,
2454 },
2455};
2456
2457static int __net_init udp4_proc_init_net(struct net *net)
2458{
2459 return udp_proc_register(net, &udp4_seq_afinfo);
2460}
2461
2462static void __net_exit udp4_proc_exit_net(struct net *net)
2463{
2464 udp_proc_unregister(net, &udp4_seq_afinfo);
2465}
2466
2467static struct pernet_operations udp4_net_ops = {
2468 .init = udp4_proc_init_net,
2469 .exit = udp4_proc_exit_net,
2470};
2471
2472int __init udp4_proc_init(void)
2473{
2474 return register_pernet_subsys(&udp4_net_ops);
2475}
2476
2477void udp4_proc_exit(void)
2478{
2479 unregister_pernet_subsys(&udp4_net_ops);
2480}
2481#endif
2482
2483static __initdata unsigned long uhash_entries;
2484static int __init set_uhash_entries(char *str)
2485{
2486 ssize_t ret;
2487
2488 if (!str)
2489 return 0;
2490
2491 ret = kstrtoul(str, 0, &uhash_entries);
2492 if (ret)
2493 return 0;
2494
2495 if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
2496 uhash_entries = UDP_HTABLE_SIZE_MIN;
2497 return 1;
2498}
2499__setup("uhash_entries=", set_uhash_entries);
2500
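/* Allocate the primary and secondary hash tables back to back; the table
 * size is taken from the uhash_entries= boot parameter when it is given.
 */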
2501void __init udp_table_init(struct udp_table *table, const char *name)
2502{
2503 unsigned int i;
2504
2505 table->hash = alloc_large_system_hash(name,
2506 2 * sizeof(struct udp_hslot),
2507 uhash_entries,
2508 21,
2509 0,
2510 &table->log,
2511 &table->mask,
2512 UDP_HTABLE_SIZE_MIN,
2513 64 * 1024);
2514
2515 table->hash2 = table->hash + (table->mask + 1);
2516 for (i = 0; i <= table->mask; i++) {
2517 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
2518 table->hash[i].count = 0;
2519 spin_lock_init(&table->hash[i].lock);
2520 }
2521 for (i = 0; i <= table->mask; i++) {
2522 INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
2523 table->hash2[i].count = 0;
2524 spin_lock_init(&table->hash2[i].lock);
2525 }
2526}
2527
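/* Initialise the global hash table and derive default memory accounting
 * limits from the number of free buffer pages.
 */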
2528void __init udp_init(void)
2529{
2530 unsigned long limit;
2531
2532 udp_table_init(&udp_table, "UDP");
2533 limit = nr_free_buffer_pages() / 8;
2534 limit = max(limit, 128UL);
2535 sysctl_udp_mem[0] = limit / 4 * 3;
2536 sysctl_udp_mem[1] = limit;
2537 sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
2538
2539 sysctl_udp_rmem_min = SK_MEM_QUANTUM;
2540 sysctl_udp_wmem_min = SK_MEM_QUANTUM;
2541}
2542