/*
 * Definitions for the UDP module.
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>
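
/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables (skb control block)
 *
 *	@header:      IPv4/IPv6 private control block data
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set, indicates partial checksum coverage
 */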
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
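
/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of the list of sockets hashed into this slot
 *	@count:	number of sockets on the @head list
 *	@lock:	spinlock protecting changes to @head and @count
 */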
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));
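
/**
 *	struct udp_table - UDP hash tables
 *
 *	@hash:	hash table, sockets are hashed on local port
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in the hash tables, minus 1
 *	@log:	log2(number of slots in the hash table)
 */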
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
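
/*
 * For the secondary hash, net_hash_mix() has already been folded into @hash
 * by the caller, which is why this looks simpler than udp_hashslot().
 */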
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;
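
/* sysctl variables for udp */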
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;
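
/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */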
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
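
/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */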
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}
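
/* hash routines shared between UDPv4/6 and UDP-Litev4/6.
 *
 * UDP sockets are hashed when the local port is bound, in udp_lib_get_port(),
 * so the proto ->hash callback is never expected to be called; hence the BUG().
 */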
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);
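
/* Pick a source port for an outgoing encapsulation flow, derived from the
 * flow hash of @skb so that all packets of one flow get the same source port.
 * The port is chosen from [@min, @max), or from the local port range when
 * @min >= @max.
 */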
static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash, but we still want a
			 * port for this flow, so fall back to a random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the information leaked to an observer.  Only the upper
	 * 16 bits are relevant in the port computation below, so fold the
	 * lower half into them.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}
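
/* net/ipv4/udp.c */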
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
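
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache misses on dequeue().
 */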
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has been
	 * already validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}
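
/*
 *	SNMP statistics for UDP and UDP-Lite
 */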
#define UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t		family;
	struct udp_table	*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
	struct udp_seq_afinfo	*bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE, but looped-back packets have had their ip_summed
	 * switched to CHECKSUM_UNNECESSARY by dev_loopback_xmit().  Reset it
	 * here; PACKET_LOOPBACK identifies this specific case.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lays after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}

static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP_SEGMENT packets that traverse a UDP tunnel, get segmented and
	 * then GRO-aggregated again (e.g. over a veth pair) can reach a UDP
	 * socket with CHECKSUM_NONE, because the tunnel receive path may
	 * strip the CHECKSUM_PARTIAL information.  Packets aggregated by the
	 * GRO engine had their UDP checksum validated before aggregation, so
	 * rather than adding another check in the tunnel fastpath, force a
	 * valid csum state after segmentation and fix up the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif /* _UDP_H */