/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	04/28/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU 65535U	/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int iif;
	struct ip_options opt;	/* Compiled IP options */
	u16 flags;

#define IPSKB_FORWARDED BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
#define IPSKB_XFRM_TRANSFORMED BIT(2)
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)

	u16 frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
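
/* Illustrative note (added commentary, not from the original header):
 * ihl counts 32-bit words, so ip_hdrlen() yields bytes. A header with
 * no options has ihl == 5, i.e. ip_hdrlen() == 20; the maximum ihl of
 * 15 gives 60 bytes (20 bytes of fixed header plus up to 40 bytes of
 * options).
 */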

struct ipcm_cookie {
	struct sockcm_cookie sockc;
	__be32 addr;
	int oif;
	struct ip_options_rcu *opt;
	__u8 ttl;
	__s16 tos;
	char priority;
	__u16 gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}
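
/* Usage sketch (an assumption for illustration, not part of this header):
 * a datagram sendmsg() path typically seeds the cookie from the socket,
 * then lets ip_cmsg_send() override fields from control messages:
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (unlikely(err))
 *			return err;
 *	}
 */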

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}
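
/* Illustrative note (added commentary, not from the original header):
 * when a packet arrives through a VRF (L3 master) device, the slave's
 * rx path sets IPSKB_L3SLAVE and records the ingress ifindex in
 * IPCB(skb)->iif, so inet_sdif() recovers the slave device even though
 * skb->dev has been switched to the l3mdev. Socket lookup helpers take
 * this value as their "sdif" argument.
 */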

/* Special input handler for packets caught by router alert option.
 * They are selected only by protocol field, and then processed likely
 * local ones; but only if someone wants them! Otherwise, router alert
 * messages are dropped.
 */
struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock *sk;
	union {
		void (*destructor)(struct sock *);
		struct sock *saved_sk;
	};
	struct rcu_head rcu;
};

/* IP flags. */
#define IP_CE 0x8000		/* Flag: "Congestion" */
#define IP_DF 0x4000		/* Flag: "Don't Fragment" */
#define IP_MF 0x2000		/* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF	/* "Fragment Offset" part */

#define IP_FRAG_TIME (30 * HZ)	/* fragment lifetime */

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff *frag;
	struct iphdr *iph;
	int offset;
	unsigned int hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
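
/* Usage sketch (an assumption for illustration, not part of this header):
 * the fast path of ip_do_fragment() walks a pre-built frag list roughly
 * like this, preparing and transmitting one fragment per iteration:
 *
 *	struct ip_fraglist_iter iter;
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */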

struct ip_frag_state {
	bool DF;
	unsigned int hlen;
	unsigned int ll_rs;
	unsigned int mtu;
	unsigned int left;
	int offset;
	int ptr;
	__be16 not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);
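
/* Usage sketch (an assumption for illustration, not part of this header):
 * the slow fragmentation path allocates and emits one fragment at a time
 * until the original payload is consumed:
 *
 *	struct ip_frag_state state;
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);  // df chosen by caller
 *	while (state.left > 0) {
 *		struct sk_buff *skb2 = ip_frag_next(skb, &state);
 *
 *		if (IS_ERR(skb2)) {
 *			err = PTR_ERR(skb2);
 *			break;
 *		}
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */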

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int flags;
	__wsum csum;
	int csumoffset;	/* u16 offset of csum in iov[0].iov_base */
			/* -1 if not needed */
	int bound_dev_if;
	u8 tos;
	kuid_t uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG == 32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
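
/* Usage sketch (an assumption for illustration, not part of this header):
 * a /proc seq handler can fold per-cpu MIB counters into a local buffer
 * with the batch helper, e.g. for an IP statistics table:
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 *	for (i = 0; snmp4_ipstats_list[i].name; i++)
 *		seq_printf(seq, " %llu", buff64[i]);
 */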

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625)
 */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
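
/* Illustrative note (added commentary, not from the original header):
 * decrementing TTL by one adds 0x0100 to the 16-bit one's-complement
 * checksum, so the header checksum is patched incrementally (cf. RFC
 * 1141/1624) instead of being recomputed; (check >= 0xFFFF) folds the
 * end-around carry back in. For example, check = 0xFEFF + 0x0100 =
 * 0xFFFF, then 0xFFFF + 1 truncates to 0x0000, which a full
 * recomputation would also produce.
 */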

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}
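
/* Illustrative note (added commentary, not from the original header):
 * for atomic (DF) packets the IP ID is never needed for reassembly, so
 * connected sockets just run a per-socket counter (inet_id), while all
 * other packets get an ID from __ip_select_ident()'s hashed generator.
 * GSO callers pass segs > 1 so the counter reserves one ID per segment,
 * keeping IDs consecutive across a split super-packet.
 */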

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
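
/* Worked example (added commentary, not from the original header): the
 * low 23 bits of the group address go behind the fixed 01:00:5e OUI
 * prefix (RFC 1112, section 6.4). So 224.1.2.3 (0xe0010203) maps to
 * 01:00:5e:01:02:03, and because the top bit of buf[3] is masked off,
 * 224.129.2.3 and 225.1.2.3 map to that same MAC address.
 */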

/*
 *	Map a multicast ID onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0] = 0;		/* Reserved */
	buf[1] = 0xff;		/* Multicast QPN */
	buf[2] = 0xff;
	buf[3] = 0xff;
	addr = ntohl(naddr);
	buf[4] = 0xff;
	buf[5] = 0x10 | scope;	/* scope from broadcast address */
	buf[6] = 0x40;		/* IPv4 signature */
	buf[7] = 0x1b;
	buf[8] = broadcast[8];	/* P_Key */
	buf[9] = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
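
/* Usage sketch (an assumption for illustration, not part of this header):
 * conntrack reserves USHRT_MAX user values per direction so a zone id
 * can be folded into the defrag user, and the range check recovers the
 * direction later:
 *
 *	u32 user = IP_DEFRAG_CONNTRACK_IN + zone_id;
 *
 *	in_dir = ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *					   __IP_DEFRAG_CONNTRACK_IN_END);
 */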

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */