1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116#define pr_fmt(fmt) "IPv4: " fmt
117
118#include <linux/module.h>
119#include <linux/types.h>
120#include <linux/kernel.h>
121#include <linux/string.h>
122#include <linux/errno.h>
123#include <linux/slab.h>
124
125#include <linux/net.h>
126#include <linux/socket.h>
127#include <linux/sockios.h>
128#include <linux/in.h>
129#include <linux/inet.h>
130#include <linux/inetdevice.h>
131#include <linux/netdevice.h>
132#include <linux/etherdevice.h>
133
134#include <net/snmp.h>
135#include <net/ip.h>
136#include <net/protocol.h>
137#include <net/route.h>
138#include <linux/skbuff.h>
139#include <net/sock.h>
140#include <net/arp.h>
141#include <net/icmp.h>
142#include <net/raw.h>
143#include <net/checksum.h>
144#include <net/inet_ecn.h>
145#include <linux/netfilter_ipv4.h>
146#include <net/xfrm.h>
147#include <linux/mroute.h>
148#include <linux/netlink.h>
149
150
151
152
/* Deliver the packet to every raw socket on the Router Alert chain
 * (ip_ra_chain) that matches its protocol, bound device and netns.
 * Returns true if the skb was consumed (delivered to a listener, or
 * swallowed by the defragmenter), false if the caller still owns it.
 *
 * Caller must hold the RCU read lock (the chain is walked with
 * rcu_dereference()).
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;	/* previous matching socket; gets a clone */
	struct net_device *dev = skb->dev;

	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* A socket counts as a listener only if its protocol number
		 * matches, it is unbound or bound to the ingress device, and
		 * it lives in the same network namespace as that device.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex) &&
		    net_eq(sock_net(sk), dev_net(dev))) {
			/* Reassemble before delivery; if the fragment was
			 * queued (non-zero return) the skb is consumed.
			 */
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				/* Every listener except the final one gets a
				 * clone; the original skb goes to the last
				 * match below, avoiding one clone.
				 */
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		/* Hand the original skb to the last matching socket. */
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
189
/* Deliver a locally-addressed packet to its transport protocol handler,
 * after any raw-socket copies.  Runs as the okfn of NF_INET_LOCAL_IN.
 * Always returns 0; the skb is consumed on every path.
 */
static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);

	/* Advance skb->data past the IP header to the transport payload. */
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		/* Give matching raw sockets their copy first; 'raw' records
		 * whether any raw listener existed for this protocol.
		 */
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot) {
			int ret;

			/* Enforce IPsec input policy unless the protocol
			 * opted out (no_policy), then clear netfilter state.
			 */
			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				/* Negative return means "resubmit the skb as
				 * protocol -ret" (e.g. after decapsulation).
				 */
				protocol = -ret;
				goto resubmit;
			}
			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				/* No handler and no raw listener: report
				 * protocol-unreachable (policy permitting)
				 * and drop.
				 */
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				/* Raw socket(s) took it: count as delivered. */
				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
 out:
	rcu_read_unlock();

	return 0;
}
241
242
243
244
245int ip_local_deliver(struct sk_buff *skb)
246{
247
248
249
250
251 if (ip_is_fragment(ip_hdr(skb))) {
252 if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
253 return 0;
254 }
255
256 return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb,
257 skb->dev, NULL,
258 ip_local_deliver_finish);
259}
260
261static inline bool ip_rcv_options(struct sk_buff *skb)
262{
263 struct ip_options *opt;
264 const struct iphdr *iph;
265 struct net_device *dev = skb->dev;
266
267
268
269
270
271
272
273
274 if (skb_cow(skb, skb_headroom(skb))) {
275 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
276 goto drop;
277 }
278
279 iph = ip_hdr(skb);
280 opt = &(IPCB(skb)->opt);
281 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
282
283 if (ip_options_compile(dev_net(dev), opt, skb)) {
284 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
285 goto drop;
286 }
287
288 if (unlikely(opt->srr)) {
289 struct in_device *in_dev = __in_dev_get_rcu(dev);
290
291 if (in_dev) {
292 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
293 if (IN_DEV_LOG_MARTIANS(in_dev))
294 net_info_ratelimited("source route option %pI4 -> %pI4\n",
295 &iph->saddr,
296 &iph->daddr);
297 goto drop;
298 }
299 }
300
301 if (ip_options_rcv_srr(skb))
302 goto drop;
303 }
304
305 return false;
306drop:
307 return true;
308}
309
/* Global toggle for early demultiplexing in ip_rcv_finish(); enabled by
 * default.  NOTE(review): presumably exposed as the ip_early_demux
 * sysctl elsewhere in the stack — confirm against the sysctl table.
 */
int sysctl_ip_early_demux __read_mostly = 1;
EXPORT_SYMBOL(sysctl_ip_early_demux);
312
/* Second half of IPv4 receive, run as the okfn of NF_INET_PRE_ROUTING:
 * optionally early-demux to a socket, attach a route, account traffic
 * classes, process options, then hand the skb to its dst input handler.
 * Returns the dst_input() result, or NET_RX_DROP on failure (skb freed).
 */
static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;

	/* Let the transport protocol try an early socket lookup so the
	 * cached dst on the socket can be reused, skipping a route lookup.
	 */
	if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && ipprot->early_demux) {
			ipprot->early_demux(skb);
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/* Route the packet if early demux did not attach a dst.  -EXDEV
	 * means the reverse-path filter rejected it; count that separately.
	 */
	if (!skb_dst(skb)) {
		int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					       iph->tos, skb->dev);
		if (unlikely(err)) {
			if (err == -EXDEV)
				NET_INC_STATS_BH(dev_net(skb->dev),
						 LINUX_MIB_IPRPFILTER);
			goto drop;
		}
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	/* Per-CPU routing-class accounting: low byte of tclassid indexes
	 * the output class, bits 16..23 the input class.
	 */
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	/* ihl > 5 means the header carries options; parse them now. */
	if (iph->ihl > 5 && ip_rcv_options(skb))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
				   skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
				   skb->len);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
373
374
375
376
/* Main IPv4 receive routine, registered as the packet_type handler.
 * Validates the IP header (version, length, checksum), trims padding,
 * then queues the skb through the NF_INET_PRE_ROUTING netfilter hook
 * toward ip_rcv_finish().  Returns the hook verdict, or NET_RX_DROP on
 * any validation failure (the skb is freed on all error paths).
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	const struct iphdr *iph;
	u32 len;

	/* In promiscuous mode the NIC delivers frames addressed to other
	 * hosts; they are not for us, drop without analysis.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;


	IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);

	/* Get a private skb if it is shared with another receiver. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	/* Ensure the fixed 20-byte header is in linear data. */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* RFC 1122 header sanity: header length must be at least 5 words
	 * and the version field must be 4.
	 */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	/* ECN statistics: the MIB counters are laid out so that the ECN
	 * field of the TOS byte indexes them directly; the BUILD_BUG_ONs
	 * pin that layout.  GSO packets count as gso_segs packets.
	 */
	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	IP_ADD_STATS_BH(dev_net(dev),
			IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	/* Now pull the full header including options... */
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	/* ...and reload iph: pskb_may_pull may have moved skb->data. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	/* tot_len must fit in the skb and cover at least the header. */
	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Trim link-layer padding beyond tot_len, keeping the hardware
	 * checksum state consistent.
	 */
	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Clear control-block state left over from lower layers. */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));

	/* Detach the skb from any owning socket before netfilter. */
	skb_orphan(skb);

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
		       dev, NULL,
		       ip_rcv_finish);

	/* Note deliberate fall-through: csum errors also count as header
	 * errors, and both paths end in the common drop.
	 */
csum_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
468