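/*
 * INET		An implementation of the TCP/IP protocol suite for the Linux
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module: the IPv4 receive path.
 *		Handles header validation, the routing decision and local
 *		delivery of incoming IPv4 packets.
 */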
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;

	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex) &&
		    net_eq(sock_net(sk), dev_net(dev))) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
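
/*
 * Example (illustrative sketch, not part of this file): sockets end up on
 * ip_ra_chain via the IP_ROUTER_ALERT socket option. A userspace daemon
 * such as an RSVP router would typically do:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *
 * after which packets carrying the Router Alert option are delivered to
 * that raw socket by the loop above.
 */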

static int ip_local_deliver_finish(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);

	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	{
		int protocol = ip_hdr(skb)->protocol;
		const struct net_protocol *ipprot;
		int raw;

	resubmit:
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot != NULL) {
			int ret;

			if (!ipprot->no_policy) {
				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					kfree_skb(skb);
					goto out;
				}
				nf_reset(skb);
			}
			ret = ipprot->handler(skb);
			if (ret < 0) {
				/* A negative return value asks for resubmission
				 * to the protocol -ret (used e.g. by IPsec after
				 * decapsulating in place).
				 */
				protocol = -ret;
				goto resubmit;
			}
			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
		} else {
			if (!raw) {
				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
					IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
					icmp_send(skb, ICMP_DEST_UNREACH,
						  ICMP_PROT_UNREACH, 0);
				}
				kfree_skb(skb);
			} else {
				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
				consume_skb(skb);
			}
		}
	}
 out:
	rcu_read_unlock();

	return 0;
}
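
/*
 * Example (sketch, never built): transport protocols plug into the dispatch
 * above by registering a struct net_protocol against their IANA protocol
 * number, as TCP and UDP do in net/ipv4/af_inet.c. All "foo" names and
 * IPPROTO_FOO below are hypothetical.
 */
#if 0
static int foo_rcv(struct sk_buff *skb);	/* hypothetical handler */

static const struct net_protocol foo_protocol = {
	.handler	= foo_rcv,
	.no_policy	= 1,	/* skip the xfrm policy check above */
	.netns_ok	= 1,
};

static int __init foo_proto_init(void)
{
	/* IPPROTO_FOO stands in for a real protocol number */
	return inet_add_protocol(&foo_protocol, IPPROTO_FOO);
}
#endif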

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
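
/*
 * Example (sketch, never built): the NF_HOOK above lets netfilter modules
 * inspect, drop or steal the packet before ip_local_deliver_finish() runs.
 * A module would attach to the same hook point roughly as below; note that
 * the nf_hookfn prototype has changed across kernel versions, so treat the
 * signature as illustrative only.
 */
#if 0
static unsigned int foo_local_in(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;	/* let the packet continue to local delivery */
}

static struct nf_hook_ops foo_ops = {
	.hook		= foo_local_in,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FIRST,
};
/* registered with nf_register_hook(&foo_ops); */
#endif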

static inline bool ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks like overkill, because not all IP options require
	 * packet mangling. But it is the easiest way for now, especially
	 * since the combination of IP options and a running sniffer is
	 * an extremely rare condition.
	 */
	if (skb_cow(skb, skb_headroom(skb))) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return false;
drop:
	return true;
}
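
/*
 * Worked example for the optlen computation above: a header with
 * iph->ihl == 15 (the maximum, since ihl is a 4-bit field counting
 * 32-bit words) is 15 * 4 = 60 bytes long, so
 * opt->optlen = 60 - sizeof(struct iphdr) = 60 - 20 = 40 bytes of
 * options, the most an IPv4 header can carry.
 */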

int sysctl_ip_early_demux __read_mostly = 1;
EXPORT_SYMBOL(sysctl_ip_early_demux);
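
/*
 * This knob is exposed to userspace as /proc/sys/net/ipv4/ip_early_demux
 * (registered in net/ipv4/sysctl_net_ipv4.c); e.g.
 *
 *	sysctl -w net.ipv4.ip_early_demux=0
 *
 * disables the early socket lookup in ip_rcv_finish() below.
 */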

static int ip_rcv_finish(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;

	if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && ipprot->early_demux) {
			ipprot->early_demux(skb);
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_dst(skb)) {
		int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					       iph->tos, skb->dev);
		if (unlikely(err)) {
			if (err == -EXDEV)
				NET_INC_STATS_BH(dev_net(skb->dev),
						 LINUX_MIB_IPRPFILTER);
			goto drop;
		}
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		/* update the per-realm ip_rt_acct counters */
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
				   skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
				   skb->len);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
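
/*
 * Example: the early-demux path above is what lets TCP look up an
 * established socket before the routing decision. In net/ipv4/af_inet.c
 * the TCP entry is registered roughly as:
 *
 *	static const struct net_protocol tcp_protocol = {
 *		.early_demux	= tcp_v4_early_demux,
 *		.handler	= tcp_v4_rcv,
 *		...
 *	};
 *
 * tcp_v4_early_demux() attaches the matching socket and, when the socket
 * has a validated cached route, sets it with skb_dst_set_noref(), so the
 * ip_route_input_noref() call above is skipped entirely.
 */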

/*
 * 	Main IP Receive routine.
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promiscuous mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	IP_ADD_STATS_BH(dev_net(dev),
			IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL,
		       ip_rcv_finish);

csum_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
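
/*
 * ip_rcv() is the entry point for all IPv4 traffic: net/ipv4/af_inet.c
 * hands it to the core network stack roughly as follows, which is how
 * every frame with an ETH_P_IP ethertype ends up here:
 *
 *	static struct packet_type ip_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = ip_rcv,
 *	};
 *
 *	dev_add_pack(&ip_packet_type);
 */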