// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 */
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
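
/* The LOCAL_OUT hook returns 1 when the packet passed all hook functions;
 * in that case the caller still owns transmission and must invoke
 * dst_output() itself.
 */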
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
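
/* Use the TTL set on the socket via IP_TTL when valid (>= 0), otherwise
 * fall back to the route's default hop limit.
 */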
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
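
/* Last common step of the output path: resolve the neighbour entry for the
 * next hop and hand the packet to the device via neigh_output().
 */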
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* Common case: the GSO segments fit within the egress MTU. */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath -  GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization-enabled
	 *    (virtio-net/vhost/tap) device, that got GRO'ed before forwarding
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}
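
/* Run the cgroup BPF egress program first; it may drop the packet or flag
 * congestion (NET_XMIT_CN) before __ip_finish_output() proceeds.
 */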
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		/* fall through */
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags & RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be  dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags & RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
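
/* Plain unicast output: account the packet, set the outgoing device and
 * protocol, then traverse NF_INET_POST_ROUTING unless netfilter already
 * rerouted the packet (IPSKB_REROUTED).
 */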
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/store
 * Equivalent to :
 *	iph->saddr = fl4->saddr;
 *	iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
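
/* If DF is clear, simply fragment. If DF is set, fragmentation is allowed
 * only when the socket ignores DF (skb->ignore_df) and the fragments fit;
 * otherwise send ICMP_FRAG_NEEDED carrying the MTU and drop the packet.
 */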
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}
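
/* Set up the fast-path fragmentation iterator over skb's frag_list: the
 * head skb becomes the first fragment and every frag_list member becomes
 * one further fragment.
 */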
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
				     struct ip_fraglist_iter *iter)
{
	struct sk_buff *to = iter->frag;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(skb)->flags;

	if (iter->offset == 0)
		ip_options_fragment(to);
}

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);

	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);
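
/* Initialise slow-path fragmentation state: remaining payload, the offset
 * where copying starts, and offset/MF values inherited from an already
 * fragmented packet.
 */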
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag, struct ip_frag_state *state)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	if (IPCB(from)->flags & IPSKB_FRAG_PMTU) {
		/* Set DF on the fragment itself; its header has already been
		 * built and checksummed by ip_frag_next(), so the checksum
		 * must be recomputed.
		 */
		struct iphdr *iph = ip_hdr(to);

		iph->frag_off |= htons(IP_DF);
		ip_send_check(iph);
	}

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}
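
/* Build and return the next fragment of the datagram described by @state,
 * or an ERR_PTR() on allocation failure. Each call consumes up to an
 * 8-byte-aligned chunk of the remaining payload.
 */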
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	 * then align the next start on an eight byte boundary.
	 */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */
	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */
	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */
	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons(state->offset >> 3);

	/*
	 *	If we are fragmenting a fragment that's not the
	 *	last fragment then keep MF on each fragment.
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to IP header plus a block of
 *	the data of the original IP data part) that will yet fit in a single
 *	device frame, and queue such a frame for sending.
 */
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */
	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */
	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag) {
				ip_fraglist_ipcb_prepare(skb, &iter);
				ip_fraglist_prepare(skb, &iter);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */
	ip_frag_init(skb, hlen, ll_rs, mtu, &state);

	/*
	 *	Keep copying data until we run out.
	 */
	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag, &state);

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);
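
/* getfrag() callback used by ip_append_data(): copy data from the msghdr
 * iterator, folding a checksum on the way unless the hardware will
 * checksum later (CHECKSUM_PARTIAL).
 */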
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;

		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
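
/* Core of ip_append_data() and ip_make_skb(): append user data to @queue
 * as one or more MTU-sized skbs, leaving headroom for the IP header and
 * options that __ip_make_skb() fills in later.
 */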
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len + 15,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;

	cork->gso_size = ipc->gso_size;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features & NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;

			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally.
	 */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
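
/* Hand a completed datagram to the IP layer, translating any positive
 * NET_XMIT_* verdict into an errno and accounting discards.
 */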
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
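
/* Build a single datagram on a private queue, without corking state on the
 * socket; callers such as UDP use this for the uncorked fast path.
 */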
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr + offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}