1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/kernel.h>
39#include <asm/uaccess.h>
40#include <asm/system.h>
41#include <linux/mm.h>
42#include <linux/interrupt.h>
43#include <linux/errno.h>
44#include <linux/sched.h>
45#include <linux/inet.h>
46#include <linux/netdevice.h>
47#include <linux/rtnetlink.h>
48#include <linux/poll.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51
52#include <net/protocol.h>
53#include <linux/skbuff.h>
54
55#include <net/checksum.h>
56#include <net/sock.h>
57#include <net/tcp_states.h>
58#include <trace/events/skb.h>
59
60
61
62
63static inline int connection_based(struct sock *sk)
64{
65 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
66}
67
68static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
69 void *key)
70{
71 unsigned long bits = (unsigned long)key;
72
73
74
75
76 if (bits && !(bits & (POLLIN | POLLERR)))
77 return 0;
78 return autoremove_wake_function(wait, mode, sync, key);
79}
80
81
82
/*
 * Sleep until a packet arrives on @sk's receive queue, the socket is
 * shut down for receive, an error is pending, or *timeo_p expires.
 *
 * Returns 0 when the caller should re-check the receive queue,
 * 1 on RCV_SHUTDOWN (*err set to 0), or a negative error which is
 * also stored in *err.  *timeo_p is updated with the remaining time.
 *
 * The checks below are performed after prepare_to_wait_exclusive() so
 * a wakeup racing with them is not lost.
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	/* A packet slipped in before we queued ourselves? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Connection-based sockets may only receive while established
	 * (or listening); report -ENOTCONN otherwise.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* Handle signals before sleeping. */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:	/* RCV_SHUTDOWN: success return, nothing more will arrive */
	*err = 0;
	error = 1;
	goto out;
}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags (MSG_PEEK leaves the skb queued and takes an
 *		extra reference; MSG_DONTWAIT makes the call non-blocking)
 *	@peeked: set to the skb's previous "peeked" state
 *	@err: error code returned when the result is NULL
 *
 *	Pulls the next skb off @sk's receive queue, blocking up to the
 *	socket's receive timeout when allowed.  Returns the skb, or NULL
 *	with *err set: a pending socket error, -EAGAIN when the queue is
 *	empty and no (more) waiting is allowed, or the outcome of an
 *	interrupted/shut-down wait (see wait_for_packet()).
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Check for pending socket error first, so callers need not
	 * inspect sk->sk_err themselves.
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* The receive queue is shared with concurrent enqueuers
		 * (possibly in irq context — hence irqsave), so peeking,
		 * marking peeked and unlinking must all happen atomically
		 * under the queue lock.
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				/* extra ref: skb stays on the queue */
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, &sk->sk_receive_queue);
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait. */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
212
213struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
214 int noblock, int *err)
215{
216 int peeked;
217
218 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
219 &peeked, err);
220}
221
/*
 * Free an skb obtained from skb_recv_datagram() and let the socket
 * reclaim (part of) the receive memory it had accounted for it.
 */
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
228
/*
 * Like skb_free_datagram(), but takes the socket lock around the free
 * for callers that do not already hold it.
 */
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	skb_free_datagram(sk, skb);
	release_sock(sk);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff received via skb_recv_datagram()
 *	@flags: MSG_ flags used for the receive
 *
 *	Frees @skb.  If it was received with MSG_PEEK it may still sit on
 *	the receive queue carrying the extra reference the peek took: if
 *	it is still at the head of the queue it is unlinked and that
 *	reference dropped; if another thread consumed it in the meantime,
 *	-ENOENT is returned.  Returns 0 otherwise.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		/* _bh lock: the queue is also touched from softirq context */
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			/* drop the reference taken by the MSG_PEEK receive */
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);

	return err;
}

EXPORT_SYMBOL(skb_kill_datagram);
280
281
282
283
284
285
286
287
288
289
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@len: amount of data to copy from buffer to iovec
 *	@to: io vector to copy to
 *
 *	Copies @len bytes of @skb starting at @offset to userspace,
 *	walking the linear header, then the page frags, then any skbs
 *	chained on the frag list.  Returns 0 or -EFAULT.
 *	Note: the iovec is advanced (modified) during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header (linear part). */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix: kmap each frag page while copying out. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	/* Finally, recurse into any skbs chained on the frag list. */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
363
364
365
366
367
368
369
370
371
372
373
374
/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Same walk as skb_copy_datagram_iovec() (header, page frags,
 *	frag list), but the iovec itself is left unmodified — the copy
 *	position is tracked in @to_offset instead.
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header (linear part). */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	/* Recurse into frag-list skbs. */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
452
453
454
455
456
457
458
459
460
461
462
463
/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy to
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Reverse direction of skb_copy_datagram_const_iovec(): fills the
 *	skb (header, page frags, frag-list skbs) from user memory.
 *	The iovec is not modified; the read position is tracked via
 *	@from_offset.  Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy into the linear header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy into the paged appendix. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	/* Recurse into frag-list skbs. */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
545
/*
 * Copy @len bytes of @skb starting at @offset to the user buffer @to
 * while accumulating a checksum of the copied data into *csump.
 * Walks the linear header, then the page frags, then frag-list skbs;
 * per-chunk checksums are combined with csum_block_add() at the
 * running byte position @pos (which matters because the 16-bit ones'
 * complement sum is position-dependent for odd offsets).
 * Returns 0 on success or -EFAULT on a userspace fault.
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header: csum accumulated directly into *csump. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	/* Copy paged appendix: each frag checksummed from 0, then folded
	 * in at position @pos.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	/* Recurse into frag-list skbs. */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
634
/*
 * Verify an skb's checksum in software over the first @len bytes.
 * Returns the folded sum (0 means the checksum is good).  On success
 * the skb is marked CHECKSUM_UNNECESSARY so it is not re-verified;
 * if the device had reported CHECKSUM_COMPLETE yet the software sum
 * is good, the device-supplied checksum was presumably wrong, so a
 * hardware csum fault is reported via netdev_rx_csum_fault().
 */
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
648
/* Verify the checksum over the entire skb length; see
 * __skb_checksum_complete_head() for the semantics.
 */
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);
654
655
656
657
658
659
660
661
662
663
664
665
666
667
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: length of the (already consumed) header at the start of @skb
 *	@iov: io vector to copy the remaining skb->len - hlen bytes into
 *
 *	Returns 0 on success, -EINVAL on checksum failure, -EFAULT on a
 *	fault during the copy.
 *
 *	NOTE(review): there is no explicit length argument — the copy size
 *	is derived as skb->len - hlen; confirm all callers guarantee the
 *	iovec can hold that many bytes and that hlen <= skb->len.
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip over any already-filled (zero-length) iovec elements.
	 * NOTE(review): assumes at least one non-empty element remains;
	 * verify callers guarantee this.
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		/* Payload spans iovec elements: verify the checksum over
		 * the whole skb first, then do a plain (non-csum) copy.
		 */
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		/* Single element: fold the header into the csum, then
		 * copy-and-checksum the payload in one pass.
		 */
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		/* Software says good but hardware said CHECKSUM_COMPLETE:
		 * presumably a device csum bug — report it.
		 */
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721unsigned int datagram_poll(struct file *file, struct socket *sock,
722 poll_table *wait)
723{
724 struct sock *sk = sock->sk;
725 unsigned int mask;
726
727 sock_poll_wait(file, sk->sk_sleep, wait);
728 mask = 0;
729
730
731 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
732 mask |= POLLERR;
733 if (sk->sk_shutdown & RCV_SHUTDOWN)
734 mask |= POLLRDHUP;
735 if (sk->sk_shutdown == SHUTDOWN_MASK)
736 mask |= POLLHUP;
737
738
739 if (!skb_queue_empty(&sk->sk_receive_queue) ||
740 (sk->sk_shutdown & RCV_SHUTDOWN))
741 mask |= POLLIN | POLLRDNORM;
742
743
744 if (connection_based(sk)) {
745 if (sk->sk_state == TCP_CLOSE)
746 mask |= POLLHUP;
747
748 if (sk->sk_state == TCP_SYN_SENT)
749 return mask;
750 }
751
752
753 if (sock_writeable(sk))
754 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
755 else
756 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
757
758 return mask;
759}
760
/* Symbols exported for use by protocol modules. */
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_recv_datagram);
765