#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>
#include <net/l3mdev.h>

/* Define SOCK_DEBUGGING to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

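/* This is the per-socket lock.  The spinlock provides synchronization
 * between user context and software interrupt (BH) processing, while
 * "owned" plus the wait queue implement the mutex-like sleeping lock
 * taken by lock_sock()/release_sock().
 */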
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics to the lock
	 * validator by explicitly managing the slock as a lock variant
	 * (in addition to the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

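/**
 *	struct sock_common - minimal network layer representation of sockets
 *
 *	This is the minimal network layer representation of sockets, the
 *	header for struct sock and struct inet_timewait_sock.  The unions
 *	below deliberately overlay the address and port pairs so the
 *	lookup fast paths can compare them as single words.
 */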
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches.
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	int			skc_dontcopy_begin[0];
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
	};

	refcount_t		skc_refcnt;
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
};

struct bpf_sk_storage;

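/**
 *	struct sock - network layer representation of sockets
 *
 *	The full socket.  It starts with a struct sock_common so that the
 *	lookup code can treat full sockets, request socks and timewait
 *	socks uniformly; the sk_* defines below are aliases into that
 *	shared header.
 */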
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff		*sk_rx_skb_cache;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case it's implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff		*sk_tx_skb_cache;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	unsigned long		sk_pacing_rate; /* bytes per second */
	unsigned long		sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT	16
#define SK_FL_PROTO_MASK	0x00ff0000

#define SK_FL_TYPE_SHIFT	0
#define SK_FL_TYPE_MASK		0x0000ffff
#else
#define SK_FL_PROTO_SHIFT	8
#define SK_FL_PROTO_MASK	0x0000ff00

#define SK_FL_TYPE_SHIFT	16
#define SK_FL_TYPE_MASK		0xffff0000
#endif

	unsigned int		sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
#define SK_PROTOCOL_MAX U8_MAX
	u16			sk_gso_max_segs;
	u8			sk_pacing_shift;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
#if BITS_PER_LONG==32
	seqlock_t		sk_stamp_seq;
#endif
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_sk_storage __rcu	*sk_bpf_storage;
#endif
	struct rcu_head		sk_rcu;
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)

/*
 * Values for sk_reuse: whether the local address/port may be reused by
 * a later bind().  SK_FORCE_REUSE overrides the per-protocol checks.
 */
#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init() */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

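/* Grab socket reference count. This operation is valid only
 * when sk is ALREADY grabbed f.e. it is found in hash table
 * or a list and the lookup is made under lock preventing hash table
 * modifications.
 */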
static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
 * cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_tail_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @offset:	offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
	SOCK_TXTIME,
	SOCK_XDP, /* XDP is attached */
	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
	return static_branch_unlikely(&memalloc_socks_key);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

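/* OOB backlog add: queue skb onto the per-socket backlog.  The caller
 * must hold the socket spin lock (bh_lock_sock()); the backlog is
 * processed by the lock owner in release_sock().
 */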
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* dont let skb dst not refcounted, we are going to leave rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(sk->sk_incoming_cpu != cpu))
		sk->sk_incoming_cpu = cpu;
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
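
/* sk_wait_event() drops the socket lock, sleeps until @__condition is
 * true, the timeout expires or the task is woken, then re-takes the
 * lock and re-evaluates the condition.  Illustrative caller sketch
 * (not from this header; my_condition() is a hypothetical predicate):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	while (!my_condition(sk)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_event(sk, &timeo, my_condition(sk), &wait);
 *	}
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */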

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

/*
 * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

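/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 */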
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*pre_connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	void			(*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk, int wake);
	bool			(*stream_memory_read)(const struct sock *sk);
	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	void			(*leave_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	unsigned long		*memory_pressure;
	long			*sysctl_mem;

	int			*sysctl_wmem;
	int			*sysctl_rmem;
	u32			sysctl_wmem_offset;
	u32			sysctl_rmem_offset;

	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	slab_flags_t		slab_flags;
	unsigned int		useroffset;	/* Usercopy region offset */
	unsigned int		usersize;	/* Usercopy region size */

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
		struct smc_hashinfo	*smc_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (refcount_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk, wake) : true;
}

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return __sk_stream_memory_free(sk, 0);
}

static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       __sk_stream_memory_free(sk, wake);
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return __sk_stream_is_writeable(sk, 0);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not-atomic, so that
 * this version is not executed without external synchronization.
 */
static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

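/* SK_MEM_QUANTUM is the unit in which forward-allocated socket memory
 * is accounted; sysctl_mem limits are expressed in PAGE_SIZE pages and
 * converted to these units by sk_prot_mem_limits() below.
 */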
#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline bool sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
	 * is not called and more than 2 GBytes are released at once.
	 *
	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
		skb_zcopy_clear(skb, true);
		sk->sk_tx_skb_cache = skb;
		return;
	}
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}
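
/* Illustrative sketch (not part of this header): process context takes
 * the sleeping lock, softirq context takes the spinlock half and defers
 * to the backlog when the lock is owned.  Helper names other than the
 * lock/unlock/backlog functions are hypothetical.
 *
 *	lock_sock(sk);			// process context, may sleep
 *	my_update_socket_state(sk);	// hypothetical; sk fields stable here
 *	release_sock(sk);		// also processes the queued backlog
 *
 *	bh_lock_sock(sk);		// softirq context
 *	if (!sock_owned_by_user(sk))
 *		my_do_rcv(sk, skb);	// hypothetical receive handler
 *	else
 *		sk_add_backlog(sk, skb, limit);
 *	bh_unlock_sock(sk);
 */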

void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);

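/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */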
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    char __user *optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u64 transmit_time;
	u32 mark;
	u16 tsflags;
};

static inline void sockcm_init(struct sockcm_cookie *sockc,
			       const struct sock *sk)
{
	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
void sock_init_data(struct socket *sock, struct sock *sk);

/*
 * Socket reference counting rules:
 *
 * - Each user of a socket SHOULD hold a reference count.
 * - Each access point to a socket (hash table bucket, list entry,
 *   running timer, skb in flight) MUST hold a reference count.
 * - Once the reference count hits zero it never increases again;
 *   sk_free() may then release the socket's resources from any
 *   context (process, BH or IRQ).
 */
static inline void sock_put(struct sock *sk)
{
	if (refcount_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

/* Generic version of sock_put(), dealing with all sockets
 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
 */
void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	/* sk_tx_queue_mapping accept only upto a 16-bit value */
	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
		return;
	sk->sk_tx_queue_mapping = tx_queue;
}

#define NO_QUEUE_MAPPING	USHRT_MAX

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_tx_queue_mapping;

	return -1;
}

static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	if (skb_rx_queue_recorded(skb)) {
		u16 rx_queue = skb_get_rx_queue(skb);

		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
			return;

		sk->sk_rx_queue_mapping = rx_queue;
	}
#endif
}

static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_XPS
	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}

#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_rx_queue_mapping;

	return -1;
}
#endif

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	WARN_ON(parent->sk);
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
			sk->sk_dst_pending_confirm = 0;
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
					    lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!sk->sk_dst_pending_confirm)
		sk->sk_dst_pending_confirm = 1;
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
		if (sk && sk->sk_dst_pending_confirm)
			sk->sk_dst_pending_confirm = 0;
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

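/* skwq_has_sleeper - check if there are any waiting processes on the
 * socket wait queue.  Checking waitqueue_active() without a memory
 * barrier could miss a waiter that is just being added, so writers use
 * wq_has_sleeper() (which contains the barrier) after updating socket
 * state; the matching smp_mb() is issued by sock_poll_wait() below
 * after queueing the waiter.
 */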
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}

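/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:	file
 * @sock:	socket to wait on
 * @p:		poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */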
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
				  poll_table *p)
{
	if (!poll_does_not_wait(p)) {
		poll_wait(filp, &sock->wq.wait, p);
		/* We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the wq_has_sleeper.
		 */
		smp_mb();
	}
}

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

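/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */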
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

/*
 *	Recover an error report and clear atomically
 */
static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* SOCKWQ_ASYNC_NOSPACE/WAITDATA only matter to fasync users, so skip
 * the atomic bit operation for sockets without SOCK_FASYNC set.
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
 * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
 * Note: for send buffers, TCP works better if we can build two skbs at
 * minimum.
 */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);

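/**
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
 * If socket allocation mode allows current thread to sleep, it means its
 * safe to use the per task page_frag instead of the per socket one.
 */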
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_allow_blocking(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline bool sock_writeable(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

/* Store sock_skb_cb at the end of skb->cb[] so protocol families
 * using skb->cb[] would keep using it directly and utilize its
 * alignment guarantee.
 */
#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
			    sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)

static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

static inline ktime_t sock_read_timestamp(struct sock *sk)
{
#if BITS_PER_LONG==32
	unsigned int seq;
	ktime_t kt;

	do {
		seq = read_seqbegin(&sk->sk_stamp_seq);
		kt = sk->sk_stamp;
	} while (read_seqretry(&sk->sk_stamp_seq, seq));

	return kt;
#else
	return sk->sk_stamp;
#endif
}

static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
#if BITS_PER_LONG==32
	write_seqlock(&sk->sk_stamp_seq);
	sk->sk_stamp = kt;
	write_sequnlock(&sk->sk_stamp_seq);
#else
	sk->sk_stamp = kt;
#endif
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested
	 * - software time stamp available and wanted
	 * - hardware time stamps available and wanted
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sock_write_timestamp(sk, kt);

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
		sock_write_timestamp(sk, skb->tstamp);
	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
		sock_write_timestamp(sk, 0);
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tsflags:	timestamping flags to use
 * @tx_flags:	completed with instructions for time stamping
 * @tskey:	filled in with next sk_tskey (not for TCP, which uses seqno)
 *
 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
 */
static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				      __u8 *tx_flags, __u32 *tskey)
{
	if (unlikely(tsflags)) {
		__sock_tx_timestamp(tsflags, tx_flags);
		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			*tskey = sk->sk_tskey++;
	}
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
}

static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
{
	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
			   &skb_shinfo(skb)->tskey);
}

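/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */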
DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
	    !sk->sk_rx_skb_cache) {
		sk->sk_rx_skb_cache = skb;
		skb_orphan(skb);
		return;
	}
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

/* This helper checks if a socket is a full socket,
 * ie not a time_wait or request_sock socket.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

/* Checks if this SKB belongs to an HW offloaded socket
 * and whether any SW fallbacks are required based on dev.
 */
static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
						   struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sock *sk = skb->sk;

	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#ifdef CONFIG_TLS_DEVICE
	} else if (unlikely(skb->decrypted)) {
		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
		kfree_skb(skb);
		skb = NULL;
#endif
	}
#endif

	return skb;
}

/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

void sock_enable_timestamp(struct sock *sk, int flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_wmem ? */
	if (proto->sysctl_wmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

	return *proto->sysctl_wmem;
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_rmem ? */
	if (proto->sysctl_rmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

	return *proto->sysctl_rmem;
}

/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
 * Some wifi drivers need to tweak it to get more chunks.
 * They can use this helper from their ndo_start_xmit()
 */
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
		return;
	sk->sk_pacing_shift = val;
}

/* if a socket is bound to a device, check that the given device
 * index is either the same or that the socket is bound to an L3
 * master device and the given device index is also enslaved to
 * that L3 master
 */
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
	int mdif;

	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
		return true;

	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
	if (mdif && mdif == sk->sk_bound_dev_if)
		return true;

	return false;
}

#endif	/* _SOCK_H */