#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <linux/sockptr.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DEBUG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_dport: placeholder for inet_dport/tw_dport
 *	@skc_num: placeholder for inet_num/tw_num
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_reuseport: %SO_REUSEPORT setting
 *	@skc_ipv6only: socket is IPV6 only
 *	@skc_net_refcnt: socket is using net ref counting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_v6_daddr: IPV6 destination address
 *	@skc_v6_rcv_saddr: IPV6 source address
 *	@skc_cookie: socket's cookie value
 *	@skc_flags: place holder for sk_flags
 *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@skc_listener: connection request listener socket (aka rsk_listener)
 *		[union with @skc_flags]
 *	@skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
 *		[union with @skc_flags]
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_rx_queue_mapping: rx queue number for this connection
 *	@skc_incoming_cpu: record/match cpu processing incoming packets
 *	@skc_rcv_wnd: TCP receive window size (possibly scaled)
 *		[union with @skc_incoming_cpu]
 *	@skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
 *		[union with @skc_incoming_cpu]
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
	};

	refcount_t		skc_refcnt;
	/* private: */
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

struct bpf_sk_storage;

/*
 * struct sock - network layer representation of sockets
 *
 * struct sock must begin with struct sock_common (aliased below as
 * __sk_common): socket lookup code handles full sockets, request
 * sockets and timewait sockets uniformly through that shared header,
 * so nothing may be added before it.
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff		*sk_rx_skb_cache;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case it's implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		/* private: */
		struct socket_wq	*sk_wq_raw;
		/* public: */
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff		*sk_tx_skb_cache;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	unsigned long		sk_pacing_rate; /* bytes per second */
	unsigned long		sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	u8			sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4;
	u8			sk_pacing_shift;
	u16			sk_type;
	u16			sk_protocol;
	u16			sk_gso_max_segs;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
#if BITS_PER_LONG==32
	seqlock_t		sk_stamp_seq;
#endif
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_sk_storage __rcu	*sk_bpf_storage;
#endif
	struct rcu_head		sk_rcu;
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

/* Pointer stored in sk_user_data might not be suitable for copying
 * when cloning the socket. For instance, it can point to a reference
 * counted object. sk_user_data bits being set define properties of
 * the pointer, e.g. SK_USER_DATA_NOCOPY tells that the pointer must
 * not be copied to a cloned socket.
 */
#define SK_USER_DATA_NOCOPY	1UL
#define SK_USER_DATA_BPF	2UL	/* Managed by BPF */
#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)

/**
 * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
 * @sk: socket
 */
static inline bool sk_user_data_is_nocopy(const struct sock *sk)
{
	return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
}

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)				\
({									\
	void *__tmp = rcu_dereference(__sk_user_data((sk)));		\
	(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);		\
})
#define rcu_assign_sk_user_data(sk, ptr)				\
({									\
	uintptr_t __tmp = (uintptr_t)(ptr);				\
	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
	rcu_assign_pointer(__sk_user_data((sk)), __tmp);		\
})
#define rcu_assign_sk_user_data_nocopy(sk, ptr)				\
({									\
	uintptr_t __tmp = (uintptr_t)(ptr);				\
	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
	rcu_assign_pointer(__sk_user_data((sk)),			\
			   __tmp | SK_USER_DATA_NOCOPY);		\
})
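
/* Because the low two bits of sk_user_data carry the NOCOPY/BPF tags,
 * stored pointers must be at least 4-byte aligned and readers must go
 * through the accessors above.  A minimal sketch (hypothetical caller,
 * not part of this header's API):
 *
 *	rcu_assign_sk_user_data_nocopy(sk, my_ctx);	// my_ctx kmalloc'ed,
 *							// so its low bits are 0
 *	...
 *	rcu_read_lock();
 *	struct my_ctx *ctx = rcu_dereference_sk_user_data(sk);
 *	// tag bits are masked off: ctx == my_ctx
 *	rcu_read_unlock();
 */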

/* Allowed values for sk->sk_reuse (SO_REUSEADDR handling);
 * SK_FORCE_REUSE forces address reuse regardless of the user setting
 * and is used by kernel-internal sockets.
 */
#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}
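
/* How the peek offset moves (a sketch, not kernel code): with
 * SO_PEEK_OFF enabled, each MSG_PEEK recvmsg() starts at sk_peek_off
 * and the protocol advances it with sk_peek_offset_fwd(); a read that
 * consumes queued data rewinds it with sk_peek_offset_bwd():
 *
 *	peek 100 bytes -> sk_peek_offset_fwd(sk, 100);	// off 0   -> 100
 *	peek 50 bytes  -> sk_peek_offset_fwd(sk, 50);	// off 100 -> 150
 *	read 120 bytes -> sk_peek_offset_bwd(sk, 120);	// off 150 -> 30
 */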

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
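
/* The add/del helpers keep the hash-table reference rule from the
 * comment above sock_hold(): inserting a socket into a lookup table
 * takes one reference, removing it drops that reference.  A sketch of
 * the pairing (hypothetical protocol hash functions, not a real API):
 *
 *	static void example_hash(struct sock *sk, struct hlist_head *list)
 *	{
 *		sk_add_node(sk, list);		// sock_hold() + insert
 *	}
 *
 *	static void example_unhash(struct sock *sk)
 *	{
 *		sk_del_node_init(sk);		// remove + __sock_put()
 *	}
 */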

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_tail_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
				    struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)
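
/* Lockless lookups walk these lists under RCU.  For the nulls variants
 * the terminating pointer encodes which chain it belongs to, so a
 * lookup must restart if it ended on the wrong "nulls" value (the
 * socket may have moved to another chain while we walked).  A sketch of
 * the canonical pattern (hypothetical hash slot and match predicate):
 *
 *	struct hlist_nulls_node *node;
 *	struct sock *sk;
 *
 *	rcu_read_lock();
 * begin:
 *	sk_nulls_for_each_rcu(sk, node, &slot->head)
 *		if (example_key_match(sk))	// hypothetical predicate
 *			goto found;
 *	if (get_nulls_value(node) != slot->nulls_value)
 *		goto begin;			// chain changed under us
 * found:
 *	rcu_read_unlock();
 */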

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @offset:	offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
	SOCK_TXTIME,
	SOCK_XDP, /* XDP is attached */
	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
				     int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
	return static_branch_unlikely(&memalloc_socks_key);
}

void __receive_sock(struct file *file);
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

static inline void __receive_sock(struct file *file)
{ }
#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}

static inline void sk_acceptq_added(struct sock *sk)
{
	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_wmem_queued) >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
}

static inline void sk_wmem_queued_add(struct sock *sk, int val)
{
	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* dont let skb dst not refcounted, we are going to leave rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		WRITE_ONCE(sk->sk_backlog.head, skb);
	else
		sk->sk_backlog.tail->next = skb;

	WRITE_ONCE(sk->sk_backlog.tail, skb);
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
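
/* Why the backlog exists: BH input handlers may find the socket owned
 * by a process (lock_sock() held), in which case they must not touch
 * the receive queue and instead park the skb on the backlog;
 * release_sock() later replays it through sk_backlog_rcv().  The
 * canonical receive-path shape, sketched from how protocols use these
 * helpers ("example_do_rcv" is a hypothetical handler):
 *
 *	bh_lock_sock_nested(sk);
 *	if (!sock_owned_by_user(sk)) {
 *		ret = example_do_rcv(sk, skb);
 *	} else {
 *		if (sk_add_backlog(sk, skb, limit))
 *			goto discard;		// queues full: drop
 *	}
 *	bh_unlock_sock(sk);
 */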

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
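
/* sk_wait_event() drops the socket lock while sleeping and re-takes it
 * before re-testing the condition, so the condition must be safe to
 * evaluate locklessly.  A minimal sketch of a blocking wait built on it
 * (hypothetical protocol code, mirroring the sk_stream_wait_* helpers
 * declared below):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_event(sk, &timeo,
 *			      !skb_queue_empty(&sk->sk_receive_queue), &wait);
 *	}
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */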

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

/*
 * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*pre_connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, sockptr_t optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	void			(*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *addr, int addr_len);
	int			(*bind_add)(struct sock *sk,
					struct sockaddr *addr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk, int wake);
	bool			(*stream_memory_read)(const struct sock *sk);
	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	void			(*leave_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	unsigned long		*memory_pressure;
	long			*sysctl_mem;

	int			*sysctl_wmem;
	int			*sysctl_rmem;
	u32			sysctl_wmem_offset;
	u32			sysctl_rmem_offset;

	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	slab_flags_t		slab_flags;
	unsigned int		useroffset;	/* Usercopy region offset */
	unsigned int		usersize;	/* Usercopy region size */

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
		struct smc_hashinfo	*smc_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
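
/* A protocol plugs into the socket layer by filling a struct proto and
 * registering it, usually from its module init.  A minimal sketch (the
 * "example" names are hypothetical, not an in-tree protocol):
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *		.close		= example_close,
 *		.sendmsg	= example_sendmsg,
 *		.recvmsg	= example_recvmsg,
 *	};
 *
 *	err = proto_register(&example_proto, 1);   // 1: allocate a slab
 *	...
 *	proto_unregister(&example_proto);          // on module exit
 */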

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (refcount_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk, wake) : true;
}

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return __sk_stream_memory_free(sk, 0);
}

static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       __sk_stream_memory_free(sk, wake);
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return __sk_stream_is_writeable(sk, 0);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not-atomic, so that
 * this version is not executed under the socket lock.
 */
static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

/* We used to have PAGE_SIZE here, but systems with 64KB pages
 * do not necessarily have 16x time more memory than 4KB ones.
 */
#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline bool sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
	 * is not called and more than 2 GBytes are released at once.
	 *
	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}
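
/* How the two-level accounting fits together: sk_forward_alloc is a
 * per-socket prepaid balance carved out of the protocol's global
 * memory_allocated pool in SK_MEM_QUANTUM chunks.  A sketch of the
 * flow when queueing a received skb (assuming accounting is enabled):
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		goto drop;			// global pool exhausted
 *	sk_mem_charge(sk, skb->truesize);	// spend prepaid balance
 *	...
 *	// later, when the skb is consumed/freed:
 *	sk_mem_uncharge(sk, skb->truesize);	// refund balance
 *	sk_mem_reclaim(sk);			// return whole quanta to pool
 */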

DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk_wmem_queued_add(sk, -skb->truesize);
	sk_mem_uncharge(sk, skb->truesize);
	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
		skb_ext_reset(skb);
		skb_zcopy_clear(skb, true);
		sk->sk_tx_skb_cache = skb;
		return;
	}
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	}
}

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
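
/* lock_sock_fast() avoids the full mutex-style acquisition when no one
 * owns the socket: it keeps only the bh spinlock and returns false, or
 * falls back to lock_sock() and returns true.  The return value must be
 * fed back to unlock_sock_fast().  Typical shape:
 *
 *	bool slow = lock_sock_fast(sk);
 *	...	// short, non-sleeping critical section
 *	unlock_sock_fast(sk, slow);
 */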

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
void sock_pfree(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    sockptr_t optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u64 transmit_time;
	u32 mark;
	u16 tsflags;
};

static inline void sockcm_init(struct sockcm_cookie *sockc,
			       const struct sock *sk)
{
	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
void sock_init_data(struct socket *sock, struct sock *sk);

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (an hash table bucket, reference from a list,
 *   running timer, skb in flight MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhasing is made by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BH context, so they do not get to hole.
 */
static inline void sock_put(struct sock *sk)
{
	if (refcount_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
 */
void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	/* sk_tx_queue_mapping accept only upto a 16-bit value */
	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
		return;
	sk->sk_tx_queue_mapping = tx_queue;
}

#define NO_QUEUE_MAPPING	USHRT_MAX

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_tx_queue_mapping;

	return -1;
}

static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	if (skb_rx_queue_recorded(skb)) {
		u16 rx_queue = skb_get_rx_queue(skb);

		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
			return;

		sk->sk_rx_queue_mapping = rx_queue;
	}
#endif
}

static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_XPS
	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}

#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_rx_queue_mapping;

	return -1;
}
#endif

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * work with this socket (TCP_FIN_WAIT1, for example).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	WARN_ON(parent->sk);
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
			sk->sk_dst_pending_confirm = 0;
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
					    lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}
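
/* The two setter flavours differ only in locking assumptions:
 * __sk_dst_set() requires the socket lock (note the
 * rcu_dereference_protected() with lockdep_sock_is_held()), while
 * sk_dst_set() uses xchg() so it is safe without it.  A sketch of a
 * lockless reader paired with either setter ("example_use_route" is a
 * hypothetical consumer):
 *
 *	struct dst_entry *dst = sk_dst_get(sk);	// takes a dst reference
 *	if (dst)
 *		example_use_route(dst);
 *	dst_release(dst);
 */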

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!READ_ONCE(sk->sk_dst_pending_confirm))
		WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (READ_ONCE(n->confirmed) != now)
			WRITE_ONCE(n->confirmed, now);
		if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len	+= copy;
	skb->data_len	+= copy;
	skb->truesize	+= copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Return: sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Return: sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Return: true if socket has write or read allocations
 */
static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * skwq_has_sleeper - check if there are any waiting processes
 * @wq: struct socket_wq
 *
 * Return: true if socket_wq has waiting processes
 *
 * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider following tcp code paths::
 *
 *   CPU1                CPU2
 *   sys_select          receive packet
 *   ...                 ...
 *   __add_wait_queue    update tp->rcv_nxt
 *   ...                 ...
 *   tp->rcv_nxt check   sock_def_readable
 *   ...                 {
 *   schedule            rcu_read_lock();
 *                       wq = rcu_dereference(sk->sk_wq);
 *                       if (wq && waitqueue_active(&wq->wait))
 *                           wake_up_interruptible(&wq->wait)
 *                       ...
 *                       }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side.  The CPU1
 * could then endup calling schedule and sleep forever if there are no more
 * data on the socket.
 *
 */
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:           file
 * @sock:           socket to wait on
 * @p:              poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
				  poll_table *p)
{
	if (!poll_does_not_wait(p)) {
		poll_wait(filp, &sock->wq.wait, p);
		/* We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the wq_has_sleeper.
		 */
		smp_mb();
	}
}

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}
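
/* skb_set_owner_r() is the receive-side half of skb ownership: it
 * charges skb->truesize against the socket's receive allocation and
 * arms sock_rfree() to refund it when the skb is freed.  The usual
 * datagram enqueue sequence built on it is, roughly:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		return -ENOBUFS;
 *	skb_set_owner_r(skb, sk);		// charge + set destructor
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 *	sk->sk_data_ready(sk);			// wake readers
 */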

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

/*
 *	Recover an error report and clear atomically
 */
static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Note:
 *  We use sk->sk_wq_raw, from contexts acquiring sk_lock.
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
 * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
 * Note: for send buffers, TCP works better if we can build two skbs at
 * minimum.
 */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	u32 val;

	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
		return;

	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);

	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);

/**
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
 * Use the per task page_frag instead of the per socket one for
 * optimization when we know that we're in the normal context and owns
 * everything that's associated with %current.
 *
 * Direct reclaim may nest inside other socket operations and end up
 * recursing into sk_page_frag() while it's already in use, so the task
 * page_frag is only used when gfpflags_normal_context() says the
 * allocation context allows it.
 *
 * Return: a per task page_frag if context allows that,
 * otherwise a per socket one.
 */
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_normal_context(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline bool sock_writeable(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);

	return v ?: 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

/* Store sock_skb_cb at the end of skb->cb[] so protocol families
 * using skb->cb[] would keep using it.
 */
#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
			    sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)

static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

static inline ktime_t sock_read_timestamp(struct sock *sk)
{
#if BITS_PER_LONG==32
	unsigned int seq;
	ktime_t kt;

	do {
		seq = read_seqbegin(&sk->sk_stamp_seq);
		kt = sk->sk_stamp;
	} while (read_seqretry(&sk->sk_stamp_seq, seq));

	return kt;
#else
	return READ_ONCE(sk->sk_stamp);
#endif
}

static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
#if BITS_PER_LONG==32
	write_seqlock(&sk->sk_stamp_seq);
	sk->sk_stamp = kt;
	write_sequnlock(&sk->sk_stamp_seq);
#else
	WRITE_ONCE(sk->sk_stamp, kt);
#endif
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested
	 * - software time stamp available and wanted
	 * - hardware time stamps available and wanted
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sock_write_timestamp(sk, kt);

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
		sock_write_timestamp(sk, skb->tstamp);
	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
		sock_write_timestamp(sk, 0);
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tsflags:	timestamping flags to use
 * @tx_flags:	completed with instructions for time stamping
 * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
 *
 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
 */
static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				      __u8 *tx_flags, __u32 *tskey)
{
	if (unlikely(tsflags)) {
		__sock_tx_timestamp(tsflags, tx_flags);
		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			*tskey = sk->sk_tskey++;
	}
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
}

static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
{
	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
			   &skb_shinfo(skb)->tskey);
}

DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
*/
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
	    !sk->sk_rx_skb_cache) {
		sk->sk_rx_skb_cache = skb;
		skb_orphan(skb);
		return;
	}
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline bool
skb_sk_is_prefetched(struct sk_buff *skb)
{
#ifdef CONFIG_INET
	return skb->destructor == sock_pfree;
#else
	return false;
#endif /* CONFIG_INET */
}

/* This helper checks if a socket is a full socket,
 * ie _not_ a timewait or request socket.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

static inline bool
sk_is_refcounted(struct sock *sk)
{
	/* Only full sockets have sk->sk_flags. */
	return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}

/**
 * skb_steal_sock - steal a socket from an sk_buff
 * @skb: sk_buff to steal the socket from
 * @refcounted: is set to true if the socket is reference-counted
 */
static inline struct sock *
skb_steal_sock(struct sk_buff *skb, bool *refcounted)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		*refcounted = true;
		if (skb_sk_is_prefetched(skb))
			*refcounted = sk_is_refcounted(sk);
		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	*refcounted = false;
	return NULL;
}
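
/* Early demux stores a looked-up socket in skb->sk before the protocol
 * handler runs; skb_steal_sock() transfers that reference to the
 * caller.  The @refcounted output tells the caller how to put the
 * socket when done.  Sketch of the consuming side ("example_lookup" is
 * a hypothetical lookup helper):
 *
 *	bool refcounted;
 *	struct sock *sk = skb_steal_sock(skb, &refcounted);
 *
 *	if (!sk)
 *		sk = example_lookup(skb, &refcounted);
 *	...
 *	if (refcounted)
 *		sock_put(sk);	// RCU-freed sockets need no put
 */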

/* Checks if this SKB belongs to an HW offloaded socket
 * and whether any SW fallbacks are required based on dev.
 * Check decrypted mark in case skb_orphan() cleared socket.
 */
static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
						   struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sock *sk = skb->sk;

	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#ifdef CONFIG_TLS_DEVICE
	} else if (unlikely(skb->decrypted)) {
		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
		kfree_skb(skb);
		skb = NULL;
#endif
	}
#endif

	return skb;
}

/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_wmem ? */
	if (proto->sysctl_wmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

	return *proto->sysctl_wmem;
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_rmem ? */
	if (proto->sysctl_rmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

	return *proto->sysctl_rmem;
}

/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
 * Some wifi drivers need to tweak it to get more chunks.
 * They can use this helper from their ndo_start_xmit()
 */
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
		return;
	WRITE_ONCE(sk->sk_pacing_shift, val);
}

/* if a socket is bound to a device, check that the given device
 * index is either the same or that the socket is bound to an L3
 * master device and the given device index is also enslaved to
 * that L3 master
 */
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
	int mdif;

	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
		return true;

	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
	if (mdif && mdif == sk->sk_bound_dev_if)
		return true;

	return false;
}

void sock_def_readable(struct sock *sk);

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
void sock_set_priority(struct sock *sk, u32 priority);
void sock_set_rcvbuf(struct sock *sk, int val);
void sock_set_mark(struct sock *sk, u32 val);
void sock_set_reuseaddr(struct sock *sk);
void sock_set_reuseport(struct sock *sk);
void sock_set_sndtimeo(struct sock *sk, s64 secs);

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);

#endif	/* _SOCK_H */