/*
 * Definitions for the AF_INET socket handler.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>
#include <net/l3mdev.h>
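
/*
 * SOCK_DEBUGGING enables the SOCK_DEBUG() facility below: when defined,
 * SOCK_DEBUG() prints its printf-style message at KERN_DEBUG level, but
 * only for sockets that have the SOCK_DBG flag set (see SO_DEBUG).
 * Otherwise it compiles to an empty function that still typechecks the
 * format arguments.
 */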
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
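
/*
 * Socket lock.  'slock' protects the structure in softirq/process
 * context; 'owned' plus the 'wq' waitqueue implement the sleepable,
 * mutex-like lock taken by lock_sock()/release_sock().  See the
 * locking helpers further down in this file.
 */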
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;
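
/*
 * struct sock_common: the minimal network-layer representation of a
 * socket.  It is the shared header of struct sock and of the lighter
 * weight request/timewait sockets, so the field layout here is shared
 * between those structures and is ordered for cache locality and for
 * the address/port comparison fast paths.
 */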
struct sock_common {
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener;
		struct inet_timewait_death_row *skc_tw_dr;
	};

	int			skc_dontcopy_begin[0];
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt;
	};

	refcount_t		skc_refcnt;
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt;
	};
};

struct bpf_sk_storage;
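
/*
 * struct sock: the full network-layer representation of a socket.
 * struct sock_common must stay the first member so that request and
 * timewait sockets can be treated as the common prefix; everything
 * below it is only valid for full sockets (see sk_fullsock()).
 */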
struct sock {
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff		*sk_rx_skb_cache;
	struct sk_buff_head	sk_receive_queue;
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff		*sk_tx_skb_cache;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status;
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	unsigned long		sk_pacing_rate;
	unsigned long		sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT  16
#define SK_FL_PROTO_MASK   0x00ff0000

#define SK_FL_TYPE_SHIFT   0
#define SK_FL_TYPE_MASK    0x0000ffff
#else
#define SK_FL_PROTO_SHIFT  8
#define SK_FL_PROTO_MASK   0x0000ff00

#define SK_FL_TYPE_SHIFT   16
#define SK_FL_TYPE_MASK    0xffff0000
#endif

	unsigned int		sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
#define SK_PROTOCOL_MAX U8_MAX
	u16			sk_gso_max_segs;
	u8			sk_pacing_shift;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
#if BITS_PER_LONG==32
	seqlock_t		sk_stamp_seq;
#endif
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_sk_storage __rcu	*sk_bpf_storage;
#endif
	struct rcu_head		sk_rcu;
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2
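
/*
 * Peek offset support (SO_PEEK_OFF): when sk_peek_off is >= 0, MSG_PEEK
 * reads start that many bytes into the receive queue, and the offset is
 * moved forward by the amount peeked so that successive peeks return
 * new data.  A negative sk_peek_off disables the feature.
 */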
int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}
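
/*
 * Illustrative sketch (not from the original file) of how a protocol's
 * recvmsg path consumes these helpers; the skb lookup and 'copied' are
 * placeholders, only the sk_peek_offset*() calls are real:
 *
 *	int off = sk_peek_offset(sk, flags);    // 0 unless MSG_PEEK
 *	...skip 'off' bytes into the receive queue, copy 'copied' bytes...
 *	if (flags & MSG_PEEK)
 *		sk_peek_offset_fwd(sk, copied); // next peek starts later
 *	else
 *		sk_peek_offset_bwd(sk, copied); // data consumed, rewind
 */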

static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}
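
/*
 * Reference counting: sock_hold() may only be used when the caller
 * already owns a reference, or found the socket under a lock that
 * prevents it from being freed; __sock_put() drops a reference in a
 * context where the count cannot reach zero.  Use sock_put() when the
 * reference being dropped might be the last one.
 */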
static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}

static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_tail_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}
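
/*
 * Iteration helpers: thin wrappers around the hlist/hlist_nulls
 * iterators, keyed on the embedded sk_node/sk_nulls_node/sk_bind_node
 * members.  The _rcu variants must run under rcu_read_lock().
 */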
#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)

#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	return sk->sk_socket->file->f_cred->user_ns;
}
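
/*
 * Per-socket flag bits, stored atomically in sk->sk_flags and tested
 * with sock_flag() below.
 */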
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE,
	SOCK_DBG,
	SOCK_RCVTSTAMP,
	SOCK_RCVTSTAMPNS,
	SOCK_LOCALROUTE,
	SOCK_QUEUE_SHRUNK,
	SOCK_MEMALLOC,
	SOCK_TIMESTAMPING_RX_SOFTWARE,
	SOCK_FASYNC,
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY,
	SOCK_WIFI_STATUS,
	SOCK_NOFCS,
	SOCK_FILTER_LOCKED,
	SOCK_SELECT_ERR_QUEUE,
	SOCK_RCU_FREE,
	SOCK_TXTIME,
	SOCK_XDP,
	SOCK_TSTAMP_NEW,
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
	return static_branch_unlikely(&memalloc_socks_key);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_wmem_queued) >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
}

static inline void sk_wmem_queued_add(struct sock *sk, int val)
{
	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}

void sk_stream_write_space(struct sock *sk);
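
/*
 * Backlog queue: packets that arrive in softirq context while the
 * socket is owned by a process are parked here and replayed through
 * sk_backlog_rcv() when release_sock() runs.  __sk_add_backlog()
 * assumes the caller holds the socket spinlock (bh_lock_sock).
 */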
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}
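
/*
 * Sketch of the canonical input-path usage of the helpers above,
 * modeled on what the TCP/UDP receive paths do (illustrative only;
 * 'limit' and the drop label are placeholders):
 *
 *	bh_lock_sock_nested(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);	// process immediately
 *	else if (sk_add_backlog(sk, skb, limit))
 *		goto drop;			// backlog full
 *	bh_unlock_sock(sk);
 */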

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rfs_needed)) {
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}
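
/*
 * sk_wait_event(): drop the socket lock, sleep until either the
 * condition holds or the timeout elapses, then retake the lock and
 * re-test the condition.  The re-test is mandatory because socket
 * state (including the backlog) can change while the lock is dropped.
 */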
#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}
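
/* Networking protocol blocks we attach to sockets. */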
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*pre_connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	void			(*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void			(*release_cb)(struct sock *sk);

	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk, int wake);
	bool			(*stream_memory_read)(const struct sock *sk);

	void			(*enter_memory_pressure)(struct sock *sk);
	void			(*leave_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;
	struct percpu_counter	*sockets_allocated;

	unsigned long		*memory_pressure;
	long			*sysctl_mem;

	int			*sysctl_wmem;
	int			*sysctl_rmem;
	u32			sysctl_wmem_offset;
	u32			sysctl_rmem_offset;

	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	slab_flags_t		slab_flags;
	unsigned int		useroffset;
	unsigned int		usersize;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
		struct smc_hashinfo	*smc_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (refcount_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif

static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk, wake) : true;
}

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return __sk_stream_memory_free(sk, 0);
}

static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       __sk_stream_memory_free(sk, wake);
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return __sk_stream_is_writeable(sk, 0);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}

#ifdef CONFIG_PROC_FS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif

static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

#define SOCK_DESTROY_TIME (10*HZ)

#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
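
/*
 * Socket memory accounting.  Protocols pre-charge ("schedule") memory
 * in SK_MEM_QUANTUM-sized units against the per-protocol limits; the
 * per-socket remainder lives in sk->sk_forward_alloc and is consumed
 * by sk_mem_charge() and returned by sk_mem_uncharge() as skbs come
 * and go.
 */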
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
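
/* e.g. with SK_MEM_QUANTUM == 4096, sk_mem_pages(6000) == 2,
 * i.e. (6000 + 4095) >> 12.
 */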

static inline bool sk_has_account(struct sock *sk)
{
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk_wmem_queued_add(sk, -skb->truesize);
	sk_mem_uncharge(sk, skb->truesize);
	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
		skb_zcopy_clear(skb, true);
		sk->sk_tx_skb_cache = skb;
		return;
	}
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}
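
/*
 * Socket lock usage: lock_sock()/release_sock() give process context a
 * sleepable, mutex-like hold on the socket ('owned'); bh_lock_sock()
 * takes only the underlying spinlock and is what softirq paths use to
 * test ownership and queue to the backlog.
 */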
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);

#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);

static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    char __user *optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u64 transmit_time;
	u32 mark;
	u16 tsflags;
};

static inline void sockcm_init(struct sockcm_cookie *sockc,
			       const struct sock *sk)
{
	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);
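
/*
 * Default "operation not supported" implementations that protocols can
 * plug into struct proto_ops slots they do not provide.
 */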
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags);

int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

void sock_init_data(struct socket *sock, struct sock *sk);
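
/*
 * Release a reference; when the last reference goes away the socket is
 * freed.  Note that sockets found in hash tables under RCU protection
 * may be looked up with a transient zero refcount, which is why lookup
 * paths use refcount_inc_not_zero()-style grabs and sock_gen_put().
 */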
static inline void sock_put(struct sock *sk)
{
	if (refcount_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
		return;
	sk->sk_tx_queue_mapping = tx_queue;
}

#define NO_QUEUE_MAPPING	USHRT_MAX

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_tx_queue_mapping;

	return -1;
}

static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	if (skb_rx_queue_recorded(skb)) {
		u16 rx_queue = skb_get_rx_queue(skb);

		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
			return;

		sk->sk_rx_queue_mapping = rx_queue;
	}
#endif
}

static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_XPS
	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}

#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_rx_queue_mapping;

	return -1;
}
#endif

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}
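
/*
 * Detach (sock_orphan) or attach (sock_graft) a struct sock from/to its
 * owning struct socket.  Both run under sk_callback_lock so the wakeup
 * callbacks never observe a half-updated pairing.
 */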
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	WARN_ON(parent->sk);
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}
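
/*
 * Route cache: sk_dst_cache is RCU-protected.  __sk_dst_get() is for
 * callers holding the socket lock; sk_dst_get() takes its own reference
 * and may be used locklessly.
 */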
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
			sk->sk_dst_pending_confirm = 0;
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
					    lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!sk->sk_dst_pending_confirm)
		sk->sk_dst_pending_confirm = 1;
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		if (n->confirmed != now)
			n->confirmed = now;
		if (sk && sk->sk_dst_pending_confirm)
			sk->sk_dst_pending_confirm = 0;
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len	+= copy;
	skb->data_len	+= copy;
	skb->truesize	+= copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	return 0;
}

static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
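
/*
 * skwq_has_sleeper() pairs with the smp_mb() in the poll/wait paths:
 * a writer must make its state change visible before checking for
 * sleepers, otherwise a wakeup can be missed.
 */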
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}

static inline void sock_poll_wait(struct file *filp, struct socket *sock,
				  poll_table *p)
{
	if (!poll_does_not_wait(p)) {
		poll_wait(filp, &sock->wq.wait, p);
		smp_mb();
	}
}

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

static inline int sock_error(struct sock *sk)
{
	int err;

	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	u32 val;

	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
		return;

	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);

	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);
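
/*
 * Choose a page_frag allocator: if the socket's allocation context can
 * sleep (a normal process context), the per-task page frag is safe and
 * cheaper to use; otherwise fall back to the per-socket one.
 */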
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_normal_context(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);

static inline bool sock_writeable(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);

	return v ?: 1;
}

static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

#define SOCK_SKB_CB_OFFSET	((FIELD_SIZEOF(struct sk_buff, cb) - \
				 sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)

static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}
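
/*
 * On 32-bit, ktime_t loads and stores are not atomic, so sk_stamp is
 * protected by a seqlock; 64-bit uses plain READ_ONCE()/WRITE_ONCE().
 */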
static inline ktime_t sock_read_timestamp(struct sock *sk)
{
#if BITS_PER_LONG==32
	unsigned int seq;
	ktime_t kt;

	do {
		seq = read_seqbegin(&sk->sk_stamp_seq);
		kt = sk->sk_stamp;
	} while (read_seqretry(&sk->sk_stamp_seq, seq));

	return kt;
#else
	return READ_ONCE(sk->sk_stamp);
#endif
}

static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
#if BITS_PER_LONG==32
	write_seqlock(&sk->sk_stamp_seq);
	sk->sk_stamp = kt;
	write_sequnlock(&sk->sk_stamp_seq);
#else
	WRITE_ONCE(sk->sk_stamp, kt);
#endif
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sock_write_timestamp(sk, kt);

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
		sock_write_timestamp(sk, skb->tstamp);
	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
		sock_write_timestamp(sk, 0);
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				      __u8 *tx_flags, __u32 *tskey)
{
	if (unlikely(tsflags)) {
		__sock_tx_timestamp(tsflags, tx_flags);
		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			*tskey = sk->sk_tskey++;
	}
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
}

static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
{
	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
			   &skb_shinfo(skb)->tskey);
}

DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
	    !sk->sk_rx_skb_cache) {
		sk->sk_rx_skb_cache = skb;
		skb_orphan(skb);
		return;
	}
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}
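
/*
 * A "full" socket is anything except the miniature TIME_WAIT and
 * NEW_SYN_RECV sockets, which only carry the struct sock_common prefix.
 */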
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
						   struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sock *sk = skb->sk;

	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#ifdef CONFIG_TLS_DEVICE
	} else if (unlikely(skb->decrypted)) {
		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
		kfree_skb(skb);
		skb = NULL;
#endif
	}
#endif

	return skb;
}

static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

void sock_enable_timestamp(struct sock *sk, int flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);

#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
	if (proto->sysctl_wmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

	return *proto->sysctl_wmem;
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
	if (proto->sysctl_rmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

	return *proto->sysctl_rmem;
}

static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
		return;
	sk->sk_pacing_shift = val;
}

static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
	int mdif;

	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
		return true;

	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
	if (mdif && mdif == sk->sk_bound_dev_if)
		return true;

	return false;
}

#endif