#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>
#include <net/l3mdev.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
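
/*
 * Example (illustrative only): with SOCK_DBG set on a socket via the
 * SO_DEBUG sockopt, a protocol can emit conditional diagnostics:
 *
 *	SOCK_DEBUG(sk, "%s: state %d\n", __func__, sk->sk_state);
 *
 * The message is printed only when sock_flag(sk, SOCK_DBG) is true;
 * otherwise the arguments are still type-checked but nothing is logged.
 */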

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

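/*
 * struct sock_common is the minimal network-layer representation of a
 * socket: the shared header of struct sock, struct inet_timewait_sock
 * and struct request_sock. Lookup keys (addresses, ports, hash) sit
 * first, and the skc_dontcopy_begin/end markers bound the region that
 * sock_copy() skips when cloning a socket. (Condensed summary; the full
 * kernel-doc lives in the upstream tree.)
 */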
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
	};

	refcount_t		skc_refcnt;
	/* private: */
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

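/*
 * struct sock - network-layer representation of a socket. (Condensed
 * summary; the full kernel-doc lives in the upstream tree.) __sk_common
 * provides the lookup keys and state shared with timewait sockets;
 * sk_receive_queue and sk_write_queue hold incoming and outgoing data;
 * sk_backlog collects packets that arrive while a user context owns the
 * socket lock; sk_rcvbuf/sk_sndbuf bound receive and send memory; the
 * sk_state_change/sk_data_ready/sk_write_space/sk_error_report callbacks
 * notify the owning layer of events.
 */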
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case it's implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	u32			sk_pacing_rate; /* bytes per second */
	u32			sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT  16
#define SK_FL_PROTO_MASK   0x00ff0000

#define SK_FL_TYPE_SHIFT   0
#define SK_FL_TYPE_MASK    0x0000ffff
#else
#define SK_FL_PROTO_SHIFT  8
#define SK_FL_PROTO_MASK   0x0000ff00

#define SK_FL_TYPE_SHIFT   16
#define SK_FL_TYPE_MASK    0xffff0000
#endif

	unsigned int		sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
#define SK_PROTOCOL_MAX U8_MAX
	u16			sk_gso_max_segs;
	u8			sk_pacing_shift;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
	struct rcu_head		sk_rcu;
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
 * on a socket means that the socket will reuse everybody else's port
 * without looking at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0 /* No reuse of port allowed */
#define SK_CAN_REUSE	1 /* Allow reuse of port if safe */
#define SK_FORCE_REUSE	2 /* Force reuse of complete port range */

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}
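
/*
 * Example (illustrative only): a datagram protocol that supports
 * SO_PEEK_OFF advances the offset after a MSG_PEEK read and rewinds it
 * when data is actually consumed:
 *
 *	int off = sk_peek_offset(sk, flags);	// where this peek starts
 *	...copy 'copied' bytes of skb data starting at 'off'...
 *	if (flags & MSG_PEEK)
 *		sk_peek_offset_fwd(sk, copied);	// next peek starts later
 *	else
 *		sk_peek_offset_bwd(sk, copied);	// queue head moved forward
 */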

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @offset:	offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		\
	for (pos = rcu_dereference(hlist_first_rcu(head));		\
	     pos != NULL &&						\
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
	SOCK_TXTIME,
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
	return static_branch_unlikely(&memalloc_socks_key);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* dont let skb dst not refcounted, we are going to leave rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
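
/*
 * Example (illustrative sketch of the usual softirq input pattern, cf.
 * tcp_v4_rcv()): process the skb directly unless a user context owns the
 * socket, in which case defer it to the backlog:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 *		kfree_skb(skb);		// over the queue limits, drop
 *	bh_unlock_sock(sk);
 */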

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(sk->sk_incoming_cpu != cpu))
		sk->sk_incoming_cpu = cpu;
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_key_false(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
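
/*
 * Example (illustrative): waiting until the receive queue is non-empty,
 * the pattern used by sk_wait_data():
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue), &wait);
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */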

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);


struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

/* Zero a socket while preserving sk_node.next: in caches created with
 * SLAB_TYPESAFE_BY_RCU, concurrent RCU lookups may still traverse that
 * pointer, so it must be left un-modified.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*pre_connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	void			(*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk);
	bool			(*stream_memory_read)(const struct sock *sk);
	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	void			(*leave_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	unsigned long		*memory_pressure;
	long			*sysctl_mem;

	int			*sysctl_wmem;
	int			*sysctl_rmem;
	u32			sysctl_wmem_offset;
	u32			sysctl_rmem_offset;

	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	slab_flags_t		slab_flags;
	unsigned int		useroffset;	/* Usercopy region offset */
	unsigned int		usersize;	/* Usercopy region size */

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
		struct smc_hashinfo	*smc_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
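
/*
 * Example (illustrative sketch only): a protocol module registers its
 * struct proto once at init time so that sk_alloc() can create sockets
 * backed by a dedicated kmem cache. 'my_proto' and 'struct my_sock' are
 * hypothetical names:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// 1 => allocate a slab cache
 *	...
 *	proto_unregister(&my_proto);		// on module exit
 */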

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (refcount_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk) : true;
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       sk_stream_memory_free(sk);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not-atomic, so that
 * this version is not executed for some kind of 'bad conditions'.
 */
static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

/* Socket memory is accounted in chunks of SK_MEM_QUANTUM bytes, which is
 * expected to match one page on most machines.
 */
#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
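
/*
 * For instance, sk_mem_pages(6000) == 2: accounting for 6000 bytes
 * consumes two 4096-byte quanta (the result counts SK_MEM_QUANTUM units).
 */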

static inline bool sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
	 * is not called and more than 2 GBytes are released at once.
	 *
	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *sk)
{
	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);

/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
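
/*
 * Example (illustrative): the fast-lock pair around a short critical
 * section; lock_sock_fast() avoids the full mutex-like slow path when no
 * user context currently owns the socket:
 *
 *	bool slow = lock_sock_fast(sk);
 *	...touch socket state...
 *	unlock_sock_fast(sk, slow);
 */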

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    char __user *optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u64 transmit_time;
	u32 mark;
	u16 tsflags;
};

static inline void sockcm_init(struct sockcm_cookie *sockc,
			       const struct sock *sk)
{
	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);

/*
 * Functions to fill in entries in struct proto_ops when
 * a protocol does not implement a particular function.
 */
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags);

/*
 * Default proto_ops implementations, built on top of the sk_prot
 * callbacks, for protocols that route these operations through the
 * generic socket layer.
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, int __user *optlen);
int compat_sock_common_setsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
void sock_init_data(struct socket *sock, struct sock *sk);

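/*
 * Socket reference counting rules (condensed from the upstream comment):
 * each external access point to a socket (hash table entry, list node,
 * running timer, skb in flight) must hold a reference. Once sk_refcnt
 * drops to zero it never increases again: the final sock_put() caller is
 * the last user and frees the socket via sk_free(), from any context.
 */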
static inline void sock_put(struct sock *sk)
{
	if (refcount_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

/* Generic version of sock_put(), dealing with all sockets
 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
 */
void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	/* sk_tx_queue_mapping accepts only up to a 16-bit value */
	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
		return;
	sk->sk_tx_queue_mapping = tx_queue;
}

#define NO_QUEUE_MAPPING	USHRT_MAX

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_tx_queue_mapping;

	return -1;
}

static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	if (skb_rx_queue_recorded(skb)) {
		u16 rx_queue = skb_get_rx_queue(skb);

		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
			return;

		sk->sk_rx_queue_mapping = rx_queue;
	}
#endif
}

static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_XPS
	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
#endif
}

#ifdef CONFIG_XPS
static inline int sk_rx_queue_get(const struct sock *sk)
{
	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
		return sk->sk_rx_queue_mapping;

	return -1;
}
#endif

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * of work.
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	WARN_ON(parent->sk);
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, parent->wq);
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
			sk->sk_dst_pending_confirm = 0;
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
					    lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!sk->sk_dst_pending_confirm)
		sk->sk_dst_pending_confirm = 1;
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
		if (sk && sk->sk_dst_pending_confirm)
			sk->sk_dst_pending_confirm = 0;
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

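/*
 * skwq_has_sleeper() and sock_poll_wait() exist to wrap a memory barrier
 * around the "are there waiters?" check. (Condensed from the upstream
 * kernel-doc:) without the barrier, one CPU adding itself to the wait
 * queue and then testing for data can race with another CPU queueing
 * data and then testing for sleepers; both tests can miss and the
 * sleeper is never woken.
 */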
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:           file
 * @p:              poll_table
 *
 * See the comments in the skwq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
	struct socket *sock = filp->private_data;

	if (!poll_does_not_wait(p)) {
		poll_wait(filp, &sock->wq->wait, p);
		/* We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the wq_has_sleeper.
		 */
		smp_mb();
	}
}

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Note:
 *  We use sk->sk_wq_raw, from contexts knowing this
 *  pointer is not NULL and cannot disappear/change.
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
 * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
 * Note: for send buffers, TCP works better if we can build two skbs at
 * minimum.
 */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);

/**
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
 * If socket allocation mode allows current thread to sleep, it means its
 * safe to use the per task page_frag instead of the per socket one.
 */
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_allow_blocking(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);

int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		int sg_start, int *sg_curr, unsigned int *sg_size,
		int first_coalesce);
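
/*
 * Example (illustrative): sendmsg paths pick a frag, make sure it has
 * room, then copy user data into it:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -ENOBUFS;	// real callers wait for memory
 *	...copy into pfrag->page at pfrag->offset...
 */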

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline bool sock_writeable(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

/* sock_skb_cb is stored at the end of skb->cb[], so that protocol
 * families keeping private state at the start of the control buffer can
 * coexist with it.
 */
#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
			    sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
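
/*
 * Example (illustrative; 'struct my_cb' is a hypothetical per-protocol
 * cb layout): verify the private area leaves the shared tail intact,
 * then stamp the drop count:
 *
 *	sock_skb_cb_check_size(sizeof(struct my_cb));
 *	SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
 */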

static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested
	 * - software time stamp available and wanted
	 * - hardware time stamps available and wanted
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)	| \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE	| \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
		sk->sk_stamp = skb->tstamp;
	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
		sk->sk_stamp = 0;
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tsflags:	timestamping flags to use
 * @tx_flags:	completed with instructions for time stamping
 *
 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
 */
static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	if (unlikely(tsflags))
		__sock_tx_timestamp(tsflags, tx_flags);
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

/* This helper checks if a socket is a full socket,
 * ie _not_ a timewait or request socket.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

/* Checks if this SKB belongs to an HW offloaded socket
 * and whether any SW fallbacks are required based on dev.
 */
static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
						   struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sock *sk = skb->sk;

	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#endif

	return skb;
}

/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_wmem ? */
	if (proto->sysctl_wmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

	return *proto->sysctl_wmem;
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_rmem ? */
	if (proto->sysctl_rmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

	return *proto->sysctl_rmem;
}

/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
 * Some wifi drivers need to tweak it to get more chunks.
 * They can use this helper from their ndo_start_xmit()
 */
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
		return;
	sk->sk_pacing_shift = val;
}

/* if a socket is bound to a device, check that the given device
 * index is either the same or that the socket is bound to an L3
 * master device and the given device index is also enslaved to
 * that L3 master
 */
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
	int mdif;

	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
		return true;

	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
	if (mdif && mdif == sk->sk_bound_dev_if)
		return true;

	return false;
}

#endif	/* _SOCK_H */