linux/include/net/request_sock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET          Generic infrastructure for Network protocols.
 *
 *              Definitions for request_sock
 *
 * Authors:     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *              From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
        int             family;
        unsigned int    obj_size;
        struct kmem_cache       *slab;
        char            *slab_name;
        int             (*rtx_syn_ack)(const struct sock *sk,
                                       struct request_sock *req);
        void            (*send_ack)(const struct sock *sk, struct sk_buff *skb,
                                    struct request_sock *req);
        void            (*send_reset)(const struct sock *sk,
                                      struct sk_buff *skb);
        void            (*destructor)(struct request_sock *req);
        void            (*syn_ack_timeout)(const struct request_sock *req);
};
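
/*
 * Illustrative example (not part of this header): each protocol provides a
 * single static table of these handlers.  The sketch below is modeled on
 * TCP's IPv4 table in net/ipv4/tcp_ipv4.c; the handler names come from
 * that file and may differ across kernel versions.
 *
 *      struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 *              .family          = PF_INET,
 *              .obj_size        = sizeof(struct tcp_request_sock),
 *              .rtx_syn_ack     = tcp_rtx_synack,
 *              .send_ack        = tcp_v4_reqsk_send_ack,
 *              .send_reset      = tcp_v4_send_reset,
 *              .destructor      = tcp_v4_reqsk_destructor,
 *              .syn_ack_timeout = tcp_syn_ack_timeout,
 *      };
 */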

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {
        u32 mac_hdrlen;
        u32 network_hdrlen;
        u32 tcp_hdrlen;
        u8 data[];
};
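
/*
 * data[] holds the headers of the original SYN copied back to back, so the
 * stored length is simply the sum of the three header lengths.  A helper
 * along these lines exists as tcp_saved_syn_len() in include/linux/tcp.h;
 * the sketch below is shown here for illustration only:
 *
 *      static inline u32 saved_syn_len(const struct saved_syn *syn)
 *      {
 *              return syn->mac_hdrlen + syn->network_hdrlen + syn->tcp_hdrlen;
 *      }
 */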

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
        struct sock_common              __req_common;
#define rsk_refcnt                      __req_common.skc_refcnt
#define rsk_hash                        __req_common.skc_hash
#define rsk_listener                    __req_common.skc_listener
#define rsk_window_clamp                __req_common.skc_window_clamp
#define rsk_rcv_wnd                     __req_common.skc_rcv_wnd

        struct request_sock             *dl_next;
        u16                             mss;
        u8                              num_retrans; /* number of retransmits */
        u8                              syncookie:1; /* syncookie: encode tcpopts in timestamp */
        u8                              num_timeout:7; /* number of timeouts */
        u32                             ts_recent;
        struct timer_list               rsk_timer;
        const struct request_sock_ops   *rsk_ops;
        struct sock                     *sk;
        struct saved_syn                *saved_syn;
        u32                             secid;
        u32                             peer_secid;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
        return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
        return (struct sock *)req;
}
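
/*
 * Both casts above are valid only because struct request_sock and struct
 * sock begin with the same struct sock_common member, so the shared fields
 * line up at the same offsets in either type.
 */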

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
            bool attach_listener)
{
        struct request_sock *req;

        req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
        if (!req)
                return NULL;
        req->rsk_listener = NULL;
        if (attach_listener) {
                if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
                        kmem_cache_free(ops->slab, req);
                        return NULL;
                }
                req->rsk_listener = sk_listener;
        }
        req->rsk_ops = ops;
        req_to_sk(req)->sk_prot = sk_listener->sk_prot;
        sk_node_init(&req_to_sk(req)->sk_node);
        sk_tx_queue_clear(req_to_sk(req));
        req->saved_syn = NULL;
        req->num_timeout = 0;
        req->num_retrans = 0;
        req->sk = NULL;
        refcount_set(&req->rsk_refcnt, 0);

        return req;
}
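
/*
 * Typical call pattern (sketch, assuming a listener "sk" and a protocol
 * ops table "ops"): the request is returned with rsk_refcnt == 0 and must
 * be published with a nonzero refcount before other CPUs may see it, e.g.:
 *
 *      struct request_sock *req = reqsk_alloc(ops, sk, true);
 *
 *      if (!req)
 *              return -ENOBUFS;
 *      ... fill in protocol-specific fields ...
 *      refcount_set(&req->rsk_refcnt, 1);
 */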

static inline void __reqsk_free(struct request_sock *req)
{
        req->rsk_ops->destructor(req);
        if (req->rsk_listener)
                sock_put(req->rsk_listener);
        kfree(req->saved_syn);
        kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
        WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
        __reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
        if (refcount_dec_and_test(&req->rsk_refcnt))
                reqsk_free(req);
}
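
/*
 * Lifecycle sketch (illustrative): once a request has been published with
 * a nonzero refcount, lookups take a reference with
 * refcount_inc_not_zero(&req->rsk_refcnt) and release it with reqsk_put();
 * the final reqsk_put() ends up in reqsk_free() above.
 */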

/*
 * For a TCP Fast Open listener -
 *      lock - protects access to the reqsks, which are co-owned by
 *              the listener and the child socket.
 *      qlen - pending TFO requests (still in TCP_SYN_RECV).
 *      max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *      XXX (TFO) - ideally these fields could be made part of the
 *      "listen_sock" structure above. But there is an implementation
 *      difficulty: listen_sock is part of request_sock_queue and hence is
 *      freed when a listener is stopped, while TFO related fields may
 *      continue to be accessed even after a listener is closed, until its
 *      sk_refcnt drops to 0, implying no more outstanding TFO reqs. One
 *      solution is to keep listen_opt around until sk_refcnt drops to 0,
 *      but there is other complexity to resolve. E.g., a listener can be
 *      disabled temporarily through shutdown()->tcp_disconnect(), and
 *      re-enabled later.
 */
struct fastopen_queue {
        struct request_sock     *rskq_rst_head; /* Keep track of past TFO */
        struct request_sock     *rskq_rst_tail; /* requests that caused RST.
                                                 * This is part of the defense
                                                 * against spoofing attacks.
                                                 */
        spinlock_t      lock;
        int             qlen;           /* # of pending (TCP_SYN_RECV) reqs */
        int             max_qlen;       /* != 0 iff TFO is currently enabled */

        struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
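
/*
 * Admission sketch (illustrative; the real logic lives in
 * net/ipv4/tcp_fastopen.c): a listener caps pending TFO requests against
 * max_qlen and falls back to a normal handshake once the cap is reached:
 *
 *      if (fastopenq->qlen >= fastopenq->max_qlen)
 *              ... reject the cookie, do a regular 3WHS instead ...
 */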

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
        spinlock_t              rskq_lock;
        u8                      rskq_defer_accept;

        u32                     synflood_warned;
        atomic_t                qlen;
        atomic_t                young;

        struct request_sock     *rskq_accept_head;
        struct request_sock     *rskq_accept_tail;
        struct fastopen_queue   fastopenq;  /* Check max_qlen != 0 to determine
                                             * if TFO is enabled.
                                             */
};
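
/*
 * Note: qlen counts all pending (not yet accepted) requests, while young
 * counts the subset that has never hit a SYN-ACK timeout; see
 * reqsk_queue_removed() below, which decrements young only when
 * req->num_timeout == 0.
 */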

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
                           bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
        return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
                                                      struct sock *parent)
{
        struct request_sock *req;

        spin_lock_bh(&queue->rskq_lock);
        req = queue->rskq_accept_head;
        if (req) {
                sk_acceptq_removed(parent);
                WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
                if (queue->rskq_accept_head == NULL)
                        queue->rskq_accept_tail = NULL;
        }
        spin_unlock_bh(&queue->rskq_lock);
        return req;
}
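
/*
 * Accept-path sketch (assuming this queue is the listener's accept queue,
 * as in inet_csk_accept()): callers wait until the queue is non-empty and
 * then pop the oldest established child in FIFO order:
 *
 *      if (reqsk_queue_empty(queue))
 *              ... block or return -EAGAIN ...
 *      req = reqsk_queue_remove(queue, sk);
 *      newsk = req->sk;
 *      reqsk_put(req);
 */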

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
                                       const struct request_sock *req)
{
        if (req->num_timeout == 0)
                atomic_dec(&queue->young);
        atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
        atomic_inc(&queue->young);
        atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
        return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
        return atomic_read(&queue->young);
}
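
/*
 * Usage sketch: listeners compare these counters against their backlog to
 * decide whether to drop a new SYN or switch to syncookies (illustrative,
 * modeled on inet_csk_reqsk_queue_is_full()):
 *
 *      if (reqsk_queue_len(queue) >= backlog)
 *              ... drop the SYN or engage syncookies ...
 */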

#endif /* _REQUEST_SOCK_H */