linux/include/net/request_sock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET          Generic infrastructure for Network protocols.
 *
 *              Definitions for request_sock
 *
 * Authors:     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *              From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
        int             family;
        unsigned int    obj_size;
        struct kmem_cache       *slab;
        char            *slab_name;
        int             (*rtx_syn_ack)(const struct sock *sk,
                                       struct request_sock *req);
        void            (*send_ack)(const struct sock *sk, struct sk_buff *skb,
                                    struct request_sock *req);
        void            (*send_reset)(const struct sock *sk,
                                      struct sk_buff *skb);
        void            (*destructor)(struct request_sock *req);
        void            (*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
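
/*
 * Example (illustrative sketch, not part of this header): a protocol
 * publishes one request_sock_ops instance describing its mini-socket.
 * "foo_request_sock_ops", "struct foo_request_sock" and the foo_*
 * handlers below are hypothetical stand-ins; TCP's real table is
 * tcp_request_sock_ops in net/ipv4/tcp_ipv4.c. The ->slab and
 * ->slab_name members are normally filled in when the owning proto is
 * registered, not in the initializer.
 *
 *      static struct request_sock_ops foo_request_sock_ops = {
 *              .family          = PF_INET,
 *              .obj_size        = sizeof(struct foo_request_sock),
 *              .rtx_syn_ack     = foo_rtx_syn_ack,     // retransmit SYN-ACK
 *              .send_ack        = foo_send_ack,        // ACK a valid segment
 *              .send_reset      = foo_send_reset,      // RST a bad segment
 *              .destructor      = foo_destructor,      // free per-req state
 *              .syn_ack_timeout = foo_syn_ack_timeout, // stats/tracing hook
 *      };
 */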

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
        struct sock_common              __req_common;
#define rsk_refcnt                      __req_common.skc_refcnt
#define rsk_hash                        __req_common.skc_hash
#define rsk_listener                    __req_common.skc_listener
#define rsk_window_clamp                __req_common.skc_window_clamp
#define rsk_rcv_wnd                     __req_common.skc_rcv_wnd

        struct request_sock             *dl_next;
        u16                             mss;
        u8                              num_retrans; /* number of retransmits */
        u8                              cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
        u8                              num_timeout:7; /* number of timeouts */
        u32                             ts_recent;
        struct timer_list               rsk_timer;
        const struct request_sock_ops   *rsk_ops;
        struct sock                     *sk;
        u32                             *saved_syn;
        u32                             secid;
        u32                             peer_secid;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
        return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
        return (struct sock *)req;
}
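
/*
 * Example (illustrative sketch): because request_sock begins with
 * struct sock_common, one object can sit in the same lookup tables as
 * full sockets and be viewed through either type. TCP does this for
 * requests in TCP_NEW_SYN_RECV state; "sk" below is a hypothetical
 * pointer returned by a socket lookup.
 *
 *      if (sk->sk_state == TCP_NEW_SYN_RECV) {
 *              struct request_sock *req = inet_reqsk(sk);
 *
 *              // ... process the pending request ...
 *              reqsk_put(req);         // drop the lookup reference
 *      }
 */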

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
            bool attach_listener)
{
        struct request_sock *req;

        req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
        if (!req)
                return NULL;
        req->rsk_listener = NULL;
        if (attach_listener) {
                if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
                        kmem_cache_free(ops->slab, req);
                        return NULL;
                }
                req->rsk_listener = sk_listener;
        }
        req->rsk_ops = ops;
        req_to_sk(req)->sk_prot = sk_listener->sk_prot;
        sk_node_init(&req_to_sk(req)->sk_node);
        sk_tx_queue_clear(req_to_sk(req));
        req->saved_syn = NULL;
        req->num_timeout = 0;
        req->num_retrans = 0;
        req->sk = NULL;
        refcount_set(&req->rsk_refcnt, 0);

        return req;
}
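
/*
 * Example (illustrative sketch): a SYN handler allocates a request
 * sock tied to the listener. "foo_request_sock_ops" and "want_cookie"
 * are hypothetical names; compare tcp_conn_request(), which passes
 * !want_cookie so that no listener reference is taken for stateless
 * syncookie answers.
 *
 *      struct request_sock *req;
 *
 *      req = reqsk_alloc(&foo_request_sock_ops, sk, !want_cookie);
 *      if (!req)
 *              goto drop;
 *      // ... parse options into req, send the SYN-ACK ...
 */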

static inline void __reqsk_free(struct request_sock *req)
{
        req->rsk_ops->destructor(req);
        if (req->rsk_listener)
                sock_put(req->rsk_listener);
        kfree(req->saved_syn);
        kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
        WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
        __reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
        if (refcount_dec_and_test(&req->rsk_refcnt))
                reqsk_free(req);
}
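
/*
 * Example (illustrative sketch): rsk_refcnt starts at 0 in
 * reqsk_alloc(), so an unpublished request can go straight to
 * reqsk_free() without tripping the WARN_ON_ONCE() above. Once the
 * request becomes visible to other CPUs (hash table, timer), the
 * owner sets the count to the number of outstanding references before
 * publishing, and each user then pairs its lookup with reqsk_put():
 *
 *      refcount_set(&req->rsk_refcnt, 2);      // e.g. hash + timer
 *      // ... publish req ...
 *      reqsk_put(req);                         // last put frees it
 */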

/*
 * For a TCP Fast Open listener -
 *      lock - protects the access to all the reqsk, which is co-owned by
 *              the listener and the child socket.
 *      qlen - pending TFO requests (still in TCP_SYN_RECV).
 *      max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *      XXX (TFO) - ideally these fields could be made part of the
 *      "listen_sock" structure above. But there is some implementation
 *      difficulty: listen_sock is part of request_sock_queue and hence is
 *      freed when a listener is stopped, while TFO-related fields may
 *      continue to be accessed even after a listener is closed, until its
 *      sk_refcnt drops to 0, implying no more outstanding TFO reqs. One
 *      solution is to keep listen_opt around until sk_refcnt drops to 0,
 *      but there is some other complexity that needs to be resolved. E.g.,
 *      a listener can be disabled temporarily through
 *      shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
        struct request_sock     *rskq_rst_head; /* Keep track of past TFO */
        struct request_sock     *rskq_rst_tail; /* requests that caused RST.
                                                 * This is part of the defense
                                                 * against spoofing attacks.
                                                 */
        spinlock_t      lock;
        int             qlen;           /* # of pending (TCP_SYN_RECV) reqs */
        int             max_qlen;       /* != 0 iff TFO is currently enabled */

        struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
        spinlock_t              rskq_lock;
        u8                      rskq_defer_accept;

        u32                     synflood_warned;
        atomic_t                qlen;
        atomic_t                young;

        struct request_sock     *rskq_accept_head;
        struct request_sock     *rskq_accept_tail;
        struct fastopen_queue   fastopenq;  /* Check max_qlen != 0 to determine
                                             * if TFO is enabled.
                                             */
};
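
/*
 * Example (illustrative sketch): TFO admission comes down to the
 * qlen/max_qlen counters in fastopenq above. Roughly what
 * net/ipv4/tcp_fastopen.c checks before creating a TFO child
 * ("sk" is the listener):
 *
 *      struct fastopen_queue *q =
 *              &inet_csk(sk)->icsk_accept_queue.fastopenq;
 *
 *      if (q->max_qlen == 0)           // TFO disabled on this listener
 *              return false;
 *      if (q->qlen >= q->max_qlen)     // too many pending TFO requests
 *              return false;
 */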

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
                           bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
        return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
                                                      struct sock *parent)
{
        struct request_sock *req;

        spin_lock_bh(&queue->rskq_lock);
        req = queue->rskq_accept_head;
        if (req) {
                sk_acceptq_removed(parent);
                WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
                if (queue->rskq_accept_head == NULL)
                        queue->rskq_accept_tail = NULL;
        }
        spin_unlock_bh(&queue->rskq_lock);
        return req;
}
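
/*
 * Example (illustrative sketch): the accept() fast path pops one
 * established child from the FIFO; compare inet_csk_accept() in
 * net/ipv4/inet_connection_sock.c:
 *
 *      struct request_sock_queue *queue =
 *              &inet_csk(sk)->icsk_accept_queue;
 *      struct request_sock *req;
 *      struct sock *newsk = NULL;
 *
 *      req = reqsk_queue_remove(queue, sk);
 *      if (req) {
 *              newsk = req->sk;        // the established child
 *              reqsk_put(req);         // the request itself is done
 *      }
 */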

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
                                       const struct request_sock *req)
{
        if (req->num_timeout == 0)
                atomic_dec(&queue->young);
        atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
        atomic_inc(&queue->young);
        atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
        return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
        return atomic_read(&queue->young);
}
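
/*
 * Example (illustrative sketch): qlen/young drive SYN-queue policing.
 * A request counts as "young" until its first SYN-ACK retransmission
 * (note reqsk_queue_removed() above, keyed on num_timeout). A
 * listener-overload check in the style of
 * inet_csk_reqsk_queue_is_full() might read:
 *
 *      if (reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue) >=
 *          sk->sk_max_ack_backlog)
 *              goto drop;      // SYN queue full: drop, or fall back
 *                              // to syncookies
 */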

#endif /* _REQUEST_SOCK_H */