linux/net/core/request_sock.c
/*
 * NET          Generic infrastructure for Network protocols.
 *
 * Authors:     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *              From code originally in include/net/tcp.h
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * It was 128 by default. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems. This value is adjusted to 128 for very small machines
 * (<=32MB of memory) and to 1024 on normal or better ones (>=256MB).
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);

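/*
 * Allocate and install the listen_sock (the per-listener SYN_RECV hash
 * table) for @queue.  @nr_table_entries is the caller's requested backlog;
 * it is clamped to the range [8, sysctl_max_syn_backlog] and rounded up to
 * a power of two before the table is sized.
 */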
int reqsk_queue_alloc(struct request_sock_queue *queue,
                      unsigned int nr_table_entries)
{
        size_t lopt_size = sizeof(struct listen_sock);
        struct listen_sock *lopt;

        nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
        nr_table_entries = max_t(u32, nr_table_entries, 8);
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
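        /*
         * Example with the defaults: a requested backlog of 128 is below
         * the sysctl cap of 256 and above the floor of 8, and
         * roundup_pow_of_two(128 + 1) yields a 256-entry hash table.
         */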
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
        if (lopt_size > PAGE_SIZE)
                lopt = vzalloc(lopt_size);
        else
                lopt = kzalloc(lopt_size, GFP_KERNEL);
        if (lopt == NULL)
                return -ENOMEM;

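        /*
         * max_qlen_log is the base-2 logarithm of the queue length limit:
         * the smallest value >= 3 such that 2^max_qlen_log covers
         * nr_table_entries.
         */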
        for (lopt->max_qlen_log = 3;
             (1 << lopt->max_qlen_log) < nr_table_entries;
             lopt->max_qlen_log++);

        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
        rwlock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;

        write_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
        write_unlock_bh(&queue->syn_wait_lock);

        return 0;
}
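
/*
 * Illustrative use only (not part of the original file): a protocol's
 * listen() setup path allocates the queue while moving the socket into
 * the LISTEN state, roughly the way inet_csk_listen_start() does for TCP:
 *
 *      int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, backlog);
 *
 *      if (rc != 0)
 *              return rc;
 *
 * after which the socket is published in the listening hash.
 */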

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
        struct listen_sock *lopt;
        size_t lopt_size;

        /*
         * This is an error recovery path only;
         * no locking is needed and lopt is not NULL.
         */

        lopt = queue->listen_opt;
        lopt_size = sizeof(struct listen_sock) +
                lopt->nr_table_entries * sizeof(struct request_sock *);

        if (lopt_size > PAGE_SIZE)
                vfree(lopt);
        else
                kfree(lopt);
}

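/*
 * Detach the listen_sock from @queue under syn_wait_lock, so that anyone
 * still reading listen_opt sees either the old table or NULL, never a
 * pointer that is about to be freed.
 */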
static inline struct listen_sock *reqsk_queue_yank_listen_sk(
                struct request_sock_queue *queue)
{
        struct listen_sock *lopt;

        write_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
        write_unlock_bh(&queue->syn_wait_lock);

        return lopt;
}

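/*
 * Full teardown of the accept queue's SYN table: detach it from the
 * queue, free every request_sock still pending in the hash chains and
 * then release the listen_sock itself.
 */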
void reqsk_queue_destroy(struct request_sock_queue *queue)
{
        /* make the whole listen_opt local to us */
        struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
        size_t lopt_size = sizeof(struct listen_sock) +
                lopt->nr_table_entries * sizeof(struct request_sock *);

        if (lopt->qlen != 0) {
                unsigned int i;

                for (i = 0; i < lopt->nr_table_entries; i++) {
                        struct request_sock *req;

                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
                                lopt->qlen--;
                                reqsk_free(req);
                        }
                }
        }

        WARN_ON(lopt->qlen != 0);
        if (lopt_size > PAGE_SIZE)
                vfree(lopt);
        else
                kfree(lopt);
}
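
/*
 * Lifecycle sketch (illustrative, not part of the original file): the
 * table set up by reqsk_queue_alloc() when the socket enters LISTEN is
 * torn down here when it leaves that state, e.g. for TCP roughly:
 *
 *      inet_csk_listen_start()  ->  reqsk_queue_alloc()
 *      inet_csk_listen_stop()   ->  reqsk_queue_destroy()
 */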