linux/net/core/request_sock.c
/*
 * NET          Generic infrastructure for Network protocols.
 *
 * Authors:     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *              From code originally in include/net/tcp.h
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all of the others
 * would be needed.
 *
 * The default used to be 128. Experiments with real servers show that
 * this is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems. This value is adjusted to 128 for very small machines
 * (<= 32 MB of memory) and to 1024 on normal or better ones (>= 256 MB).
 * Note: don't forget that somaxconn may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;

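/*
 * reqsk_queue_alloc() sizes and installs the SYN hash table for a listening
 * socket.  A rough worked example of the arithmetic below (assuming
 * sysctl_max_syn_backlog >= 128): a listen() backlog of 128 stays 128 after
 * the min/max clamping, and 128 + 1 = 129 is rounded up to the next power of
 * two, giving 256 hash buckets and a max_qlen_log of 8.  Tables whose total
 * size exceeds PAGE_SIZE are vmalloc()ed; smaller ones are kzalloc()ed.
 */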
int reqsk_queue_alloc(struct request_sock_queue *queue,
                      unsigned int nr_table_entries)
{
        size_t lopt_size = sizeof(struct listen_sock);
        struct listen_sock *lopt;

        nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
        nr_table_entries = max_t(u32, nr_table_entries, 8);
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
        if (lopt_size > PAGE_SIZE)
                lopt = __vmalloc(lopt_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL);
        else
                lopt = kzalloc(lopt_size, GFP_KERNEL);
        if (lopt == NULL)
                return -ENOMEM;

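        /*
         * Compute max_qlen_log = log2(nr_table_entries); nr_table_entries is
         * already a power of two >= 16 at this point, so the loop terminates
         * with the exact exponent (never below the starting value of 3).
         */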
        for (lopt->max_qlen_log = 3;
             (1 << lopt->max_qlen_log) < nr_table_entries;
             lopt->max_qlen_log++);

        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
        rwlock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;

        write_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
        write_unlock_bh(&queue->syn_wait_lock);

        return 0;
}

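/*
 * A minimal usage sketch (illustrative; the caller is assumed to be the
 * listen() path, e.g. something like inet_csk_listen_start()):
 *
 *      err = reqsk_queue_alloc(&icsk->icsk_accept_queue, backlog);
 *      if (err)
 *              return err;
 *      ... make the socket visible as a listener ...
 *      if (that last step fails)
 *              __reqsk_queue_destroy(&icsk->icsk_accept_queue);
 *
 * Normal teardown of an established listener goes through
 * reqsk_queue_destroy() below, which also frees any pending request_socks.
 */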
void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
        struct listen_sock *lopt;
        size_t lopt_size;

        /*
         * This is an error recovery path only:
         * no locking is needed and lopt is known to be non-NULL.
         */

        lopt = queue->listen_opt;
        lopt_size = sizeof(struct listen_sock) +
                lopt->nr_table_entries * sizeof(struct request_sock *);

        if (lopt_size > PAGE_SIZE)
                vfree(lopt);
        else
                kfree(lopt);
}

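/*
 * Detach the listen_sock from the queue under syn_wait_lock and hand it
 * back to the caller, leaving queue->listen_opt NULL.
 */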
static inline struct listen_sock *reqsk_queue_yank_listen_sk(
                struct request_sock_queue *queue)
{
        struct listen_sock *lopt;

        write_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
        write_unlock_bh(&queue->syn_wait_lock);

        return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
        /* detach the listen_opt so that it is local to us */
        struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
        size_t lopt_size = sizeof(struct listen_sock) +
                lopt->nr_table_entries * sizeof(struct request_sock *);

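        /* Free any request_socks still pending in the SYN hash table. */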
        if (lopt->qlen != 0) {
                unsigned int i;

                for (i = 0; i < lopt->nr_table_entries; i++) {
                        struct request_sock *req;

                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
                                lopt->qlen--;
                                reqsk_free(req);
                        }
                }
        }

        WARN_ON(lopt->qlen != 0);
        if (lopt_size > PAGE_SIZE)
                vfree(lopt);
        else
                kfree(lopt);
}