linux/include/net/inet_hashtables.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 * Authors:     Lotsa people, from code originally in tcp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
        struct hlist_nulls_head chain;
};
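
/* Illustrative sketch (not part of this header): a lockless lookup walks
 * an ehash chain under RCU using the nulls variant of hlist, e.g.:
 *
 *      struct sock *sk;
 *      const struct hlist_nulls_node *node;
 *
 *      sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *              if (sk->sk_hash == hash)
 *                      ... match on address/port cookies ...
 *      }
 *      if (get_nulls_value(node) != slot)
 *              ... chain changed under us, restart the walk ...
 *
 * 'head', 'hash' and 'slot' are assumed locals of the caller; see
 * __inet_lookup_established() in net/ipv4/inet_hashtables.c for the
 * authoritative loop.
 */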

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *      1) Sockets bound to different interfaces may share a local port.
 *         Failing that, goto test 2.
 *      2) If all sockets have sk->sk_reuse set, and none of them are in
 *         TCP_LISTEN state, the port may be shared.
 *         Failing that, goto test 3.
 *      3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *         address, and none of them are the same, the port may be
 *         shared.
 *         Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit; if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)   -DaveM
 */
struct inet_bind_bucket {
        possible_net_t          ib_net;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
        kuid_t                  fastuid;
        int                     num_owners;
        struct hlist_node       node;
        struct hlist_head       owners;
};
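
/* Illustrative sketch (assumed locals, not a kernel API): bind() first
 * finds the bucket for the requested port, then uses the fastreuse bit
 * to skip walking the owners list in the common SO_REUSEADDR case:
 *
 *      inet_bind_bucket_for_each(tb, &head->chain)
 *              if (net_eq(ib_net(tb), net) && tb->port == snum)
 *                      break;
 *      if (tb && tb->fastreuse > 0 && sk->sk_reuse &&
 *          sk->sk_state != TCP_LISTEN)
 *              ... port can be shared without scanning tb->owners ...
 *
 * See inet_csk_get_port() in net/ipv4/inet_connection_sock.c for the
 * authoritative logic.
 */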

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
        return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
        hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
        spinlock_t              lock;
        struct hlist_head       chain;
};

/*
 * Sockets can be hashed into the established or the listening table.
 * We must use a different 'nulls' end-of-chain value for the listening
 * hash table, or we might find a socket that was closed and
 * reallocated/inserted into the established hash table.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
        spinlock_t              lock;
        struct hlist_nulls_head head;
};
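
/* Illustrative sketch: a lockless walker can tell which table a nulls
 * chain belongs to from the end-of-chain marker, e.g. (assumed locals):
 *
 *      if (get_nulls_value(node) != slot + LISTENING_NULLS_BASE)
 *              goto begin;     ... restart: socket moved between tables ...
 *
 * The established table uses the plain slot number as its nulls value,
 * so the two ranges never collide.
 */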

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE       32      /* Yes, really, this is all you need. */

struct inet_hashinfo {
        /* This is for sockets with full identity only.  Sockets here will
         * always be without wildcards and will have the following invariant:
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
         */
        struct inet_ehash_bucket        *ehash;
        spinlock_t                      *ehash_locks;
        unsigned int                    ehash_mask;
        unsigned int                    ehash_locks_mask;

        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
         */
        struct inet_bind_hashbucket     *bhash;

        unsigned int                    bhash_size;
        /* 4 bytes hole on 64 bit */

        struct kmem_cache               *bind_bucket_cachep;

        /* All the above members are written once at bootup and
         * never written again _or_ are predominantly read-access.
         *
         * Now align to a new cache line as all the following members
         * might be often dirty.
         */
        /* All sockets in TCP_LISTEN state will be in here.  This is the only
         * table where wildcard'd TCP sockets can exist.  Hash function here
         * is just local port number.
         */
        struct inet_listen_hashbucket   listening_hash[INET_LHTABLE_SIZE]
                                        ____cacheline_aligned_in_smp;

        atomic_t                        bsockets;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
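
/* Illustrative sketch: writers pair the bucket with its (coarser) lock,
 * since ehash_locks_mask is typically much smaller than ehash_mask:
 *
 *      struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *      spinlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 *
 *      spin_lock(lock);
 *      ... add or remove a socket on &head->chain ...
 *      spin_unlock(lock);
 *
 * One lock guards many buckets; both helpers mask the same hash, so a
 * given chain is always guarded by the same lock.
 */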

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
        unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif
        if (nr_pcpus >= 4)
                size = 512;
        if (nr_pcpus >= 8)
                size = 1024;
        if (nr_pcpus >= 16)
                size = 2048;
        if (nr_pcpus >= 32)
                size = 4096;
        /* spinlock_t can be zero-sized (UP without spinlock debugging),
         * in which case no lock array needs to be allocated at all.
         */
        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE)
                        hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
                                                GFP_KERNEL);
                if (!hashinfo->ehash_locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&hashinfo->ehash_locks[i]);
        }
        hashinfo->ehash_locks_mask = size - 1;
        return 0;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
        if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
                unsigned int size = (hashinfo->ehash_locks_mask + 1) *
                                                        sizeof(spinlock_t);
                if (size > PAGE_SIZE)
                        vfree(hashinfo->ehash_locks);
                else
#endif
                kfree(hashinfo->ehash_locks);
                hashinfo->ehash_locks = NULL;
        }
}
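
/* Illustrative sketch (placeholder names): a protocol's init path
 * allocates the lock array once and frees it only on failure/teardown:
 *
 *      if (inet_ehash_locks_alloc(&my_hashinfo))
 *              goto out_fail;
 *      ...
 *      inet_ehash_locks_free(&my_hashinfo);
 *
 * 'my_hashinfo' is hypothetical; TCP's instance is tcp_hashinfo.
 */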

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
                        struct inet_bind_hashbucket *head,
                        const unsigned short snum);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
                               const u32 bhash_size)
{
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
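
/* Illustrative sketch: finding the bind bucket for a local port means
 * hashing the (netns, port) pair and locking that chain (assumed locals):
 *
 *      struct inet_bind_hashbucket *head =
 *              &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)];
 *
 *      spin_lock(&head->lock);
 *      inet_bind_bucket_for_each(tb, &head->chain)
 *              if (net_eq(ib_net(tb), net) && tb->port == snum)
 *                      break;
 *      spin_unlock(&head->lock);
 */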

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
        return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
        return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);

int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    const __be32 saddr, const __be16 sport,
                                    const __be32 daddr,
                                    const unsigned short hnum,
                                    const int dif);

static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                __be32 saddr, __be16 sport,
                __be32 daddr, __be16 dport, int dif)
{
        return __inet_lookup_listener(net, hashinfo, saddr, sport,
                                      daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
                                   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
                                   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)     \
        (((__sk)->sk_portpair == (__ports))                     &&      \
         ((__sk)->sk_addrpair == (__cookie))                    &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_portpair == (__ports))             &&              \
         ((__sk)->sk_daddr      == (__saddr))           &&              \
         ((__sk)->sk_rcv_saddr  == (__daddr))           &&              \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
         net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
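
/* Illustrative sketch: the established-table lookup builds the cookies
 * once, then compares each socket on the chain with single loads:
 *
 *      INET_ADDR_COOKIE(acookie, saddr, daddr);
 *      const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *
 *      sk_nulls_for_each_rcu(sk, node, &head->chain) {
 *              if (sk->sk_hash != hash)
 *                      continue;
 *              if (likely(INET_MATCH(sk, net, acookie,
 *                                    saddr, daddr, ports, dif)))
 *                      ... found it ...
 *      }
 *
 * This mirrors __inet_lookup_established() in net/ipv4/inet_hashtables.c;
 * 'saddr', 'daddr', 'sport', 'hnum', 'hash', 'dif' and 'head' are assumed
 * locals.
 */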

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
struct sock *__inet_lookup_established(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif);

static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
                                const __be32 saddr, const __be16 sport,
                                const __be32 daddr, const __be16 dport,
                                const int dif)
{
        return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
                                         ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct net *net,
                                         struct inet_hashinfo *hashinfo,
                                         const __be32 saddr, const __be16 sport,
                                         const __be32 daddr, const __be16 dport,
                                         const int dif)
{
        u16 hnum = ntohs(dport);
        struct sock *sk = __inet_lookup_established(net, hashinfo,
                                saddr, sport, daddr, hnum, dif);

        return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport,
                                             daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const __be16 dport,
                                       const int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
        local_bh_enable();

        return sk;
}
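
/* Illustrative sketch (hypothetical caller): a diagnostic path resolving
 * a TCP 4-tuple to a socket; ports are network byte order:
 *
 *      struct sock *sk;
 *
 *      sk = inet_lookup(net, &tcp_hashinfo, saddr, htons(80),
 *                       daddr, htons(34567), 0);
 *      if (sk) {
 *              ... use sk ...
 *              sock_put(sk);   ... drop the reference the lookup took ...
 *      }
 *
 * tcp_hashinfo is TCP's global inet_hashinfo instance; dif == 0 means
 * "any device".
 */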

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                             struct sk_buff *skb,
                                             const __be16 sport,
                                             const __be16 dport)
{
        struct sock *sk = skb_steal_sock(skb);
        const struct iphdr *iph = ip_hdr(skb);

        if (sk)
                return sk;
        else
                return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
                                     iph->saddr, sport,
                                     iph->daddr, dport, inet_iif(skb));
}

u32 sk_ehashfn(const struct sock *sk);
u32 inet6_ehashfn(const struct net *net,
                  const struct in6_addr *laddr, const u16 lport,
                  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}
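
/* Illustrative example: after sk_daddr_set(sk, addr) with addr 192.0.2.1,
 * the IPv6 mirror holds the v4-mapped form:
 *
 *      sk->sk_daddr    == htonl(0xc0000201)    (192.0.2.1)
 *      sk->sk_v6_daddr == ::ffff:192.0.2.1
 *
 * so dual-stack code can compare only the IPv6 fields regardless of the
 * socket's actual family.
 */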

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk, u32 port_offset,
                        int (*check_established)(struct inet_timewait_death_row *,
                                                 struct sock *, __u16,
                                                 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk);
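
/* Illustrative sketch: tcp_v4_connect() uses this to pick an ephemeral
 * port and insert the socket in one step:
 *
 *      err = inet_hash_connect(&tcp_death_row, sk);
 *      if (err)
 *              goto failure;
 *
 * tcp_death_row is TCP's inet_timewait_death_row; the helper checks each
 * candidate port against the established table (including TIME_WAIT
 * sockets) before binding it.
 */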
#endif /* _INET_HASHTABLES_H */