linux/net/rxrpc/peer_object.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"

/*
 * Hash a peer key.
 */
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        const u16 *p;
        unsigned int i, size;
        unsigned long hash_key;

        _enter("");

        hash_key = (unsigned long)local / __alignof__(*local);
        hash_key += srx->transport_type;
        hash_key += srx->transport_len;
        hash_key += srx->transport.family;

        switch (srx->transport.family) {
        case AF_INET:
                hash_key += (u16 __force)srx->transport.sin.sin_port;
                size = sizeof(srx->transport.sin.sin_addr);
                p = (u16 *)&srx->transport.sin.sin_addr;
                break;
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                hash_key += (u16 __force)srx->transport.sin.sin_port;
                size = sizeof(srx->transport.sin6.sin6_addr);
                p = (u16 *)&srx->transport.sin6.sin6_addr;
                break;
#endif
        default:
                WARN(1, "AF_RXRPC: Unsupported transport address family\n");
                return 0;
        }

        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0; i < size; i += sizeof(*p), p++)
                hash_key += *p;

        _leave(" 0x%lx", hash_key);
        return hash_key;
}
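
/*
 * Note on the hash above: sin_port and sin6_port sit at the same offset in
 * the transport union, so the AF_INET6 case can safely reuse sin.sin_port
 * for the port contribution.  The address itself is then folded into the
 * hash in 16-bit words - an IPv4 address contributes two words, an IPv6
 * address eight.
 */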

/*
 * Compare a peer to a key.  Return -ve, 0 or +ve to indicate less than, same
 * or greater than.
 *
 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
 * buckets and mid-bucket insertion, so we don't make full use of this
 * information at this point.
 */
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
                               struct rxrpc_local *local,
                               const struct sockaddr_rxrpc *srx,
                               unsigned long hash_key)
{
        long diff;

        diff = ((peer->hash_key - hash_key) ?:
                ((unsigned long)peer->local - (unsigned long)local) ?:
                (peer->srx.transport_type - srx->transport_type) ?:
                (peer->srx.transport_len - srx->transport_len) ?:
                (peer->srx.transport.family - srx->transport.family));
        if (diff != 0)
                return diff;

        switch (srx->transport.family) {
        case AF_INET:
                return ((u16 __force)peer->srx.transport.sin.sin_port -
                        (u16 __force)srx->transport.sin.sin_port) ?:
                        memcmp(&peer->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                return ((u16 __force)peer->srx.transport.sin6.sin6_port -
                        (u16 __force)srx->transport.sin6.sin6_port) ?:
                        memcmp(&peer->srx.transport.sin6.sin6_addr,
                               &srx->transport.sin6.sin6_addr,
                               sizeof(struct in6_addr));
#endif
        default:
                BUG();
        }
}
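
/*
 * Note: a matching hash_key alone never identifies a peer, since unrelated
 * addresses may collide into the same bucket; the full tuple of local
 * endpoint, transport type/length/family, port and address is always
 * compared before a candidate is accepted.
 */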

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_local *local,
        const struct sockaddr_rxrpc *srx,
        unsigned long hash_key)
{
        struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;

        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
                if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
                    atomic_read(&peer->usage) > 0)
                        return peer;
        }

        return NULL;
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_peer *peer;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer) {
                _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
                _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        }
        return peer;
}
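
/*
 * Note: the RCU lookup does not take a reference on the peer it returns, so
 * the caller must be inside an rcu_read_lock() section and, if the peer is
 * to be used beyond it, must pin the record with rxrpc_get_peer_maybe() as
 * rxrpc_lookup_peer() does below.
 */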

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
                                  struct rxrpc_peer *peer)
{
        struct net *net = sock_net(&rx->sk);
        struct dst_entry *dst;
        struct rtable *rt;
        struct flowi fl;
        struct flowi4 *fl4 = &fl.u.ip4;
#ifdef CONFIG_AF_RXRPC_IPV6
        struct flowi6 *fl6 = &fl.u.ip6;
#endif

        peer->if_mtu = 1500;

        memset(&fl, 0, sizeof(fl));
        switch (peer->srx.transport.family) {
        case AF_INET:
                rt = ip_route_output_ports(
                        net, fl4, NULL,
                        peer->srx.transport.sin.sin_addr.s_addr, 0,
                        htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
                if (IS_ERR(rt)) {
                        _leave(" [route err %ld]", PTR_ERR(rt));
                        return;
                }
                dst = &rt->dst;
                break;

#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                fl6->flowi6_iif = LOOPBACK_IFINDEX;
                fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
                fl6->flowi6_proto = IPPROTO_UDP;
                memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
                       sizeof(struct in6_addr));
                fl6->fl6_dport = htons(7001);
                fl6->fl6_sport = htons(7000);
                dst = ip6_route_output(net, NULL, fl6);
                if (dst->error) {
                        _leave(" [route err %d]", dst->error);
                        return;
                }
                break;
#endif

        default:
                BUG();
        }

        peer->if_mtu = dst_mtu(dst);
        dst_release(dst);

        _leave(" [if_mtu %u]", peer->if_mtu);
}
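
/*
 * Note: the fixed ports 7000/7001 in the route lookups above (the
 * traditional AFS port numbers) serve only to obtain a dst entry for the
 * destination address; the only thing taken from the result is dst_mtu(),
 * which replaces the 1500-byte default in if_mtu.
 */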

/*
 * Allocate a peer.
 */
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_peer *peer;

        _enter("");

        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
                atomic_set(&peer->usage, 1);
                peer->local = rxrpc_get_local(local);
                INIT_HLIST_HEAD(&peer->error_targets);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
                spin_lock_init(&peer->rtt_input_lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);

                rxrpc_peer_init_rtt(peer);

                if (RXRPC_TX_SMSS > 2190)
                        peer->cong_cwnd = 2;
                else if (RXRPC_TX_SMSS > 1095)
                        peer->cong_cwnd = 3;
                else
                        peer->cong_cwnd = 4;
                trace_rxrpc_peer(peer->debug_id, rxrpc_peer_new, 1, here);
        }

        _leave(" = %p", peer);
        return peer;
}
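
/*
 * Note: the initial cong_cwnd chosen above matches the RFC 3390 initial
 * window sizing - two segments when the sender's maximum segment size
 * exceeds 2190 bytes, three when it exceeds 1095 bytes, and four otherwise.
 */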

/*
 * Initialise peer record.
 */
static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
                            unsigned long hash_key)
{
        peer->hash_key = hash_key;
        rxrpc_assess_MTU_size(rx, peer);
        peer->mtu = peer->if_mtu;
        peer->rtt_last_req = ktime_get_real();

        switch (peer->srx.transport.family) {
        case AF_INET:
                peer->hdrsize = sizeof(struct iphdr);
                break;
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                peer->hdrsize = sizeof(struct ipv6hdr);
                break;
#endif
        default:
                BUG();
        }

        switch (peer->srx.transport_type) {
        case SOCK_DGRAM:
                peer->hdrsize += sizeof(struct udphdr);
                break;
        default:
                BUG();
        }

        peer->hdrsize += sizeof(struct rxrpc_wire_header);
        peer->maxdata = peer->mtu - peer->hdrsize;
}
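
/*
 * Worked example of the sizing above: for an IPv4/UDP peer with a 1500-byte
 * if_mtu, hdrsize is 20 (IP) + 8 (UDP) + 28 (rxrpc wire header) = 56 bytes,
 * so maxdata is 1444 bytes of payload per packet; an IPv6 peer loses a
 * further 20 bytes to the larger IP header.
 */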

/*
 * Set up a new peer.
 */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
                                            struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = rxrpc_alloc_peer(local, gfp);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
                rxrpc_init_peer(rx, peer, hash_key);
        }

        _leave(" = %p", peer);
        return peer;
}

static void rxrpc_free_peer(struct rxrpc_peer *peer)
{
        rxrpc_put_local(peer->local);
        kfree_rcu(peer, rcu);
}

/*
 * Set up a new incoming peer.  There shouldn't be any other matching peers
 * since we've already done a search in the list from the non-reentrant context
 * (the data_ready handler) that is the only place we can add new peers.
 */
void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
                             struct rxrpc_peer *peer)
{
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;

        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
        rxrpc_init_peer(rx, peer, hash_key);

        spin_lock(&rxnet->peer_hash_lock);
        hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
        list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                                     struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
{
        struct rxrpc_peer *peer, *candidate;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        _enter("{%pISp}", &srx->transport);

        /* search the peer list first */
        rcu_read_lock();
        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
        rcu_read_unlock();

        if (!peer) {
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
                candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
                }

                spin_lock_bh(&rxnet->peer_hash_lock);

                /* Need to check that we aren't racing with someone else */
                peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
                if (peer && !rxrpc_get_peer_maybe(peer))
                        peer = NULL;
                if (!peer) {
                        hash_add_rcu(rxnet->peer_hash,
                                     &candidate->hash_link, hash_key);
                        list_add_tail(&candidate->keepalive_link,
                                      &rxnet->peer_keepalive_new);
                }

                spin_unlock_bh(&rxnet->peer_hash_lock);

                if (peer)
                        rxrpc_free_peer(candidate);
                else
                        peer = candidate;
        }

        _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);

        _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        return peer;
}
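
/*
 * Note: the lookup above uses the usual optimistic-allocation pattern: the
 * candidate peer is created outside the lock (the allocation may need to
 * sleep, depending on gfp), the hash is rechecked under peer_hash_lock in
 * case another thread installed the same peer in the meantime, and the loser
 * of that race frees its candidate via rxrpc_free_peer() and returns the
 * reference it took on the already-installed record instead.
 */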

/*
 * Get a ref on a peer record.
 */
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
        const void *here = __builtin_return_address(0);
        int n;

        n = atomic_inc_return(&peer->usage);
        trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
        return peer;
}

/*
 * Get a ref on a peer record unless its usage has already reached 0.
 */
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
        const void *here = __builtin_return_address(0);

        if (peer) {
                int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
                if (n > 0)
                        trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
                else
                        peer = NULL;
        }
        return peer;
}
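
/*
 * Note: atomic_fetch_add_unless() is what makes the "maybe" variant safe
 * against a peer whose last reference is being dropped concurrently: if the
 * usage count has already reached zero it is left untouched and NULL is
 * returned, rather than resurrecting a record that __rxrpc_put_peer() is
 * about to free.
 */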

/*
 * Discard a peer record.
 */
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
        struct rxrpc_net *rxnet = peer->local->rxnet;

        ASSERT(hlist_empty(&peer->error_targets));

        spin_lock_bh(&rxnet->peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);

        rxrpc_free_peer(peer);
}
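
/*
 * Note: the peer is unhashed under peer_hash_lock, but its memory is only
 * released through kfree_rcu() (see rxrpc_free_peer()), so RCU lookups that
 * are still walking the bucket can safely dereference the record until the
 * grace period ends.
 */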

/*
 * Drop a ref on a peer record.
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
        const void *here = __builtin_return_address(0);
        unsigned int debug_id;
        int n;

        if (peer) {
                debug_id = peer->debug_id;
                n = atomic_dec_return(&peer->usage);
                trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
                if (n == 0)
                        __rxrpc_put_peer(peer);
        }
}

/*
 * Drop a ref on a peer record where the caller already holds the
 * peer_hash_lock.
 */
void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
{
        const void *here = __builtin_return_address(0);
        unsigned int debug_id = peer->debug_id;
        int n;

        n = atomic_dec_return(&peer->usage);
        trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
                rxrpc_free_peer(peer);
        }
}

/*
 * Make sure all peer records have been discarded.
 */
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
        struct rxrpc_peer *peer;
        int i;

        for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
                if (hlist_empty(&rxnet->peer_hash[i]))
                        continue;

                hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
                        pr_err("Leaked peer %u {%u} %pISp\n",
                               peer->debug_id,
                               atomic_read(&peer->usage),
                               &peer->srx.transport);
                }
        }
}

/**
 * rxrpc_kernel_get_peer - Get the peer address of a call
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 * @_srx: Where to place the result
 *
 * Get the address of the remote peer in a call.
 */
void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
                           struct sockaddr_rxrpc *_srx)
{
        *_srx = call->peer->srx;
}
EXPORT_SYMBOL(rxrpc_kernel_get_peer);
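
/*
 * For illustration, an in-kernel rxrpc user that already holds a live call
 * (kAFS, for instance) might retrieve and print the remote address roughly
 * like this:
 *
 *      struct sockaddr_rxrpc srx;
 *
 *      rxrpc_kernel_get_peer(sock, call, &srx);
 *      pr_info("peer address is %pISp\n", &srx.transport);
 */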

/**
 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 * @_srtt: Where to store the SRTT value.
 *
 * Get the call's peer smoothed RTT in microseconds.
 */
bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
                           u32 *_srtt)
{
        struct rxrpc_peer *peer = call->peer;

        if (peer->rtt_count == 0) {
                *_srtt = 1000000; /* 1 second */
                return false;
        }

        *_srtt = call->peer->srtt_us >> 3;
        return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
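
/*
 * For illustration, a caller might use the smoothed RTT to scale a resend or
 * abort timeout, falling back to a fixed value while no samples exist (in
 * which case *_srtt is reported as one second and false is returned):
 *
 *      u32 srtt_us;
 *      unsigned long timeout;
 *
 *      if (rxrpc_kernel_get_srtt(sock, call, &srtt_us))
 *              timeout = usecs_to_jiffies(srtt_us * 4);
 *      else
 *              timeout = 2 * HZ;
 */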