linux/net/rxrpc/conn_service.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
                                                     struct sk_buff *skb)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_conn_proto k;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rb_node *p;
        unsigned int seq = 0;

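        /* RXRPC_CIDMASK strips the channel number out of the CID; the epoch
         * and masked CID are overlaid by a single u64 index_key in struct
         * rxrpc_conn_proto, so the walk below can compare both fields at
         * once.
         */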
        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
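                /* read_seqbegin_or_lock() begins a lockless seqcount read
                 * when @seq is even and takes the read lock when it is odd;
                 * need_seqretry() below notices if a writer intervened so
                 * that the walk can be retried.
                 */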
                read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

                p = rcu_dereference_raw(peer->service_conns.rb_node);
                while (p) {
                        conn = rb_entry(p, struct rxrpc_connection, service_node);

                        if (conn->proto.index_key < k.index_key)
                                p = rcu_dereference_raw(p->rb_left);
                        else if (conn->proto.index_key > k.index_key)
                                p = rcu_dereference_raw(p->rb_right);
                        else
                                break;
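                        /* Not a match - clear the cursor so that falling off
                         * the bottom of the tree returns NULL.
                         */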
                        conn = NULL;
                }
        } while (need_seqretry(&peer->service_conn_lock, seq));

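        /* done_seqretry() drops the read lock if it was taken (odd seq) and
         * is a no-op after a successful lockless pass.
         */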
        done_seqretry(&peer->service_conn_lock, seq);
        _leave(" = %d", conn ? conn->debug_id : -1);
        return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
                                       struct rxrpc_connection *conn)
{
        struct rxrpc_connection *cursor = NULL;
        struct rxrpc_conn_proto k = conn->proto;
        struct rb_node **pp, *parent;

        write_seqlock_bh(&peer->service_conn_lock);

        pp = &peer->service_conns.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                cursor = rb_entry(parent,
                                  struct rxrpc_connection, service_node);

                if (cursor->proto.index_key < k.index_key)
                        pp = &(*pp)->rb_left;
                else if (cursor->proto.index_key > k.index_key)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_conn;
        }

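        /* rb_link_node_rcu() uses an RCU-safe pointer assignment to attach
         * the node, so a lockless walker in rxrpc_find_service_conn_rcu()
         * never sees a half-initialised node.
         */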
        rb_link_node_rcu(&conn->service_node, parent, pp);
        rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
        set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
        write_sequnlock_bh(&peer->service_conn_lock);
        _leave(" = %d [new]", conn->debug_id);
        return;

found_extant_conn:
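        /* A usage count of zero means the old connection is defunct and
         * merely awaiting reaping, so it is safe to displace it from the
         * tree.
         */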
        if (atomic_read(&cursor->usage) == 0)
                goto replace_old_connection;
        write_sequnlock_bh(&peer->service_conn_lock);
        /* We should not be able to get here.  rxrpc_new_incoming_connection()
         * is called in a non-reentrant context, so there can't be a race to
         * insert a new connection.
         */
        BUG();

replace_old_connection:
        /* The old connection is from an outdated epoch. */
        _debug("replace conn");
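        /* rb_replace_node_rcu() swaps the nodes in place without
         * rebalancing, so concurrent lockless walkers see either the old
         * node or the new one, never an inconsistent tree.
         */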
        rb_replace_node_rcu(&cursor->service_node,
                            &conn->service_node,
                            &peer->service_conns);
        clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
        goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
                                                           gfp_t gfp)
{
        struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

        if (conn) {
                /* We maintain an extra ref on the connection whilst it is on
                 * the rxrpc_connections list: the initial usage count of two
                 * covers that list ref plus the ref returned to the caller.
                 */
                conn->state = RXRPC_CONN_SERVICE_PREALLOC;
                atomic_set(&conn->usage, 2);

                atomic_inc(&rxnet->nr_conns);
                write_lock(&rxnet->conn_lock);
                list_add_tail(&conn->link, &rxnet->service_conns);
                list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
                write_unlock(&rxnet->conn_lock);

                trace_rxrpc_conn(conn, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage),
                                 __builtin_return_address(0));
        }

        return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
                                   struct rxrpc_connection *conn,
                                   struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

        _enter("");

        conn->proto.epoch       = sp->hdr.epoch;
        conn->proto.cid         = sp->hdr.cid & RXRPC_CIDMASK;
        conn->params.service_id = sp->hdr.serviceId;
        conn->service_id        = sp->hdr.serviceId;
        conn->security_ix       = sp->hdr.securityIndex;
        conn->out_clientflag    = 0;
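        /* A non-zero security index means that security has to be
         * negotiated (e.g. a challenge/response exchange) before calls can
         * proceed, so the connection starts out unsecured.
         */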
        if (conn->security_ix)
                conn->state     = RXRPC_CONN_SERVICE_UNSECURED;
        else
                conn->state     = RXRPC_CONN_SERVICE;

        /* See if we should upgrade the service.  This can only happen on the
         * first packet on a new connection.  Once done, it applies to all
         * subsequent calls on that connection.
         */
        if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
            conn->service_id == rx->service_upgrade.from)
                conn->service_id = rx->service_upgrade.to;

        /* Make the connection a target for incoming packets. */
        rxrpc_publish_service_conn(conn->params.peer, conn);

        _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_peer *peer = conn->params.peer;

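        /* The flag may already be clear if this connection was displaced by
         * a newer one in rxrpc_publish_service_conn(), so only erase the
         * node if it actually holds a place in the tree.
         */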
        write_seqlock_bh(&peer->service_conn_lock);
        if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
                rb_erase(&conn->service_node, &peer->service_conns);
        write_sequnlock_bh(&peer->service_conn_lock);
}