linux/net/rxrpc/conn_object.c
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use and time till a closed
 * connection is finally reaped (both in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_destroy_connection(struct rcu_head *);

static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

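	/* Punt the work to the connection processor, which takes its own
	 * ref on the connection.
	 */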
	rxrpc_queue_conn(conn);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
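		/* Start out with the null security class; it's replaced
		 * if/when security is negotiated for the connection.
		 */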
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * When searching for a service call, if we find a peer but no connection, we
 * return that through *_peer in case we need to create a new service call.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb,
						   struct rxrpc_peer **_peer)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid   = sp->hdr.cid & RXRPC_CIDMASK;

	if (rxrpc_to_server(sp)) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		*_peer = peer;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
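		/* A usage count of 0 means the connection is dead and
		 * awaiting RCU disposal, so it must be treated as not found.
		 */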
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
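		/* Check that the packet's source address matches the peer
		 * recorded on the connection; the connection ID alone isn't
		 * sufficient to identify the sender.
		 */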
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
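
/*
 * Sketch of the calling convention for the lookup above (illustration only;
 * the real caller lives in the packet input path).  The returned connection
 * may only be used inside the RCU read-side section unless a ref is taken:
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_connection_rcu(local, skb, &peer);
 *	conn = rxrpc_get_connection_maybe(conn);
 *	rcu_read_unlock();
 *	if (conn) {
 *		...
 *		rxrpc_put_connection(conn);
 *	}
 */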

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if
		 * necessary through the channel, whilst disposing of the
		 * actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_USER_ABORT;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit_call(): the terminal state
		 * saved above must be visible to readers before the updated
		 * call_id exposes it.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	call->peer->cong_cwnd = call->cong_cwnd;

	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_rcu(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

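	/* Client channels are managed by the client connection cache and are
	 * recycled between calls, so client calls are handed off to the
	 * client connection manager to disconnect.
	 */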
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
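	/* Only take a ref if the usage count hasn't already reached zero; a
	 * dead connection mustn't be resurrected.
	 */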
	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);

	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_read(&conn->usage);

		trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
	}
}

/*
 * Get a ref on a connection.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&conn->usage);

	trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);

		if (n > 0)
			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
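	/* timer_reduce() only ever brings the expiry time forward, so racing
	 * callers can't accidentally push the reap time back.
	 */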
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
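	/* If only the ref held by the service conn list remains, schedule the
	 * reaper.  Note that rxrpc_connection_expiry is in seconds and must
	 * be scaled to jiffies.
	 */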
	if (n == 1)
		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
					     jiffies + rxrpc_connection_expiry * HZ);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

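	/* Make sure the connection's expiry timer isn't still running on
	 * another CPU before freeing.
	 */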
	del_timer_sync(&conn->timer);
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);

	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
		wake_up_var(&conn->params.local->rxnet->nr_conns);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
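		/* A usage count of 1 means that only the service conn list
		 * still holds a ref; anything higher means the connection is
		 * in use and must be left alone.
		 */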
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->params.local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { u=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->usage),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;
		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

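	/* The connections on the graveyard list have been unpublished, so no
	 * new ref can be taken on them; kill each one off in turn.
	 */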
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

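	/* Drop the initial count that netns setup put on nr_conns so that
	 * the counter can now fall to zero.
	 */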
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}