linux/net/rxrpc/ar-transport.c
/* RxRPC point-to-point transport session management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time in seconds after last use at which a transport record is cleaned up.
 */
unsigned int rxrpc_transport_expiry = 3600 * 24;

static void rxrpc_transport_reaper(struct work_struct *work);

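/*
 * All extant transports, chained through trans->link and guarded by
 * rxrpc_transport_lock; the reaper work item destroys entries whose usage
 * count has been zero for longer than rxrpc_transport_expiry.
 */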
static LIST_HEAD(rxrpc_transports);
static DEFINE_RWLOCK(rxrpc_transport_lock);
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);

/*
 * allocate a new transport session manager
 */
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
						     struct rxrpc_peer *peer,
						     gfp_t gfp)
{
	struct rxrpc_transport *trans;

	_enter("");

	trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
	if (trans) {
		trans->local = local;
		trans->peer = peer;
		INIT_LIST_HEAD(&trans->link);
		trans->bundles = RB_ROOT;
		trans->client_conns = RB_ROOT;
		trans->server_conns = RB_ROOT;
		skb_queue_head_init(&trans->error_queue);
		spin_lock_init(&trans->client_lock);
		rwlock_init(&trans->conn_lock);
		atomic_set(&trans->usage, 1);
		trans->debug_id = atomic_inc_return(&rxrpc_debug_id);

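		/* only IPv4 UDP transports are currently supported; any
		 * other address family or socket type indicates a bug in
		 * the caller */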
		if (peer->srx.transport.family == AF_INET) {
			switch (peer->srx.transport_type) {
			case SOCK_DGRAM:
				INIT_WORK(&trans->error_handler,
					  rxrpc_UDP_error_handler);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}
	}

	_leave(" = %p", trans);
	return trans;
}

/*
 * obtain a transport session for the nominated endpoints
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

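	/* The lookup is optimistic: search under the read lock first; if
	 * that fails, allocate a candidate record outside of any lock and
	 * then search again under the write lock in case someone else
	 * added a matching record in the meantime. */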
	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;
	usage = atomic_read(&trans->usage);

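	/* the new transport record pins both endpoint records until it is
	 * itself destroyed */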
	rxrpc_get_local(trans->local);
	atomic_inc(&trans->peer->usage);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new,
	     trans->debug_id,
	     trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, usage);
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	kfree(candidate);
	goto success;
}
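
/*
 * Illustrative usage (a sketch, not code from this file): a caller setting
 * up a connection would pair the lookup with rxrpc_put_transport() once it
 * has finished with the record, e.g.:
 *
 *	trans = rxrpc_get_transport(local, peer, GFP_KERNEL);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...
 *	rxrpc_put_transport(trans);
 */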

/*
 * find the transport connecting two endpoints, if one exists; a reference
 * is taken on any transport returned, and NULL is returned if there is no
 * match
 */
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
					     struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list */
	read_lock_bh(&rxrpc_transport_lock);

	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}

	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = NULL");
	return NULL;

found_extant_transport:
	atomic_inc(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	_leave(" = %p", trans);
	return trans;
}

/*
 * release a transport session
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

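	/* stamp the record before dropping the ref so that, if this put
	 * takes the usage count to zero, the reaper sees an up-to-date
	 * last-use time */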
	trans->put_time = ktime_get_seconds();
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}

/*
 * clean up a transport session
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	rxrpc_purge_queue(&trans->error_queue);

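	/* release the refs on the endpoint records that were taken when the
	 * transport was added to the list */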
	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}

/*
 * reap dead transports that have passed their expiry date
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		if (likely(atomic_read(&trans->usage) > 0))
			continue;

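		/* the transport is unused; move it to the graveyard once
		 * the expiry period has elapsed since the last put, and
		 * otherwise note when it will become reapable */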
		reap_time = trans->put_time + rxrpc_transport_expiry;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

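	/* reschedule the reaper for the next expiry; the delay is in
	 * seconds here and must be converted to jiffies */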
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}

/*
 * preemptively destroy all the transport session records rather than
 * waiting for them to time out
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

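	/* zeroing the expiry time makes every unused transport immediately
	 * reapable, so cancel any pending run and queue the reaper to run
	 * right away */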
	rxrpc_transport_expiry = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}