linux/net/rxrpc/local_object.c
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

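/* Overview of the lifecycle handled in this file: rxrpc_lookup_local() finds
 * or creates a local endpoint and opens its transport socket;
 * __rxrpc_put_local() kicks the work item once the last reference has been
 * dropped; rxrpc_local_processor() drains the reject and event queues and,
 * when the usage count reaches zero, hands off to rxrpc_local_destroyer(),
 * which closes the socket and frees the record after an RCU grace period.
 */
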
/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
                                const struct sockaddr_rxrpc *srx)
{
        long diff;

        diff = ((local->srx.transport_type - srx->transport_type) ?:
                (local->srx.transport_len - srx->transport_len) ?:
                (local->srx.transport.family - srx->transport.family));
        if (diff != 0)
                return diff;

        switch (srx->transport.family) {
        case AF_INET:
                /* If the choice of UDP port is left up to the transport, then
                 * the endpoint record doesn't match.
                 */
                return ((u16 __force)local->srx.transport.sin.sin_port -
                        (u16 __force)srx->transport.sin.sin_port) ?:
                        memcmp(&local->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                /* If the choice of UDP6 port is left up to the transport, then
                 * the endpoint record doesn't match.
                 */
                return ((u16 __force)local->srx.transport.sin6.sin6_port -
                        (u16 __force)srx->transport.sin6.sin6_port) ?:
                        memcmp(&local->srx.transport.sin6.sin6_addr,
                               &srx->transport.sin6.sin6_addr,
                               sizeof(struct in6_addr));
#endif
        default:
                BUG();
        }
}

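/* Note on the comparison above: the ?: chains evaluate to the first non-zero
 * difference, so the key yields a stable ordering over transport type,
 * length, family, port and address.  rxrpc_lookup_local() relies on this to
 * keep rxnet->local_endpoints sorted and to stop scanning as soon as the
 * comparison goes positive.
 */
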
/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
                                             const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_local *local;

        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
                atomic_set(&local->usage, 1);
                local->rxnet = rxnet;
                INIT_LIST_HEAD(&local->link);
                INIT_WORK(&local->processor, rxrpc_local_processor);
                init_rwsem(&local->defrag_sem);
                skb_queue_head_init(&local->reject_queue);
                skb_queue_head_init(&local->event_queue);
                local->client_conns = RB_ROOT;
                spin_lock_init(&local->client_conns_lock);
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                local->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
                local->srx.srx_service = 0;
        }

        _leave(" = %p", local);
        return local;
}

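/* Note: rxrpc_alloc_local() returns the record with a usage count of 1, but
 * it is not yet on rxnet->local_endpoints and has no transport socket; the
 * caller (rxrpc_lookup_local(), holding rxnet->local_mutex) opens the socket
 * and links the record into the list.
 */
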
/*
 * create the local socket
 * - must be called with rxnet->local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
        struct sock *sock;
        int ret, opt;

        _enter("%p{%d,%d}",
               local, local->srx.transport_type, local->srx.transport.family);

        /* create a socket to represent the local endpoint */
        ret = sock_create_kern(net, local->srx.transport.family,
                               local->srx.transport_type, 0, &local->socket);
        if (ret < 0) {
                _leave(" = %d [socket]", ret);
                return ret;
        }

        /* if a local address was supplied then bind it */
        if (local->srx.transport_len > sizeof(sa_family_t)) {
                _debug("bind");
                ret = kernel_bind(local->socket,
                                  (struct sockaddr *)&local->srx.transport,
                                  local->srx.transport_len);
                if (ret < 0) {
                        _debug("bind failed %d", ret);
                        goto error;
                }
        }

        /* we want to receive ICMP errors */
        opt = 1;
        ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
                                (char *) &opt, sizeof(opt));
        if (ret < 0) {
                _debug("setsockopt failed");
                goto error;
        }

        /* we want to set the don't fragment bit */
        opt = IP_PMTUDISC_DO;
        ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
                                (char *) &opt, sizeof(opt));
        if (ret < 0) {
                _debug("setsockopt failed");
                goto error;
        }

        /* set the socket up */
        sock = local->socket->sk;
        sock->sk_user_data      = local;
        sock->sk_data_ready     = rxrpc_data_ready;
        sock->sk_error_report   = rxrpc_error_report;
        _leave(" = 0");
        return 0;

error:
        kernel_sock_shutdown(local->socket, SHUT_RDWR);
        local->socket->sk->sk_user_data = NULL;
        sock_release(local->socket);
        local->socket = NULL;

        _leave(" = %d", ret);
        return ret;
}

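/* Note on the socket options above: IP_RECVERR queues ICMP errors on the
 * socket's error queue and reports them through ->sk_error_report, which is
 * pointed at rxrpc_error_report(); IP_PMTUDISC_DO sets the Don't Fragment
 * bit so that oversized packets generate an ICMP "fragmentation needed"
 * error rather than being fragmented en route.
 */
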
/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                                       const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_local *local;
        struct rxrpc_net *rxnet = rxrpc_net(net);
        struct list_head *cursor;
        const char *age;
        long diff;
        int ret;

        _enter("{%d,%d,%pISp}",
               srx->transport_type, srx->transport.family, &srx->transport);

        mutex_lock(&rxnet->local_mutex);

        for (cursor = rxnet->local_endpoints.next;
             cursor != &rxnet->local_endpoints;
             cursor = cursor->next) {
                local = list_entry(cursor, struct rxrpc_local, link);

                diff = rxrpc_local_cmp_key(local, srx);
                if (diff < 0)
                        continue;
                if (diff > 0)
                        break;

                /* Services aren't allowed to share transport sockets, so
                 * reject that here.  It is possible that the object is dying -
                 * but it may also still have the local transport address that
                 * we want bound.
                 */
                if (srx->srx_service) {
                        local = NULL;
                        goto addr_in_use;
                }

                /* Found a match.  We replace a dying object.  Attempting to
                 * bind the transport socket may still fail if we're attempting
                 * to use a local address that the dying object is still using.
                 */
                if (!rxrpc_get_local_maybe(local)) {
                        cursor = cursor->next;
                        list_del_init(&local->link);
                        break;
                }

                age = "old";
                goto found;
        }

        local = rxrpc_alloc_local(rxnet, srx);
        if (!local)
                goto nomem;

        ret = rxrpc_open_socket(local, net);
        if (ret < 0)
                goto sock_error;

        list_add_tail(&local->link, cursor);
        age = "new";

found:
        mutex_unlock(&rxnet->local_mutex);

        _net("LOCAL %s %d {%pISp}",
             age, local->debug_id, &local->srx.transport);

        _leave(" = %p", local);
        return local;

nomem:
        ret = -ENOMEM;
sock_error:
        mutex_unlock(&rxnet->local_mutex);
        kfree(local);
        _leave(" = %d", ret);
        return ERR_PTR(ret);

addr_in_use:
        mutex_unlock(&rxnet->local_mutex);
        _leave(" = -EADDRINUSE");
        return ERR_PTR(-EADDRINUSE);
}

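/* For illustration, a rough sketch of a typical caller of
 * rxrpc_lookup_local() (the real call sites are in af_rxrpc.c; the rx->srx
 * naming here just follows the convention used there):
 *
 *      local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
 *      if (IS_ERR(local))
 *              return PTR_ERR(local);
 *      ...
 *      rxrpc_put_local(local);
 *
 * rxrpc_put_local() drops the reference when the caller is finished with the
 * endpoint.  The endpoint is returned with a reference held, either a fresh
 * one (age "new") or one taken with rxrpc_get_local_maybe() (age "old").
 */
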
/*
 * A local endpoint reached its end of life.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
        _enter("%d", local->debug_id);
        rxrpc_queue_work(&local->processor);
}

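/* Note: __rxrpc_put_local() is reached once the usage count has dropped to
 * zero (via the rxrpc_put_local() wrapper in ar-internal.h).  The actual
 * teardown is deferred to the work item because shutting down the UDP socket
 * may sleep, which can't be done from softirq or RCU callback context;
 * rxrpc_local_processor() sees the zero usage count and calls
 * rxrpc_local_destroyer().
 */
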
/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
        struct socket *socket = local->socket;
        struct rxrpc_net *rxnet = local->rxnet;

        _enter("%d", local->debug_id);

        /* We can get a race between an incoming call packet queueing the
         * processor again and the work processor starting the destruction
         * process which will shut down the UDP socket.
         */
        if (local->dead) {
                _leave(" [already dead]");
                return;
        }
        local->dead = true;

        mutex_lock(&rxnet->local_mutex);
        list_del_init(&local->link);
        mutex_unlock(&rxnet->local_mutex);

        ASSERT(RB_EMPTY_ROOT(&local->client_conns));
        ASSERT(!local->service);

        if (socket) {
                local->socket = NULL;
                kernel_sock_shutdown(socket, SHUT_RDWR);
                socket->sk->sk_user_data = NULL;
                sock_release(socket);
        }

        /* At this point, there should be no more packets coming in to the
         * local endpoint.
         */
        rxrpc_purge_queue(&local->reject_queue);
        rxrpc_purge_queue(&local->event_queue);

        _debug("rcu local %d", local->debug_id);
        call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
        struct rxrpc_local *local =
                container_of(work, struct rxrpc_local, processor);
        bool again;

        _enter("%d", local->debug_id);

        do {
                again = false;
                if (atomic_read(&local->usage) == 0)
                        return rxrpc_local_destroyer(local);

                if (!skb_queue_empty(&local->reject_queue)) {
                        rxrpc_reject_packets(local);
                        again = true;
                }

                if (!skb_queue_empty(&local->event_queue)) {
                        rxrpc_process_local_events(local);
                        again = true;
                }
        } while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
        struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

        _enter("%d", local->debug_id);

        ASSERT(!work_pending(&local->processor));

        _net("DESTROY LOCAL %d", local->debug_id);
        kfree(local);
        _leave("");
}

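/* Note: the function below is expected to run from the rxrpc pernet exit
 * path, after all calls, connections and peers have been torn down, so any
 * endpoint still on the list at this point indicates a leaked reference.
 */
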
/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
        struct rxrpc_local *local;

        _enter("");

        flush_workqueue(rxrpc_workqueue);

        if (!list_empty(&rxnet->local_endpoints)) {
                mutex_lock(&rxnet->local_mutex);
                list_for_each_entry(local, &rxnet->local_endpoints, link) {
                        pr_err("AF_RXRPC: Leaked local %p {%d}\n",
                               local, atomic_read(&local->usage));
                }
                mutex_unlock(&rxnet->local_mutex);
                BUG();
        }
}
