linux/net/rxrpc/call_object.c
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *)_call;

        _enter("%d", call->debug_id);

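        /* Timed events only matter while the call is still live. */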
        if (call->state < RXRPC_CALL_COMPLETE)
                rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
}

/*
 * find an extant call by its user call ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);
        setup_timer(&call->timer, rxrpc_call_timer_expired,
                    (unsigned long)call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
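        /* The total Tx length isn't known until the caller specifies it. */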
        call->tx_total_len = -1;

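        /* Poison the rbtree node so that use before insertion into the
         * socket's call tree shows up clearly.
         */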
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        /* Leave space in the ring to handle a maxed-out jumbo packet */
        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;

        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        call->tx_phase = true;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        ktime_t now = ktime_get_real(), expire_at;

        expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
        call->expire_at = expire_at;
        call->ack_at = expire_at;
        call->ping_at = expire_at;
        call->resend_at = expire_at;
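        /* Prime the timer with a far-future expiry; rxrpc_set_timer() then
         * works out the real deadline from the times set above.
         */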
        call->timer.expires = jiffies + LONG_MAX / 2;
        rxrpc_set_timer(call, rxrpc_timer_begin, now);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's user_mutex will need releasing by the
 *   caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         unsigned long user_call_ID,
                                         s64 tx_total_len,
                                         gfp_t gfp)
        __releases(&rx->sk.sk_lock.slock)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, user_call_ID);

        call = rxrpc_alloc_client_call(srx, gfp);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->tx_total_len = tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
                         here, (const void *)user_call_ID);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

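        /* Find the insertion point in the socket's tree of calls, which is
         * indexed by user call ID, checking for a duplicate ID as we go.
         */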
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

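        /* No clash, so attach the call to the socket and index it by the
         * user call ID.
         */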
        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
                         here, NULL);

        spin_lock_bh(&call->conn->params.peer->lock);
        hlist_add_head(&call->error_link,
                       &call->conn->params.peer->error_targets);
        spin_unlock_bh(&call->conn->params.peer->lock);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        ret = -EEXIST;

error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
                         here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id           = sp->hdr.callNumber;
        call->service_id        = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
        call->state             = RXRPC_CALL_SERVER_ACCEPTING;
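        /* Calls on a secured connection can't be accepted until the security
         * negotiation has completed.
         */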
        if (sp->hdr.securityIndex > 0)
                call->state     = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
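        /* Don't take a new ref if the usage count is already zero - the call
         * is on its way to destruction and mustn't be queued.
         */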
        int n = __atomic_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;
        int i;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        del_timer_sync(&call->timer);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn)
                rxrpc_disconnect_call(call);

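        /* Discard any packets still held in the Rx/Tx ring. */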
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));
                call->rxtx_buffer[i] = NULL;
        }

        _leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

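        /* Calls awaiting acceptance will never be accepted now, so abort them
         * and drop the acceptance queue's ref on each.
         */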
        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

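        /* Abort, detach and put every call still attached to the socket. */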
        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * drop a ref on a call, cleaning it up once the last ref is gone
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet;
        const void *here = __builtin_return_address(0);
        int n;

        ASSERT(call != NULL);

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(call, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

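                /* Unlink the call from the namespace's list of calls before
                 * it is cleaned up.
                 */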
                if (!list_empty(&call->link)) {
                        rxnet = rxrpc_net(sock_net(&call->socket->sk));
                        write_lock(&rxnet->call_lock);
                        list_del_init(&call->link);
                        write_unlock(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        int i;

        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->conn, ==, NULL);

        /* Clean up the Rx/Tx buffer */
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));

        rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

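        /* The call may still be visible to RCU readers, so the final freeing
         * is deferred until a grace period has passed.
         */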
        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (list_empty(&rxnet->calls))
                return;

        write_lock(&rxnet->call_lock);

        while (!list_empty(&rxnet->calls)) {
                call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                rxrpc_see_call(call);
                list_del_init(&call->link);

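                /* Nothing should still be on this list; anything that is has
                 * leaked a reference, so complain loudly.
                 */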
                pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                       call, atomic_read(&call->usage),
                       rxrpc_call_states[call->state],
                       call->flags, call->events);

                write_unlock(&rxnet->call_lock);
                cond_resched();
                write_lock(&rxnet->call_lock);
        }

        write_unlock(&rxnet->call_lock);
}