linux/net/rxrpc/call_accept.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

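/* A do-nothing notification handler.  It is substituted for a kernel
 * service's notify_rx hook when a preallocated call is discarded (see
 * rxrpc_discard_prealloc() below), so that any notification delivered after
 * the service has let go of the call becomes a harmless no-op.
 */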
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
                               unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                      struct rxrpc_backlog *b,
                                      rxrpc_notify_rx_t notify_rx,
                                      rxrpc_user_attach_call_t user_attach_call,
                                      unsigned long user_call_ID, gfp_t gfp,
                                      unsigned int debug_id)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        struct rb_node *parent, **pp;
        int max, tmp;
        unsigned int size = RXRPC_BACKLOG_MAX;
        unsigned int head, tail, call_head, call_tail;

        max = rx->sk.sk_max_ack_backlog;
        tmp = rx->sk.sk_ack_backlog;
        if (tmp >= max) {
                _leave(" = -ENOBUFS [full %u]", max);
                return -ENOBUFS;
        }
        max -= tmp;

        /* We don't need more conns and peers than we have calls, but on the
         * other hand, we shouldn't ever use more peers than conns or conns
         * than calls.
         */
        call_head = b->call_backlog_head;
        call_tail = READ_ONCE(b->call_backlog_tail);
        tmp = CIRC_CNT(call_head, call_tail, size);
        if (tmp >= max) {
                _leave(" = -ENOBUFS [enough %u]", tmp);
                return -ENOBUFS;
        }
        max = tmp + 1;
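        /* max is now the depth the call ring will have once this call is
         * added; the peer and conn rings below are topped up to at least
         * that depth so that every preallocated call can be matched with a
         * preallocated conn and peer.
         */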

        head = b->peer_backlog_head;
        tail = READ_ONCE(b->peer_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
                if (!peer)
                        return -ENOMEM;
                b->peer_backlog[head] = peer;
                smp_store_release(&b->peer_backlog_head,
                                  (head + 1) & (size - 1));
        }

        head = b->conn_backlog_head;
        tail = READ_ONCE(b->conn_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_connection *conn;

                conn = rxrpc_prealloc_service_connection(rxnet, gfp);
                if (!conn)
                        return -ENOMEM;
                b->conn_backlog[head] = conn;
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));

                trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage), here);
        }

        /* Now it gets complicated, because calls get registered with the
         * socket here, with a user ID preassigned by the user.
         */
        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;

        trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
                         atomic_read(&call->usage),
                         here, (const void *)user_call_ID);

        write_lock(&rx->call_lock);

        /* Check the user ID isn't already in use */
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto id_in_use;
        }

        call->user_call_ID = user_call_ID;
        call->notify_rx = notify_rx;
        if (user_attach_call) {
                rxrpc_get_call(call, rxrpc_call_got_kernel);
                user_attach_call(call, user_call_ID);
        }

        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        b->call_backlog[call_head] = call;
        smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
        _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
        return 0;

id_in_use:
        write_unlock(&rx->call_lock);
        rxrpc_cleanup_call(call);
        _leave(" = -EBADSLT");
        return -EBADSLT;
}
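
/* A note on the ring arithmetic used above: CIRC_CNT(head, tail, size) from
 * <linux/circ_buf.h> evaluates to (head - tail) & (size - 1), so with
 * RXRPC_BACKLOG_MAX a power of two the indices may wrap freely.  For
 * example, assuming a ring size of 32, head == 2 and tail == 30 give
 * CIRC_CNT == (2 - 30) & 31 == 4 entries currently in the ring.
 */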

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (!b) {
                b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
                if (!b)
                        return -ENOMEM;
                rx->backlog = b;
        }

        return 0;
}
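
/* Note that kzalloc() leaves all the ring heads and tails at zero, so a
 * fresh backlog starts out empty; it is filled one call at a time by
 * rxrpc_service_prealloc_one() via the charge functions at the bottom of
 * this file.
 */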

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

        if (!b)
                return;
        rx->backlog = NULL;

        /* Make sure that there aren't any incoming calls in progress before we
         * clear the preallocation buffers.
         */
        spin_lock_bh(&rx->incoming_lock);
        spin_unlock_bh(&rx->incoming_lock);
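
        /* The empty lock/unlock pair above acts as a barrier: any
         * rxrpc_new_incoming_call() that was already inside
         * rx->incoming_lock has finished by the time we get here, and,
         * combined with the socket-state checks in that function, the rings
         * below can be torn down without a consumer racing against us.
         */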

        head = b->peer_backlog_head;
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
                rxrpc_put_local(peer->local);
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }

        head = b->conn_backlog_head;
        tail = b->conn_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_connection *conn = b->conn_backlog[tail];
                write_lock(&rxnet->conn_lock);
                list_del(&conn->link);
                list_del(&conn->proc_link);
                write_unlock(&rxnet->conn_lock);
                kfree(conn);
                if (atomic_dec_and_test(&rxnet->nr_conns))
                        wake_up_var(&rxnet->nr_conns);
                tail = (tail + 1) & (size - 1);
        }

        head = b->call_backlog_head;
        tail = b->call_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_call *call = b->call_backlog[tail];
                rcu_assign_pointer(call->socket, rx);
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
                        if (call->notify_rx)
                                call->notify_rx = rxrpc_dummy_notify;
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
                tail = (tail + 1) & (size - 1);
        }

        kfree(b);
}
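
/* The peers discarded above were never hashed, so a plain kfree() suffices;
 * the conns only need unlinking from the rxnet lists first.  Calls, though,
 * were registered with the socket (and possibly attached to a kernel
 * service) by rxrpc_service_prealloc_one(), so they must go through the
 * full completion/release/put sequence.
 */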

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        ktime_t now = skb->tstamp;

        if (call->peer->rtt_count < 3 ||
            ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
                                  true, true,
                                  rxrpc_propose_ack_ping_for_params);
}
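
/* The ping is rate-limited: it is only proposed while the peer's RTT cache
 * is cold (fewer than three samples) or the last RTT probe is more than a
 * second old, so an already-busy peer isn't re-probed on every new call.
 */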

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    const struct rxrpc_security *sec,
                                                    struct sk_buff *skb)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
        unsigned short call_count, conn_count;

        /* The backlog must hold at least as many conns as calls and at least
         * as many peers as conns, as the assertions below verify.
         */
        call_head = smp_load_acquire(&b->call_backlog_head);
        call_tail = b->call_backlog_tail;
        call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
        conn_head = smp_load_acquire(&b->conn_backlog_head);
        conn_tail = b->conn_backlog_tail;
        conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
        ASSERTCMP(conn_count, >=, call_count);
        peer_head = smp_load_acquire(&b->peer_backlog_head);
        peer_tail = b->peer_backlog_tail;
        ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
                  conn_count);

        if (call_count == 0)
                return NULL;

        if (!conn) {
                if (peer && !rxrpc_get_peer_maybe(peer))
                        peer = NULL;
                if (!peer) {
                        peer = b->peer_backlog[peer_tail];
                        if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
                                return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));

                        rxrpc_new_incoming_peer(rx, local, peer);
                }

                /* Now allocate and set up the connection */
                conn = b->conn_backlog[conn_tail];
                b->conn_backlog[conn_tail] = NULL;
                smp_store_release(&b->conn_backlog_tail,
                                  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
                conn->params.local = rxrpc_get_local(local);
                conn->params.peer = peer;
                rxrpc_see_connection(conn);
                rxrpc_new_incoming_connection(rx, conn, sec, skb);
        } else {
                rxrpc_get_connection(conn);
        }

        /* And now we can allocate and set up a new call */
        call = b->call_backlog[call_tail];
        b->call_backlog[call_tail] = NULL;
        smp_store_release(&b->call_backlog_tail,
                          (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

        rxrpc_see_call(call);
        call->conn = conn;
        call->security = conn->security;
        call->security_ix = conn->security_ix;
        call->peer = rxrpc_get_peer(conn->params.peer);
        call->cong_cwnd = call->peer->cong_cwnd;
        return call;
}
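
/* The backlog rings follow the single-producer/single-consumer pattern from
 * Documentation/core-api/circular-buffers.rst: rxrpc_service_prealloc_one()
 * publishes a slot with smp_store_release() on the head; the consumer above
 * pairs that with smp_load_acquire() on the head before reading the slot,
 * then retires it with smp_store_release() on the tail.
 */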

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct rxrpc_sock *rx,
                                           struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        const struct rxrpc_security *sec = NULL;
        struct rxrpc_connection *conn;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_call *call = NULL;

        _enter("");

        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                goto no_call;
        }

        /* The peer, connection and call may all have sprung into existence due
         * to a duplicate packet being handled on another CPU in parallel, so
         * we have to recheck the routing.  However, we're now holding
         * rx->incoming_lock, so the values should remain stable.
         */
        conn = rxrpc_find_connection_rcu(local, skb, &peer);

        if (!conn) {
                sec = rxrpc_get_incoming_security(rx, skb);
                if (!sec)
                        goto no_call;
        }

        call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, skb);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                goto no_call;
        }

        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);

        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;

        if (rx->notify_new_call)
                rx->notify_new_call(&rx->sk, call, call->user_call_ID);

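        /* The connection's state determines how the new call starts out: a
         * brand new, unsecured connection must first issue a security
         * challenge; an established connection lets the call proceed straight
         * to receiving the request; an already-aborted connection completes
         * the call immediately.
         */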
        spin_lock(&conn->state_lock);
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
                conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
                set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
                rxrpc_queue_conn(call->conn);
                break;

        case RXRPC_CONN_SERVICE:
                write_lock(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE)
                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                write_unlock(&call->state_lock);
                break;

        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
                                          conn->abort_code, conn->error);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
                                 conn->abort_code, conn->error);
                break;
        default:
                BUG();
        }
        spin_unlock(&conn->state_lock);
        spin_unlock(&rx->incoming_lock);

        rxrpc_send_ping(call, skb);

        /* We have to discard the prealloc queue's ref here and rely on a
         * combination of the RCU read lock and refs held either by the socket
         * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
         * service to prevent the call from being deallocated too early.
         */
        rxrpc_put_call(call, rxrpc_call_put);

        _leave(" = %p{%d}", call, call->debug_id);
        return call;

no_call:
        spin_unlock(&rx->incoming_lock);
        _leave(" = NULL [%u]", skb->mark);
        return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (rx->sk.sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
                                          GFP_KERNEL,
                                          atomic_inc_return(&rxrpc_debug_id));
}
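
/* rxrpc_user_charge_accept() is the userspace counterpart of
 * rxrpc_kernel_charge_accept() below: no notify_rx or attach hook is passed
 * because a userspace server learns of new calls through recvmsg() instead.
 */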

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
                               rxrpc_notify_rx_t notify_rx,
                               rxrpc_user_attach_call_t user_attach_call,
                               unsigned long user_call_ID, gfp_t gfp,
                               unsigned int debug_id)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_backlog *b = rx->backlog;

        if (sock->sk->sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, notify_rx,
                                          user_attach_call, user_call_ID,
                                          gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
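
/* Example usage (a sketch, not part of this file): a kernel service would
 * typically keep the backlog topped up from its new-call notification,
 * deferring to process context so that GFP_KERNEL can be used.  The my_*()
 * names here are hypothetical stand-ins for the service's own code:
 *
 *      static void my_rx_new_call(struct sock *sk, struct rxrpc_call *call,
 *                                 unsigned long user_call_ID)
 *      {
 *              queue_work(my_wq, &my_charge_work);
 *      }
 *
 *      static void my_charge_worker(struct work_struct *work)
 *      {
 *              while (rxrpc_kernel_charge_accept(my_socket, my_notify_rx,
 *                                                my_attach_call,
 *                                                my_new_call_id(), GFP_KERNEL,
 *                                                atomic_inc_return(&rxrpc_debug_id)) == 0)
 *                      ;
 *      }
 *
 * Looping until a non-zero return fills the rings, since
 * rxrpc_service_prealloc_one() returns -ENOBUFS once the backlog is full.
 */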