linux/net/rxrpc/ar-accept.c
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * respond to an incoming call with a BUSY packet (for example, when the
 * service's accept backlog is full)
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
                      struct rxrpc_header *hdr)
{
        struct msghdr msg;
        struct kvec iov[1];
        size_t len;
        int ret;

        _enter("%d,,", local->debug_id);

        msg.msg_name    = &srx->transport.sin;
        msg.msg_namelen = sizeof(srx->transport.sin);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;

        hdr->seq        = 0;
        hdr->type       = RXRPC_PACKET_TYPE_BUSY;
        hdr->flags      = 0;
        hdr->userStatus = 0;
        hdr->_rsvd      = 0;

        iov[0].iov_base = hdr;
        iov[0].iov_len  = sizeof(*hdr);

        len = iov[0].iov_len;

        hdr->serial = htonl(1);
        _proto("Tx BUSY %%%u", ntohl(hdr->serial));

        ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
        if (ret < 0) {
                _leave(" = -EAGAIN [sendmsg failed: %d]", ret);
                return -EAGAIN;
        }

        _leave(" = 0");
        return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
                                      struct rxrpc_sock *rx,
                                      struct sk_buff *skb,
                                      struct sockaddr_rxrpc *srx)
{
        struct rxrpc_connection *conn;
        struct rxrpc_transport *trans;
        struct rxrpc_skb_priv *sp, *nsp;
        struct rxrpc_peer *peer;
        struct rxrpc_call *call;
        struct sk_buff *notification;
        int ret;

        _enter("");

        sp = rxrpc_skb(skb);

        /* get a notification message to send to the server app */
        notification = alloc_skb(0, GFP_NOFS);
        if (!notification) {
                _debug("no memory");
                ret = -ENOMEM;
                goto error_nofree;
        }
        rxrpc_new_skb(notification);
        notification->mark = RXRPC_SKB_MARK_NEW_CALL;

        peer = rxrpc_get_peer(srx, GFP_NOIO);
        if (IS_ERR(peer)) {
                _debug("no peer");
                ret = -EBUSY;
                goto error;
        }

        trans = rxrpc_get_transport(local, peer, GFP_NOIO);
        rxrpc_put_peer(peer);
        if (IS_ERR(trans)) {
                _debug("no trans");
                ret = -EBUSY;
                goto error;
        }

        conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
        rxrpc_put_transport(trans);
        if (IS_ERR(conn)) {
                _debug("no conn");
                ret = PTR_ERR(conn);
                goto error;
        }

        call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
        rxrpc_put_connection(conn);
        if (IS_ERR(call)) {
                _debug("no call");
                ret = PTR_ERR(call);
                goto error;
        }

        /* attach the call to the socket */
        read_lock_bh(&local->services_lock);
        if (rx->sk.sk_state == RXRPC_CLOSE)
                goto invalid_service;

        write_lock(&rx->call_lock);
        if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
                rxrpc_get_call(call);

                spin_lock(&call->conn->state_lock);
                if (sp->hdr.securityIndex > 0 &&
                    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
                        _debug("await conn sec");
                        list_add_tail(&call->accept_link, &rx->secureq);
                        call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
                        atomic_inc(&call->conn->usage);
                        set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
                        rxrpc_queue_conn(call->conn);
                } else {
                        _debug("conn ready");
                        call->state = RXRPC_CALL_SERVER_ACCEPTING;
                        list_add_tail(&call->accept_link, &rx->acceptq);
                        rxrpc_get_call(call);
                        nsp = rxrpc_skb(notification);
                        nsp->call = call;

                        ASSERTCMP(atomic_read(&call->usage), >=, 3);

                        _debug("notify");
                        spin_lock(&call->lock);
                        ret = rxrpc_queue_rcv_skb(call, notification, true,
                                                  false);
                        spin_unlock(&call->lock);
                        notification = NULL;
                        BUG_ON(ret < 0);
                }
                spin_unlock(&call->conn->state_lock);

                _debug("queued");
        }
        write_unlock(&rx->call_lock);

        _debug("process");
        rxrpc_fast_process_packet(call, skb);

        _debug("done");
        read_unlock_bh(&local->services_lock);
        rxrpc_free_skb(notification);
        rxrpc_put_call(call);
        _leave(" = 0");
        return 0;

invalid_service:
        _debug("invalid");
        read_unlock_bh(&local->services_lock);

        read_lock_bh(&call->state_lock);
        if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
            !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
                rxrpc_get_call(call);
                rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
        rxrpc_put_call(call);
        ret = -ECONNREFUSED;
error:
        rxrpc_free_skb(notification);
error_nofree:
        _leave(" = %d", ret);
        return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
        struct rxrpc_local *local =
                container_of(work, struct rxrpc_local, acceptor);
        struct rxrpc_skb_priv *sp;
        struct sockaddr_rxrpc srx;
        struct rxrpc_sock *rx;
        struct sk_buff *skb;
        __be16 service_id;
        int ret;

        _enter("%d", local->debug_id);

        read_lock_bh(&rxrpc_local_lock);
        if (atomic_read(&local->usage) > 0)
                rxrpc_get_local(local);
        else
                local = NULL;
        read_unlock_bh(&rxrpc_local_lock);
        if (!local) {
                _leave(" [local dead]");
                return;
        }

process_next_packet:
        skb = skb_dequeue(&local->accept_queue);
        if (!skb) {
                rxrpc_put_local(local);
                _leave("\n");
                return;
        }

        _net("incoming call skb %p", skb);

        sp = rxrpc_skb(skb);

        /* determine the remote address */
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
        srx.transport.family = local->srx.transport.family;
        srx.transport_type = local->srx.transport_type;
        switch (srx.transport.family) {
        case AF_INET:
                srx.transport_len = sizeof(struct sockaddr_in);
                srx.transport.sin.sin_port = udp_hdr(skb)->source;
                srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
                break;
        default:
                goto busy;
        }

        /* get the socket providing the service */
        service_id = sp->hdr.serviceId;
        read_lock_bh(&local->services_lock);
        list_for_each_entry(rx, &local->services, listen_link) {
                if (rx->service_id == service_id &&
                    rx->sk.sk_state != RXRPC_CLOSE)
                        goto found_service;
        }
        read_unlock_bh(&local->services_lock);
        goto invalid_service;

found_service:
        _debug("found service %hd", ntohs(rx->service_id));
        if (sk_acceptq_is_full(&rx->sk))
                goto backlog_full;
        sk_acceptq_added(&rx->sk);
        sock_hold(&rx->sk);
        read_unlock_bh(&local->services_lock);

        ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
        if (ret < 0)
                sk_acceptq_removed(&rx->sk);
        sock_put(&rx->sk);
        switch (ret) {
        case -ECONNRESET: /* old calls are ignored */
        case -ECONNABORTED: /* aborted calls are reaborted or ignored */
        case 0:
                goto process_next_packet;
        case -ECONNREFUSED:
                goto invalid_service;
        case -EBUSY:
                goto busy;
        case -EKEYREJECTED:
                goto security_mismatch;
        default:
                BUG();
        }

backlog_full:
        read_unlock_bh(&local->services_lock);
busy:
        rxrpc_busy(local, &srx, &sp->hdr);
        rxrpc_free_skb(skb);
        goto process_next_packet;

invalid_service:
        skb->priority = RX_INVALID_OPERATION;
        rxrpc_reject_packet(local, skb);
        goto process_next_packet;

        /* can't change connection security type mid-flow */
security_mismatch:
        skb->priority = RX_PROTOCOL_ERROR;
        rxrpc_reject_packet(local, skb);
        goto process_next_packet;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
                                     unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *parent, **pp;
        int ret;

        _enter(",%lx", user_call_ID);

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        ret = -ENODATA;
        if (list_empty(&rx->acceptq))
                goto out;

        /* check the user ID isn't already in use */
        ret = -EBADSLT;
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                call = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto out;
        }

        /* dequeue the first call and check it's still valid */
        call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                break;
        case RXRPC_CALL_REMOTELY_ABORTED:
        case RXRPC_CALL_LOCALLY_ABORTED:
                ret = -ECONNABORTED;
                goto out_release;
        case RXRPC_CALL_NETWORK_ERROR:
                ret = call->conn->error;
                goto out_release;
        case RXRPC_CALL_DEAD:
                ret = -ETIME;
                goto out_discard;
        default:
                BUG();
        }

        /* formalise the acceptance */
        call->user_call_ID = user_call_ID;
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
                BUG();
        if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
                BUG();
        rxrpc_queue_call(call);

        rxrpc_get_call(call);
        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        _leave(" = %p{%d}", call, call->debug_id);
        return call;

        /* if the call is already dying or dead, then we leave the socket's ref
         * on it to be released by rxrpc_dead_call_expired() as induced by
         * rxrpc_release_call() */
out_release:
        _debug("release %p", call);
        if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
            !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
                rxrpc_queue_call(call);
out_discard:
        write_unlock_bh(&call->state_lock);
        _debug("discard %p", call);
out:
        write_unlock(&rx->call_lock);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}
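
/*
 * Editorial sketch (not part of the original file): roughly how a userspace
 * server is expected to reach rxrpc_accept_call() above.  It issues a
 * zero-length sendmsg() carrying an RXRPC_ACCEPT control message plus an
 * RXRPC_USER_CALL_ID control message naming the tag to bind to the call, both
 * at level SOL_RXRPC.  The exact cmsg layout below is an assumption based on
 * Documentation/networking/rxrpc.txt, and next_free_tag() is a hypothetical
 * helper; treat this as a sketch, not a reference implementation.
 *
 *      unsigned long call_id = next_free_tag();
 *      char ctrl[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(0)];
 *      struct msghdr msg = { .msg_control = ctrl,
 *                            .msg_controllen = sizeof(ctrl) };
 *      struct cmsghdr *cmsg;
 *
 *      cmsg = CMSG_FIRSTHDR(&msg);
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type = RXRPC_USER_CALL_ID;
 *      cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
 *      memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *      cmsg = CMSG_NXTHDR(&msg, cmsg);
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type = RXRPC_ACCEPT;
 *      cmsg->cmsg_len = CMSG_LEN(0);
 *
 *      if (sendmsg(server_fd, &msg, 0) < 0)
 *              perror("accept call");
 */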

/*
 * handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        int ret;

        _enter("");

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        ret = -ENODATA;
        if (list_empty(&rx->acceptq))
                goto out;

        /* dequeue the first call and check it's still valid */
        call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                call->state = RXRPC_CALL_SERVER_BUSY;
                if (!test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
                        rxrpc_queue_call(call);
                ret = 0;
                goto out_release;
        case RXRPC_CALL_REMOTELY_ABORTED:
        case RXRPC_CALL_LOCALLY_ABORTED:
                ret = -ECONNABORTED;
                goto out_release;
        case RXRPC_CALL_NETWORK_ERROR:
                ret = call->conn->error;
                goto out_release;
        case RXRPC_CALL_DEAD:
                ret = -ETIME;
                goto out_discard;
        default:
                BUG();
        }

        /* if the call is already dying or dead, then we leave the socket's ref
         * on it to be released by rxrpc_dead_call_expired() as induced by
         * rxrpc_release_call() */
out_release:
        _debug("release %p", call);
        if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
            !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
                rxrpc_queue_call(call);
out_discard:
        write_unlock_bh(&call->state_lock);
        _debug("discard %p", call);
out:
        write_unlock(&rx->call_lock);
        _leave(" = %d", ret);
        return ret;
}

/**
 * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
 * @sock: The socket on which the impending call is waiting
 * @user_call_ID: The tag to attach to the call
 *
 * Allow a kernel service to accept an incoming call, assuming the incoming
 * call is still valid.
 */
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
                                            unsigned long user_call_ID)
{
        struct rxrpc_call *call;

        _enter(",%lx", user_call_ID);
        call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
        _leave(" = %p", call);
        return call;
}

EXPORT_SYMBOL(rxrpc_kernel_accept_call);

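#if 0
/*
 * Editorial sketch (not part of the original file): how an in-kernel service
 * might drive rxrpc_kernel_accept_call() from its new-call handling.  The
 * function name and the caller-supplied tag are hypothetical; only the
 * exported call itself comes from this file.
 */
static struct rxrpc_call *example_collect_call(struct socket *rxrpc_socket,
                                               unsigned long user_call_ID)
{
        struct rxrpc_call *call;

        /* Claim the call at the head of the socket's accept queue, binding it
         * to user_call_ID.  Errors mean nothing was queued (-ENODATA), the tag
         * is already in use (-EBADSLT), or the call was aborted, hit a network
         * error or timed out before it could be accepted. */
        call = rxrpc_kernel_accept_call(rxrpc_socket, user_call_ID);
        if (IS_ERR(call))
                return call;

        /* The call is now in RXRPC_CALL_SERVER_RECV_REQUEST state; request
         * data will be delivered against user_call_ID. */
        return call;
}
#endif
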
/**
 * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
 * @sock: The socket on which the impending call is waiting
 *
 * Allow a kernel service to reject an incoming call with a BUSY message,
 * assuming the incoming call is still valid.
 */
int rxrpc_kernel_reject_call(struct socket *sock)
{
        int ret;

        _enter("");
        ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
        _leave(" = %d", ret);
        return ret;
}

EXPORT_SYMBOL(rxrpc_kernel_reject_call);
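
#if 0
/*
 * Editorial sketch (not part of the original file): how an in-kernel service
 * might shed load by bouncing the next queued call with a BUSY indication
 * instead of accepting it.  The function name is hypothetical.
 */
static void example_bounce_next_call(struct socket *rxrpc_socket)
{
        int ret;

        ret = rxrpc_kernel_reject_call(rxrpc_socket);
        if (ret == -ENODATA)
                return;         /* no call was waiting to be accepted */
        if (ret < 0)
                _debug("call already aborted or dead (%d)", ret);
}
#endif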