linux/net/rxrpc/af_rxrpc.c
/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
u32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * notify that write buffer space has become available and wake up anyone
 * waiting for it
 */
static void rxrpc_write_space(struct sock *sk)
{
        _enter("%p", sk);
        rcu_read_lock();
        if (rxrpc_writable(sk)) {
                struct socket_wq *wq = rcu_dereference(sk->sk_wq);

                if (skwq_has_sleeper(wq))
                        wake_up_interruptible(&wq->wait);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
                                  struct sockaddr_rxrpc *srx,
                                  int len)
{
        unsigned int tail;

        if (len < sizeof(struct sockaddr_rxrpc))
                return -EINVAL;

        if (srx->srx_family != AF_RXRPC)
                return -EAFNOSUPPORT;

        if (srx->transport_type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        len -= offsetof(struct sockaddr_rxrpc, transport);
        if (srx->transport_len < sizeof(sa_family_t) ||
            srx->transport_len > len)
                return -EINVAL;

        if (srx->transport.family != rx->family)
                return -EAFNOSUPPORT;

        switch (srx->transport.family) {
        case AF_INET:
                if (srx->transport_len < sizeof(struct sockaddr_in))
                        return -EINVAL;
                _debug("INET: %x @ %pI4",
                       ntohs(srx->transport.sin.sin_port),
                       &srx->transport.sin.sin_addr);
                tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
                break;

        case AF_INET6:
        default:
                return -EAFNOSUPPORT;
        }

        if (tail < len)
                memset((void *)srx + tail, 0, len - tail);
        return 0;
}
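
/*
 * Illustrative sketch (not part of this file): how a caller might fill in a
 * struct sockaddr_rxrpc so that it passes the checks above.  The field names
 * follow the sockaddr_rxrpc definition in the UAPI header <linux/rxrpc.h>;
 * the service ID and port below are arbitrary example values.
 *
 *      struct sockaddr_rxrpc srx = {
 *              .srx_family     = AF_RXRPC,
 *              .srx_service    = 52,                   // example service ID
 *              .transport_type = SOCK_DGRAM,
 *              .transport_len  = sizeof(srx.transport.sin),
 *      };
 *
 *      srx.transport.sin.sin_family = AF_INET;
 *      srx.transport.sin.sin_port = htons(7001);       // example port
 *      srx.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 */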

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct sock *sk = sock->sk;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
        int ret;

        _enter("%p,%p,%d", rx, saddr, len);

        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;

        lock_sock(&rx->sk);

        if (rx->sk.sk_state != RXRPC_UNBOUND) {
                ret = -EINVAL;
                goto error_unlock;
        }

        memcpy(&rx->srx, srx, sizeof(rx->srx));

        local = rxrpc_lookup_local(&rx->srx);
        if (IS_ERR(local)) {
                ret = PTR_ERR(local);
                goto error_unlock;
        }

        if (rx->srx.srx_service) {
                write_lock_bh(&local->services_lock);
                list_for_each_entry(prx, &local->services, listen_link) {
                        if (prx->srx.srx_service == rx->srx.srx_service)
                                goto service_in_use;
                }

                rx->local = local;
                list_add_tail(&rx->listen_link, &local->services);
                write_unlock_bh(&local->services_lock);

                rx->sk.sk_state = RXRPC_SERVER_BOUND;
        } else {
                rx->local = local;
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        }

        release_sock(&rx->sk);
        _leave(" = 0");
        return 0;

service_in_use:
        write_unlock_bh(&local->services_lock);
        rxrpc_put_local(local);
        ret = -EADDRINUSE;
error_unlock:
        release_sock(&rx->sk);
error:
        _leave(" = %d", ret);
        return ret;
}
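
/*
 * Illustrative userspace sketch (assumed usage, not part of this file): a
 * server might bind the example address built above and then listen for
 * incoming calls, as described in Documentation/networking/rxrpc.txt.
 *
 *      int server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *      if (server < 0 ||
 *          bind(server, (struct sockaddr *)&srx, sizeof(srx)) < 0 ||
 *          listen(server, 100) < 0)
 *              perror("rxrpc server setup");
 */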

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        unsigned int max;
        int ret;

        _enter("%p,%d", rx, backlog);

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNBOUND:
                ret = -EADDRNOTAVAIL;
                break;
        case RXRPC_SERVER_BOUND:
                ASSERT(rx->local != NULL);
                max = READ_ONCE(rxrpc_max_backlog);
                ret = -EINVAL;
                if (backlog == INT_MAX)
                        backlog = max;
                else if (backlog < 0 || backlog > max)
                        break;
                sk->sk_max_ack_backlog = backlog;
                rx->sk.sk_state = RXRPC_SERVER_LISTENING;
                ret = 0;
                break;
        default:
                ret = -EBUSY;
                break;
        }

        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ret;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @gfp: The allocation constraints
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           struct sockaddr_rxrpc *srx,
                                           struct key *key,
                                           unsigned long user_call_ID,
                                           gfp_t gfp)
{
        struct rxrpc_conn_parameters cp;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter(",,%x,%lx", key_serial(key), user_call_ID);

        ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
        if (ret < 0)
                return ERR_PTR(ret);

        lock_sock(&rx->sk);

        if (!key)
                key = rx->key;
        if (key && !key->payload.data[0])
                key = NULL; /* a no-security key */

        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
        cp.key                  = key;
        cp.security_level       = 0;
        cp.exclusive            = false;
        cp.service_id           = srx->srx_service;
        call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);

        release_sock(&rx->sk);
        _leave(" = %p", call);
        return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
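
/*
 * Illustrative in-kernel sketch (assumed usage, not part of this file): a
 * kernel service such as kAFS might start a call with the socket's default
 * security, exchange data on it and then release it.  The "socket", "srx" and
 * "cookie" variables are assumptions made for the example, and GFP_NOFS is
 * just one possible allocation constraint.
 *
 *      struct rxrpc_call *call;
 *
 *      call = rxrpc_kernel_begin_call(socket, &srx, NULL,
 *                                     (unsigned long)cookie, GFP_NOFS);
 *      if (IS_ERR(call))
 *              return PTR_ERR(call);
 *
 *      // ... send the request and collect the reply on the call ...
 *
 *      rxrpc_kernel_end_call(call);
 */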

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
        _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
        rxrpc_remove_user_ID(call->socket, call);
        rxrpc_put_call(call);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket.  They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given.  @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
                                        rxrpc_interceptor_t interceptor)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

        _enter("");
        rx->interceptor = interceptor;
}
EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
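
/*
 * Illustrative sketch (assumed usage, not part of this file): a kernel
 * service could divert incoming packets to its own handler before they reach
 * the socket's Rx queue.  The handler name is hypothetical and the
 * rxrpc_interceptor_t prototype is assumed to be
 * (struct sock *, unsigned long user_call_ID, struct sk_buff *).
 *
 *      static void my_rx_interceptor(struct sock *sk,
 *                                    unsigned long user_call_ID,
 *                                    struct sk_buff *skb)
 *      {
 *              // runs with the receive queue lock held and softirqs off;
 *              // must dispose of the skb itself
 *      }
 *
 *      rxrpc_kernel_intercept_rx_messages(socket, my_rx_interceptor);
 */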

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
                         int addr_len, int flags)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ret = rxrpc_validate_address(rx, srx, addr_len);
        if (ret < 0) {
                _leave(" = %d [bad addr]", ret);
                return ret;
        }

        lock_sock(&rx->sk);

        ret = -EISCONN;
        if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
                goto error;

        switch (rx->sk.sk_state) {
        case RXRPC_UNBOUND:
                rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
                /* Fall through */
        case RXRPC_CLIENT_UNBOUND:
        case RXRPC_CLIENT_BOUND:
                break;
        default:
                ret = -EBUSY;
                goto error;
        }

        rx->connect_srx = *srx;
        set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
        ret = 0;

error:
        release_sock(&rx->sk);
        return ret;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter(",{%d},,%zu", rx->sk.sk_state, len);

        if (m->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (m->msg_name) {
                ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
                if (ret < 0) {
                        _leave(" = %d [bad addr]", ret);
                        return ret;
                }
        }

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNBOUND:
                local = rxrpc_lookup_local(&rx->srx);
                if (IS_ERR(local)) {
                        ret = PTR_ERR(local);
                        goto error_unlock;
                }

                rx->local = local;
                rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
                /* Fall through */

        case RXRPC_CLIENT_UNBOUND:
        case RXRPC_CLIENT_BOUND:
                if (!m->msg_name &&
                    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
                        m->msg_name = &rx->connect_srx;
                        m->msg_namelen = sizeof(rx->connect_srx);
                }
                /* Fall through */
        case RXRPC_SERVER_BOUND:
        case RXRPC_SERVER_LISTENING:
                ret = rxrpc_do_sendmsg(rx, m, len);
                break;
        default:
                ret = -EINVAL;
                break;
        }

error_unlock:
        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ret;
}
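
/*
 * Illustrative userspace sketch (assumed usage, not part of this file): a
 * client begins a call by tagging its first sendmsg() with an
 * RXRPC_USER_CALL_ID control message, as described in
 * Documentation/networking/rxrpc.txt.  The call ID, request buffer and "fd"
 * are arbitrary example choices.
 *
 *      unsigned long call_id = 1;
 *      char ctrl[CMSG_SPACE(sizeof(call_id))];
 *      struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *      struct msghdr msg = {
 *              .msg_name       = &srx,
 *              .msg_namelen    = sizeof(srx),
 *              .msg_iov        = &iov,
 *              .msg_iovlen     = 1,
 *              .msg_control    = ctrl,
 *              .msg_controllen = sizeof(ctrl),
 *      };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *      memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *      sendmsg(fd, &msg, 0);   // no MSG_MORE: this also ends the request phase
 */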

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned int min_sec_level;
        int ret;

        _enter(",%d,%d,,%d", level, optname, optlen);

        lock_sock(&rx->sk);
        ret = -EOPNOTSUPP;

        if (level == SOL_RXRPC) {
                switch (optname) {
                case RXRPC_EXCLUSIVE_CONNECTION:
                        ret = -EINVAL;
                        if (optlen != 0)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNBOUND)
                                goto error;
                        rx->exclusive = true;
                        goto success;

                case RXRPC_SECURITY_KEY:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNBOUND)
                                goto error;
                        ret = rxrpc_request_key(rx, optval, optlen);
                        goto error;

                case RXRPC_SECURITY_KEYRING:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNBOUND)
                                goto error;
                        ret = rxrpc_server_keyring(rx, optval, optlen);
                        goto error;

                case RXRPC_MIN_SECURITY_LEVEL:
                        ret = -EINVAL;
                        if (optlen != sizeof(unsigned int))
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNBOUND)
                                goto error;
                        ret = get_user(min_sec_level,
                                       (unsigned int __user *) optval);
                        if (ret < 0)
                                goto error;
                        ret = -EINVAL;
                        if (min_sec_level > RXRPC_SECURITY_MAX)
                                goto error;
                        rx->min_sec_level = min_sec_level;
                        goto success;

                default:
                        break;
                }
        }

success:
        ret = 0;
error:
        release_sock(&rx->sk);
        return ret;
}
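
/*
 * Illustrative userspace sketch (assumed usage, not part of this file):
 * requiring encrypted calls on a client socket.  RXRPC_MIN_SECURITY_LEVEL
 * takes an unsigned int and must be set while the socket is still unbound;
 * RXRPC_SECURITY_ENCRYPT is the level name used in
 * Documentation/networking/rxrpc.txt.
 *
 *      unsigned int level = RXRPC_SECURITY_ENCRYPT;
 *
 *      if (setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *                     &level, sizeof(level)) < 0)
 *              perror("RXRPC_MIN_SECURITY_LEVEL");
 */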

/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* the socket is writable if there is space to add new data to the
         * socket; there is no guarantee that any particular call in progress
         * on the socket will have space in the Tx ACK window */
        if (rxrpc_writable(sk))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%p,%d", sock, protocol);

        if (!net_eq(net, &init_net))
                return -EAFNOSUPPORT;

        /* we support transport protocol UDP/UDP6 only */
        if (protocol != PF_INET)
                return -EPROTONOSUPPORT;

        if (sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        sock->ops = &rxrpc_rpc_ops;
        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        sk->sk_state            = RXRPC_UNBOUND;
        sk->sk_write_space      = rxrpc_write_space;
        sk->sk_max_ack_backlog  = 0;
        sk->sk_destruct         = rxrpc_sock_destructor;

        rx = rxrpc_sk(sk);
        rx->family = protocol;
        rx->calls = RB_ROOT;

        INIT_LIST_HEAD(&rx->listen_link);
        INIT_LIST_HEAD(&rx->secureq);
        INIT_LIST_HEAD(&rx->acceptq);
        rwlock_init(&rx->call_lock);
        memset(&rx->srx, 0, sizeof(rx->srx));

        _leave(" = 0 [%p]", rx);
        return 0;
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
        _enter("%p", sk);

        rxrpc_purge_queue(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Attempt to release alive rxrpc socket: %p\n", sk);
                return;
        }
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
        struct rxrpc_sock *rx = rxrpc_sk(sk);

        _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

        /* declare the socket closed for business */
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

        if (!list_empty(&rx->listen_link)) {
                write_lock_bh(&rx->local->services_lock);
                list_del(&rx->listen_link);
                write_unlock_bh(&rx->local->services_lock);
        }

        /* try to flush out this socket */
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);

        rxrpc_put_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
        key_put(rx->securities);
        rx->securities = NULL;
        sock_put(sk);

        _leave(" = 0");
        return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        _enter("%p{%p}", sock, sk);

        if (!sk)
                return 0;

        sock->sk = NULL;

        return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
        .family         = PF_RXRPC,
        .owner          = THIS_MODULE,
        .release        = rxrpc_release,
        .bind           = rxrpc_bind,
        .connect        = rxrpc_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = rxrpc_setsockopt,
        .getsockopt     = sock_no_getsockopt,
        .sendmsg        = rxrpc_sendmsg,
        .recvmsg        = rxrpc_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

static struct proto rxrpc_proto = {
        .name           = "RXRPC",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct rxrpc_sock),
        .max_header     = sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
        .family = PF_RXRPC,
        .create = rxrpc_create,
        .owner  = THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
        int ret = -1;

        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));

        rxrpc_epoch = get_seconds();

        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!rxrpc_call_jar) {
                pr_notice("Failed to allocate call jar\n");
                goto error_call_jar;
        }

        rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
        if (!rxrpc_workqueue) {
                pr_notice("Failed to allocate work queue\n");
                goto error_work_queue;
        }

        ret = rxrpc_init_security();
        if (ret < 0) {
                pr_crit("Cannot initialise security\n");
                goto error_security;
        }

        ret = proto_register(&rxrpc_proto, 1);
        if (ret < 0) {
                pr_crit("Cannot register protocol\n");
                goto error_proto;
        }

        ret = sock_register(&rxrpc_family_ops);
        if (ret < 0) {
                pr_crit("Cannot register socket family\n");
                goto error_sock;
        }

        ret = register_key_type(&key_type_rxrpc);
        if (ret < 0) {
                pr_crit("Cannot register client key type\n");
                goto error_key_type;
        }

        ret = register_key_type(&key_type_rxrpc_s);
        if (ret < 0) {
                pr_crit("Cannot register server key type\n");
                goto error_key_type_s;
        }

        ret = rxrpc_sysctl_init();
        if (ret < 0) {
                pr_crit("Cannot register sysctls\n");
                goto error_sysctls;
        }

#ifdef CONFIG_PROC_FS
        proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
        proc_create("rxrpc_conns", 0, init_net.proc_net,
                    &rxrpc_connection_seq_fops);
#endif
        return 0;

error_sysctls:
        unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
        unregister_key_type(&key_type_rxrpc);
error_key_type:
        sock_unregister(PF_RXRPC);
error_sock:
        proto_unregister(&rxrpc_proto);
error_proto:
        rxrpc_exit_security();
error_security:
        destroy_workqueue(rxrpc_workqueue);
error_work_queue:
        kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
        return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
        _enter("");
        rxrpc_sysctl_exit();
        unregister_key_type(&key_type_rxrpc_s);
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
        proto_unregister(&rxrpc_proto);
        rxrpc_destroy_all_calls();
        rxrpc_destroy_all_connections();
        ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
        rxrpc_destroy_all_locals();

        remove_proc_entry("rxrpc_conns", init_net.proc_net);
        remove_proc_entry("rxrpc_calls", init_net.proc_net);
        destroy_workqueue(rxrpc_workqueue);
        rxrpc_exit_security();
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);