linux/net/rds/af_rds.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/poll.h>
#include <net/sock.h>

#include "rds.h"

/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
static unsigned long rds_sock_count;
static LIST_HEAD(rds_sock_list);
DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq);

/*
 * This is called as the final descriptor referencing this socket is closed.
 * We have to unbind the socket so that another socket can be bound to the
 * address it was using.
 *
 * We have to be careful about racing with the incoming path.  sock_orphan()
 * sets SOCK_DEAD and we use that as an indicator to the rx path that new
 * messages shouldn't be queued.
 */
static int rds_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs;

        if (!sk)
                goto out;

        rs = rds_sk_to_rs(sk);

        sock_orphan(sk);
        /* Note - rds_clear_recv_queue grabs rs_recv_lock, so
         * that ensures the recv path has completed messing
         * with the socket. */
        rds_clear_recv_queue(rs);
        rds_cong_remove_socket(rs);

        rds_remove_bound(rs);

        rds_send_drop_to(rs, NULL);
        rds_rdma_drop_keys(rs);
        rds_notify_queue_get(rs, NULL);

        spin_lock_bh(&rds_sock_lock);
        list_del_init(&rs->rs_item);
        rds_sock_count--;
        spin_unlock_bh(&rds_sock_lock);

        rds_trans_put(rs->rs_transport);

        sock->sk = NULL;
        sock_put(sk);
out:
        return 0;
}

/*
 * Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
 * _bh() isn't OK here, we're called from interrupt handlers.  It's probably OK
 * to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but
 * this seems more conservative.
 * NB - normally, one would use sk_callback_lock for this, but we can
 * get here from interrupts, whereas the network code grabs sk_callback_lock
 * with _lock_bh only - so relying on sk_callback_lock introduces livelocks.
 */
void rds_wake_sk_sleep(struct rds_sock *rs)
{
        unsigned long flags;

        read_lock_irqsave(&rs->rs_recv_lock, flags);
        __rds_wake_sk_sleep(rds_rs_to_sk(rs));
        read_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

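/*
 * getname() / getpeername(): report the locally bound address or, when
 * peer is set, the destination recorded by a previous connect().
 */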
static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
                       int *uaddr_len, int peer)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);

        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));

        /* racey, don't care */
        if (peer) {
                if (!rs->rs_conn_addr)
                        return -ENOTCONN;

                sin->sin_port = rs->rs_conn_port;
                sin->sin_addr.s_addr = rs->rs_conn_addr;
        } else {
                sin->sin_port = rs->rs_bound_port;
                sin->sin_addr.s_addr = rs->rs_bound_addr;
        }

        sin->sin_family = AF_INET;

        *uaddr_len = sizeof(*sin);
        return 0;
}

/*
 * RDS' poll is without a doubt the least intuitive part of the interface,
 * as POLLIN and POLLOUT do not behave entirely as you would expect from
 * a network protocol.
 *
 * POLLIN is asserted if
 *  -   there is data on the receive queue, or
 *  -   a previously congested destination may have become uncongested, or
 *  -   a notification has been queued to the socket (this can be a congestion
 *      update or an RDMA completion).
 *
 * POLLOUT is asserted if there is room on the send queue.  This does not
 * mean, however, that the next sendmsg() call will succeed.  If the
 * application tries to send to a congested destination, the system call may
 * still fail (and return ENOBUFS).
 */
static unsigned int rds_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        unsigned int mask = 0;
        unsigned long flags;

        poll_wait(file, sk_sleep(sk), wait);

        if (rs->rs_seen_congestion)
                poll_wait(file, &rds_poll_waitq, wait);

        read_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!rs->rs_cong_monitor) {
                /* When a congestion map was updated, we signal POLLIN for
                 * "historical" reasons. Applications can also poll for
                 * WRBAND instead. */
                if (rds_cong_updated_since(&rs->rs_cong_track))
                        mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
        } else {
                spin_lock(&rs->rs_lock);
                if (rs->rs_cong_notify)
                        mask |= (POLLIN | POLLRDNORM);
                spin_unlock(&rs->rs_lock);
        }
        if (!list_empty(&rs->rs_recv_queue) ||
            !list_empty(&rs->rs_notify_queue))
                mask |= (POLLIN | POLLRDNORM);
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
                mask |= (POLLOUT | POLLWRNORM);
        read_unlock_irqrestore(&rs->rs_recv_lock, flags);

        /* clear state any time we wake a seen-congested socket */
        if (mask)
                rs->rs_seen_congestion = 0;

        return mask;
}

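/* There are no RDS-specific socket ioctls. */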
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -ENOIOCTLCMD;
}

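/*
 * RDS_CANCEL_SENT_TO: cancel pending messages destined for the address
 * passed in optval.
 */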
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
                              int len)
{
        struct sockaddr_in sin;
        int ret = 0;

        /* racing with another thread binding seems ok here */
        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (len < sizeof(struct sockaddr_in)) {
                ret = -EINVAL;
                goto out;
        }

        if (copy_from_user(&sin, optval, sizeof(sin))) {
                ret = -EFAULT;
                goto out;
        }

        rds_send_drop_to(rs, &sin);
out:
        return ret;
}

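/* Read an int from userspace and store it as a 0/1 flag. */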
static int rds_set_bool_option(unsigned char *optvar, char __user *optval,
                               int optlen)
{
        int value;

        if (optlen < sizeof(int))
                return -EINVAL;
        if (get_user(value, (int __user *) optval))
                return -EFAULT;
        *optvar = !!value;
        return 0;
}

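/*
 * RDS_CONG_MONITOR: enable or disable congestion monitoring.  When it is
 * switched off, any pending congestion state on the socket is cleared.
 */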
static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
                            int optlen)
{
        int ret;

        ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen);
        if (ret == 0) {
                if (rs->rs_cong_monitor) {
                        rds_cong_add_socket(rs);
                } else {
                        rds_cong_remove_socket(rs);
                        rs->rs_cong_mask = 0;
                        rs->rs_cong_notify = 0;
                }
        }
        return ret;
}

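/*
 * SO_RDS_TRANSPORT: pin the socket to a specific transport (for example
 * RDS_TRANS_TCP) before one is attached implicitly by bind().  The choice
 * can be made only once per socket.
 */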
static int rds_set_transport(struct rds_sock *rs, char __user *optval,
                             int optlen)
{
        int t_type;

        if (rs->rs_transport)
                return -EOPNOTSUPP; /* previously attached to transport */

        if (optlen != sizeof(int))
                return -EINVAL;

        if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
                return -EFAULT;

        if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
                return -EINVAL;

        rs->rs_transport = rds_trans_get(t_type);

        return rs->rs_transport ? 0 : -ENOPROTOOPT;
}

static int rds_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);
        int ret;

        if (level != SOL_RDS) {
                ret = -ENOPROTOOPT;
                goto out;
        }

        switch (optname) {
        case RDS_CANCEL_SENT_TO:
                ret = rds_cancel_sent_to(rs, optval, optlen);
                break;
        case RDS_GET_MR:
                ret = rds_get_mr(rs, optval, optlen);
                break;
        case RDS_GET_MR_FOR_DEST:
                ret = rds_get_mr_for_dest(rs, optval, optlen);
                break;
        case RDS_FREE_MR:
                ret = rds_free_mr(rs, optval, optlen);
                break;
        case RDS_RECVERR:
                ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen);
                break;
        case RDS_CONG_MONITOR:
                ret = rds_cong_monitor(rs, optval, optlen);
                break;
        case SO_RDS_TRANSPORT:
                lock_sock(sock->sk);
                ret = rds_set_transport(rs, optval, optlen);
                release_sock(sock->sk);
                break;
        default:
                ret = -ENOPROTOOPT;
        }
out:
        return ret;
}

static int rds_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);
        int ret = -ENOPROTOOPT, len;
        int trans;

        if (level != SOL_RDS)
                goto out;

        if (get_user(len, optlen)) {
                ret = -EFAULT;
                goto out;
        }

        switch (optname) {
        case RDS_INFO_FIRST ... RDS_INFO_LAST:
                ret = rds_info_getsockopt(sock, optname, optval,
                                          optlen);
                break;

        case RDS_RECVERR:
                if (len < sizeof(int))
                        ret = -EINVAL;
                else if (put_user(rs->rs_recverr, (int __user *) optval) ||
                         put_user(sizeof(int), optlen))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        case SO_RDS_TRANSPORT:
                if (len < sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                trans = (rs->rs_transport ? rs->rs_transport->t_type :
                         RDS_TRANS_NONE); /* unbound */
                if (put_user(trans, (int __user *)optval) ||
                    put_user(sizeof(int), optlen))
                        ret = -EFAULT;
                else
                        ret = 0;
                break;
        default:
                break;
        }

out:
        return ret;
}

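/*
 * connect() on an RDS socket only records the default destination used by
 * later sendmsg() calls that don't pass an explicit address; no messages
 * are exchanged with the peer here.
 */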
static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags)
{
        struct sock *sk = sock->sk;
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;

        lock_sock(sk);

        if (addr_len != sizeof(struct sockaddr_in)) {
                ret = -EINVAL;
                goto out;
        }

        if (sin->sin_family != AF_INET) {
                ret = -EAFNOSUPPORT;
                goto out;
        }

        if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
                ret = -EDESTADDRREQ;
                goto out;
        }

        rs->rs_conn_addr = sin->sin_addr.s_addr;
        rs->rs_conn_port = sin->sin_port;

out:
        release_sock(sk);
        return ret;
}

static struct proto rds_proto = {
        .name     = "RDS",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct rds_sock),
};

static const struct proto_ops rds_proto_ops = {
        .family =       AF_RDS,
        .owner =        THIS_MODULE,
        .release =      rds_release,
        .bind =         rds_bind,
        .connect =      rds_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      rds_getname,
        .poll =         rds_poll,
        .ioctl =        rds_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   rds_setsockopt,
        .getsockopt =   rds_getsockopt,
        .sendmsg =      rds_sendmsg,
        .recvmsg =      rds_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
};

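/* By the time the socket is destructed it must have been removed from
 * rds_sock_list; warn if the list item is still linked. */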
static void rds_sock_destruct(struct sock *sk)
{
        struct rds_sock *rs = rds_sk_to_rs(sk);

        WARN_ON((&rs->rs_item != rs->rs_item.next ||
                 &rs->rs_item != rs->rs_item.prev));
}

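/* Initialize the rds_sock private state and add it to the global socket
 * list used for stats/info reporting. */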
static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
{
        struct rds_sock *rs;

        sock_init_data(sock, sk);
        sock->ops               = &rds_proto_ops;
        sk->sk_protocol         = protocol;
        sk->sk_destruct         = rds_sock_destruct;

        rs = rds_sk_to_rs(sk);
        spin_lock_init(&rs->rs_lock);
        rwlock_init(&rs->rs_recv_lock);
        INIT_LIST_HEAD(&rs->rs_send_queue);
        INIT_LIST_HEAD(&rs->rs_recv_queue);
        INIT_LIST_HEAD(&rs->rs_notify_queue);
        INIT_LIST_HEAD(&rs->rs_cong_list);
        spin_lock_init(&rs->rs_rdma_lock);
        rs->rs_rdma_keys = RB_ROOT;

        spin_lock_bh(&rds_sock_lock);
        list_add_tail(&rs->rs_item, &rds_sock_list);
        rds_sock_count++;
        spin_unlock_bh(&rds_sock_lock);

        return 0;
}

static int rds_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;

        if (sock->type != SOCK_SEQPACKET || protocol)
                return -ESOCKTNOSUPPORT;

        sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
        if (!sk)
                return -ENOMEM;

        return __rds_create(sock, sk, protocol);
}

void rds_sock_addref(struct rds_sock *rs)
{
        sock_hold(rds_rs_to_sk(rs));
}

void rds_sock_put(struct rds_sock *rs)
{
        sock_put(rds_rs_to_sk(rs));
}

static const struct net_proto_family rds_family_ops = {
        .family =       AF_RDS,
        .create =       rds_create,
        .owner  =       THIS_MODULE,
};

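/*
 * RDS_INFO_RECV_MESSAGES: walk every socket's receive queue and copy one
 * rds_info_message entry per queued incoming message.
 */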
static void rds_sock_inc_info(struct socket *sock, unsigned int len,
                              struct rds_info_iterator *iter,
                              struct rds_info_lengths *lens)
{
        struct rds_sock *rs;
        struct rds_incoming *inc;
        unsigned int total = 0;

        len /= sizeof(struct rds_info_message);

        spin_lock_bh(&rds_sock_lock);

        list_for_each_entry(rs, &rds_sock_list, rs_item) {
                read_lock(&rs->rs_recv_lock);

                /* XXX too lazy to maintain counts.. */
                list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
                        total++;
                        if (total <= len)
                                rds_inc_info_copy(inc, iter, inc->i_saddr,
                                                  rs->rs_bound_addr, 1);
                }

                read_unlock(&rs->rs_recv_lock);
        }

        spin_unlock_bh(&rds_sock_lock);

        lens->nr = total;
        lens->each = sizeof(struct rds_info_message);
}

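/*
 * RDS_INFO_SOCKETS: copy one rds_info_socket entry per socket, but only if
 * the caller supplied enough room for all of them.
 */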
static void rds_sock_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens)
{
        struct rds_info_socket sinfo;
        struct rds_sock *rs;

        len /= sizeof(struct rds_info_socket);

        spin_lock_bh(&rds_sock_lock);

        if (len < rds_sock_count)
                goto out;

        list_for_each_entry(rs, &rds_sock_list, rs_item) {
                sinfo.sndbuf = rds_sk_sndbuf(rs);
                sinfo.rcvbuf = rds_sk_rcvbuf(rs);
                sinfo.bound_addr = rs->rs_bound_addr;
                sinfo.connected_addr = rs->rs_conn_addr;
                sinfo.bound_port = rs->rs_bound_port;
                sinfo.connected_port = rs->rs_conn_port;
                sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));

                rds_info_copy(iter, &sinfo, sizeof(sinfo));
        }

out:
        lens->nr = rds_sock_count;
        lens->each = sizeof(struct rds_info_socket);

        spin_unlock_bh(&rds_sock_lock);
}

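/* Module unload: unregister the socket family and tear down RDS state. */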
static void rds_exit(void)
{
        sock_unregister(rds_family_ops.family);
        proto_unregister(&rds_proto);
        rds_conn_exit();
        rds_cong_exit();
        rds_sysctl_exit();
        rds_threads_exit();
        rds_stats_exit();
        rds_page_exit();
        rds_bind_lock_destroy();
        rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info);
        rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
}
module_exit(rds_exit);

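/* Module load: bring up the RDS subsystems, then register the protocol and
 * the AF_RDS socket family.  Each error path unwinds what has already been
 * set up. */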
static int rds_init(void)
{
        int ret;

        ret = rds_bind_lock_init();
        if (ret)
                goto out;

        ret = rds_conn_init();
        if (ret)
                goto out_bind;

        ret = rds_threads_init();
        if (ret)
                goto out_conn;
        ret = rds_sysctl_init();
        if (ret)
                goto out_threads;
        ret = rds_stats_init();
        if (ret)
                goto out_sysctl;
        ret = proto_register(&rds_proto, 1);
        if (ret)
                goto out_stats;
        ret = sock_register(&rds_family_ops);
        if (ret)
                goto out_proto;

        rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
        rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);

        goto out;

out_proto:
        proto_unregister(&rds_proto);
out_stats:
        rds_stats_exit();
out_sysctl:
        rds_sysctl_exit();
out_threads:
        rds_threads_exit();
out_conn:
        rds_conn_exit();
        rds_cong_exit();
        rds_page_exit();
out_bind:
        rds_bind_lock_destroy();
out:
        return ret;
}
module_init(rds_init);

#define DRV_VERSION     "4.0"
#define DRV_RELDATE     "Feb 12, 2009"

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets"
                   " v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NETPROTO(PF_RDS);