linux/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lnet/klnds/socklnd/socklnd.c
  37 *
  38 * Author: Zach Brown <zab@zabbo.net>
  39 * Author: Peter J. Braam <braam@clusterfs.com>
  40 * Author: Phil Schwan <phil@clusterfs.com>
  41 * Author: Eric Barton <eric@bartonsoftware.com>
  42 */
  43
  44#include "socklnd.h"
  45
  46static lnd_t the_ksocklnd;
  47ksock_nal_data_t ksocknal_data;
  48
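/* Scan @ni's interface table for the interface configured with address @ip;
 * return it, or NULL if no interface matches. */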
  49static ksock_interface_t *
  50ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
  51{
  52        ksock_net_t *net = ni->ni_data;
  53        int i;
  54        ksock_interface_t *iface;
  55
  56        for (i = 0; i < net->ksnn_ninterfaces; i++) {
  57                LASSERT(i < LNET_MAX_INTERFACES);
  58                iface = &net->ksnn_interfaces[i];
  59
  60                if (iface->ksni_ipaddr == ip)
  61                        return iface;
  62        }
  63
  64        return NULL;
  65}
  66
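/* Allocate and initialise a route to @ipaddr:@port with a single reference
 * for the caller; the route is not yet bound to a peer. */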
  67static ksock_route_t *
  68ksocknal_create_route(__u32 ipaddr, int port)
  69{
  70        ksock_route_t *route;
  71
  72        LIBCFS_ALLOC(route, sizeof(*route));
  73        if (route == NULL)
  74                return NULL;
  75
  76        atomic_set(&route->ksnr_refcount, 1);
  77        route->ksnr_peer = NULL;
  78        route->ksnr_retry_interval = 0;  /* OK to connect at any time */
  79        route->ksnr_ipaddr = ipaddr;
  80        route->ksnr_port = port;
  81        route->ksnr_scheduled = 0;
  82        route->ksnr_connecting = 0;
  83        route->ksnr_connected = 0;
  84        route->ksnr_deleted = 0;
  85        route->ksnr_conn_count = 0;
  86        route->ksnr_share_count = 0;
  87
  88        return route;
  89}
  90
  91void
  92ksocknal_destroy_route(ksock_route_t *route)
  93{
  94        LASSERT(atomic_read(&route->ksnr_refcount) == 0);
  95
  96        if (route->ksnr_peer != NULL)
  97                ksocknal_peer_decref(route->ksnr_peer);
  98
  99        LIBCFS_FREE(route, sizeof(*route));
 100}
 101
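/* Allocate a peer for @id on @ni, holding one reference for the caller.
 * Fails with -ESHUTDOWN if the network is shutting down; otherwise the
 * net's peer count is bumped. */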
 102static int
 103ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 104{
 105        ksock_net_t *net = ni->ni_data;
 106        ksock_peer_t *peer;
 107
 108        LASSERT(id.nid != LNET_NID_ANY);
 109        LASSERT(id.pid != LNET_PID_ANY);
 110        LASSERT(!in_interrupt());
 111
 112        LIBCFS_ALLOC(peer, sizeof(*peer));
 113        if (peer == NULL)
 114                return -ENOMEM;
 115
 116        peer->ksnp_ni = ni;
 117        peer->ksnp_id = id;
 118        atomic_set(&peer->ksnp_refcount, 1);   /* 1 ref for caller */
 119        peer->ksnp_closing = 0;
 120        peer->ksnp_accepting = 0;
 121        peer->ksnp_proto = NULL;
 122        peer->ksnp_last_alive = 0;
 123        peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 124
 125        INIT_LIST_HEAD(&peer->ksnp_conns);
 126        INIT_LIST_HEAD(&peer->ksnp_routes);
 127        INIT_LIST_HEAD(&peer->ksnp_tx_queue);
 128        INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
 129        spin_lock_init(&peer->ksnp_lock);
 130
 131        spin_lock_bh(&net->ksnn_lock);
 132
 133        if (net->ksnn_shutdown) {
 134                spin_unlock_bh(&net->ksnn_lock);
 135
 136                LIBCFS_FREE(peer, sizeof(*peer));
 137                CERROR("Can't create peer: network shutdown\n");
 138                return -ESHUTDOWN;
 139        }
 140
 141        net->ksnn_npeers++;
 142
 143        spin_unlock_bh(&net->ksnn_lock);
 144
 145        *peerp = peer;
 146        return 0;
 147}
 148
 149void
 150ksocknal_destroy_peer(ksock_peer_t *peer)
 151{
 152        ksock_net_t *net = peer->ksnp_ni->ni_data;
 153
 154        CDEBUG(D_NET, "peer %s %p deleted\n",
 155                libcfs_id2str(peer->ksnp_id), peer);
 156
 157        LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
 158        LASSERT(peer->ksnp_accepting == 0);
 159        LASSERT(list_empty(&peer->ksnp_conns));
 160        LASSERT(list_empty(&peer->ksnp_routes));
 161        LASSERT(list_empty(&peer->ksnp_tx_queue));
 162        LASSERT(list_empty(&peer->ksnp_zc_req_list));
 163
 164        LIBCFS_FREE(peer, sizeof(*peer));
 165
 166        /* NB a peer's connections and routes keep a reference on their peer
 167         * until they are destroyed, so we can be assured that _all_ state to
 168         * do with this peer has been cleaned up when its refcount drops to
 169         * zero. */
 170        spin_lock_bh(&net->ksnn_lock);
 171        net->ksnn_npeers--;
 172        spin_unlock_bh(&net->ksnn_lock);
 173}
 174
 175ksock_peer_t *
 176ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
 177{
 178        struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
 179        struct list_head *tmp;
 180        ksock_peer_t *peer;
 181
 182        list_for_each(tmp, peer_list) {
 183
 184                peer = list_entry(tmp, ksock_peer_t, ksnp_list);
 185
 186                LASSERT(!peer->ksnp_closing);
 187
 188                if (peer->ksnp_ni != ni)
 189                        continue;
 190
 191                if (peer->ksnp_id.nid != id.nid ||
 192                    peer->ksnp_id.pid != id.pid)
 193                        continue;
 194
 195                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
 196                       peer, libcfs_id2str(id),
 197                       atomic_read(&peer->ksnp_refcount));
 198                return peer;
 199        }
 200        return NULL;
 201}
 202
 203ksock_peer_t *
 204ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
 205{
 206        ksock_peer_t *peer;
 207
 208        read_lock(&ksocknal_data.ksnd_global_lock);
 209        peer = ksocknal_find_peer_locked(ni, id);
 210        if (peer != NULL)                       /* +1 ref for caller? */
 211                ksocknal_peer_addref(peer);
 212        read_unlock(&ksocknal_data.ksnd_global_lock);
 213
 214        return peer;
 215}
 216
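/* Unlink @peer from the peer hash table: drop the peer counts on the
 * interfaces recorded in ksnp_passive_ips[], mark the peer closing and
 * release the table's reference.  Caller holds ksnd_global_lock
 * exclusively. */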
 217static void
 218ksocknal_unlink_peer_locked(ksock_peer_t *peer)
 219{
 220        int i;
 221        __u32 ip;
 222        ksock_interface_t *iface;
 223
 224        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
 225                LASSERT(i < LNET_MAX_INTERFACES);
 226                ip = peer->ksnp_passive_ips[i];
 227
 228                iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
 229                /* All IPs in peer->ksnp_passive_ips[] come from the
 230                 * interface list, therefore the call must succeed. */
 231                LASSERT(iface != NULL);
 232
 233                CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
 234                       peer, iface, iface->ksni_nroutes);
 235                iface->ksni_npeers--;
 236        }
 237
 238        LASSERT(list_empty(&peer->ksnp_conns));
 239        LASSERT(list_empty(&peer->ksnp_routes));
 240        LASSERT(!peer->ksnp_closing);
 241        peer->ksnp_closing = 1;
 242        list_del(&peer->ksnp_list);
 243        /* lose peerlist's ref */
 244        ksocknal_peer_decref(peer);
 245}
 246
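/* Report the index'th peer entry on @ni; depending on the entry this is
 * the bare peer id, a passive interface address, or a route's addresses,
 * port, connection count and share count.  Returns -ENOENT when the index
 * runs past the last entry. */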
 247static int
 248ksocknal_get_peer_info(lnet_ni_t *ni, int index,
 249                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
 250                        int *port, int *conn_count, int *share_count)
 251{
 252        ksock_peer_t *peer;
 253        struct list_head *ptmp;
 254        ksock_route_t *route;
 255        struct list_head *rtmp;
 256        int i;
 257        int j;
 258        int rc = -ENOENT;
 259
 260        read_lock(&ksocknal_data.ksnd_global_lock);
 261
 262        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 263
 264                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
 265                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 266
 267                        if (peer->ksnp_ni != ni)
 268                                continue;
 269
 270                        if (peer->ksnp_n_passive_ips == 0 &&
 271                            list_empty(&peer->ksnp_routes)) {
 272                                if (index-- > 0)
 273                                        continue;
 274
 275                                *id = peer->ksnp_id;
 276                                *myip = 0;
 277                                *peer_ip = 0;
 278                                *port = 0;
 279                                *conn_count = 0;
 280                                *share_count = 0;
 281                                rc = 0;
 282                                goto out;
 283                        }
 284
 285                        for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
 286                                if (index-- > 0)
 287                                        continue;
 288
 289                                *id = peer->ksnp_id;
 290                                *myip = peer->ksnp_passive_ips[j];
 291                                *peer_ip = 0;
 292                                *port = 0;
 293                                *conn_count = 0;
 294                                *share_count = 0;
 295                                rc = 0;
 296                                goto out;
 297                        }
 298
 299                        list_for_each(rtmp, &peer->ksnp_routes) {
 300                                if (index-- > 0)
 301                                        continue;
 302
 303                                route = list_entry(rtmp, ksock_route_t,
 304                                                       ksnr_list);
 305
 306                                *id = peer->ksnp_id;
 307                                *myip = route->ksnr_myipaddr;
 308                                *peer_ip = route->ksnr_ipaddr;
 309                                *port = route->ksnr_port;
 310                                *conn_count = route->ksnr_conn_count;
 311                                *share_count = route->ksnr_share_count;
 312                                rc = 0;
 313                                goto out;
 314                        }
 315                }
 316        }
 317 out:
 318        read_unlock(&ksocknal_data.ksnd_global_lock);
 319        return rc;
 320}
 321
 322static void
 323ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 324{
 325        ksock_peer_t *peer = route->ksnr_peer;
 326        int type = conn->ksnc_type;
 327        ksock_interface_t *iface;
 328
 329        conn->ksnc_route = route;
 330        ksocknal_route_addref(route);
 331
 332        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
 333                if (route->ksnr_myipaddr == 0) {
 334                        /* route wasn't bound locally yet (the initial route) */
 335                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
 336                               libcfs_id2str(peer->ksnp_id),
 337                               &route->ksnr_ipaddr,
 338                               &conn->ksnc_myipaddr);
 339                } else {
 340                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
 341                               libcfs_id2str(peer->ksnp_id),
 342                               &route->ksnr_ipaddr,
 343                               &route->ksnr_myipaddr,
 344                               &conn->ksnc_myipaddr);
 345
 346                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 347                                                  route->ksnr_myipaddr);
 348                        if (iface != NULL)
 349                                iface->ksni_nroutes--;
 350                }
 351                route->ksnr_myipaddr = conn->ksnc_myipaddr;
 352                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 353                                          route->ksnr_myipaddr);
 354                if (iface != NULL)
 355                        iface->ksni_nroutes++;
 356        }
 357
 358        route->ksnr_connected |= (1<<type);
 359        route->ksnr_conn_count++;
 360
 361        /* Successful connection => further attempts can
 362         * proceed immediately */
 363        route->ksnr_retry_interval = 0;
 364}
 365
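/* Attach @route to @peer (the peer's route list inherits the caller's
 * reference on the route) and associate it with any existing connections
 * to the same remote address.  A duplicate route triggers an LBUG. */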
 366static void
 367ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
 368{
 369        struct list_head *tmp;
 370        ksock_conn_t *conn;
 371        ksock_route_t *route2;
 372
 373        LASSERT(!peer->ksnp_closing);
 374        LASSERT(route->ksnr_peer == NULL);
 375        LASSERT(!route->ksnr_scheduled);
 376        LASSERT(!route->ksnr_connecting);
 377        LASSERT(route->ksnr_connected == 0);
 378
 379        /* LASSERT(unique) */
 380        list_for_each(tmp, &peer->ksnp_routes) {
 381                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
 382
 383                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
 384                        CERROR("Duplicate route %s %pI4h\n",
 385                                libcfs_id2str(peer->ksnp_id),
 386                                &route->ksnr_ipaddr);
 387                        LBUG();
 388                }
 389        }
 390
 391        route->ksnr_peer = peer;
 392        ksocknal_peer_addref(peer);
 393        /* peer's routelist takes over my ref on 'route' */
 394        list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
 395
 396        list_for_each(tmp, &peer->ksnp_conns) {
 397                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 398
 399                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
 400                        continue;
 401
 402                ksocknal_associate_route_conn_locked(route, conn);
 403                /* keep going (typed routes) */
 404        }
 405}
 406
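/* Delete @route: close any connections using it, drop the route count on
 * the interface it was bound to, and unlink it from its peer.  If this was
 * the peer's last route and it has no connections left, the peer is
 * unlinked too. */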
 407static void
 408ksocknal_del_route_locked(ksock_route_t *route)
 409{
 410        ksock_peer_t *peer = route->ksnr_peer;
 411        ksock_interface_t *iface;
 412        ksock_conn_t *conn;
 413        struct list_head *ctmp;
 414        struct list_head *cnxt;
 415
 416        LASSERT(!route->ksnr_deleted);
 417
 418        /* Close associated conns */
 419        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
 420                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 421
 422                if (conn->ksnc_route != route)
 423                        continue;
 424
 425                ksocknal_close_conn_locked(conn, 0);
 426        }
 427
 428        if (route->ksnr_myipaddr != 0) {
 429                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 430                                          route->ksnr_myipaddr);
 431                if (iface != NULL)
 432                        iface->ksni_nroutes--;
 433        }
 434
 435        route->ksnr_deleted = 1;
 436        list_del(&route->ksnr_list);
 437        ksocknal_route_decref(route);        /* drop peer's ref */
 438
 439        if (list_empty(&peer->ksnp_routes) &&
 440            list_empty(&peer->ksnp_conns)) {
 441                /* I've just removed the last route to a peer with no active
 442                 * connections */
 443                ksocknal_unlink_peer_locked(peer);
 444        }
 445}
 446
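/* Add an explicitly configured route from @ni to @id at @ipaddr:@port,
 * creating the peer if it doesn't already exist.  If an identical route is
 * already present, its share count is bumped instead. */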
 447int
 448ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 449{
 450        struct list_head *tmp;
 451        ksock_peer_t *peer;
 452        ksock_peer_t *peer2;
 453        ksock_route_t *route;
 454        ksock_route_t *route2;
 455        int rc;
 456
 457        if (id.nid == LNET_NID_ANY ||
 458            id.pid == LNET_PID_ANY)
 459                return -EINVAL;
 460
 461        /* Have a brand new peer ready... */
 462        rc = ksocknal_create_peer(&peer, ni, id);
 463        if (rc != 0)
 464                return rc;
 465
 466        route = ksocknal_create_route(ipaddr, port);
 467        if (route == NULL) {
 468                ksocknal_peer_decref(peer);
 469                return -ENOMEM;
 470        }
 471
 472        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 473
 474        /* always called with a ref on ni, so shutdown can't have started */
 475        LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
 476
 477        peer2 = ksocknal_find_peer_locked(ni, id);
 478        if (peer2 != NULL) {
 479                ksocknal_peer_decref(peer);
 480                peer = peer2;
 481        } else {
 482                /* peer table takes my ref on peer */
 483                list_add_tail(&peer->ksnp_list,
 484                                   ksocknal_nid2peerlist(id.nid));
 485        }
 486
 487        route2 = NULL;
 488        list_for_each(tmp, &peer->ksnp_routes) {
 489                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
 490
 491                if (route2->ksnr_ipaddr == ipaddr)
 492                        break;
 493
 494                route2 = NULL;
 495        }
 496        if (route2 == NULL) {
 497                ksocknal_add_route_locked(peer, route);
 498                route->ksnr_share_count++;
 499        } else {
 500                ksocknal_route_decref(route);
 501                route2->ksnr_share_count++;
 502        }
 503
 504        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 505
 506        return 0;
 507}
 508
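/* Remove routes to @ip (or all routes if @ip is zero) from @peer.  If no
 * explicitly shared routes remain, the remaining auto-created routes and
 * all connections are removed as well; the peer unlinks itself when its
 * last conn/route goes. */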
 509static void
 510ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
 511{
 512        ksock_conn_t *conn;
 513        ksock_route_t *route;
 514        struct list_head *tmp;
 515        struct list_head *nxt;
 516        int nshared;
 517
 518        LASSERT(!peer->ksnp_closing);
 519
 520        /* Extra ref prevents peer disappearing until I'm done with it */
 521        ksocknal_peer_addref(peer);
 522
 523        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
 524                route = list_entry(tmp, ksock_route_t, ksnr_list);
 525
 526                /* no match */
 527                if (!(ip == 0 || route->ksnr_ipaddr == ip))
 528                        continue;
 529
 530                route->ksnr_share_count = 0;
 531                /* This deletes associated conns too */
 532                ksocknal_del_route_locked(route);
 533        }
 534
 535        nshared = 0;
 536        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
 537                route = list_entry(tmp, ksock_route_t, ksnr_list);
 538                nshared += route->ksnr_share_count;
 539        }
 540
 541        if (nshared == 0) {
 542                /* remove everything else if there are no explicit entries
 543                 * left */
 544
 545                list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
 546                        route = list_entry(tmp, ksock_route_t, ksnr_list);
 547
 548                        /* we should only be removing auto-entries */
 549                        LASSERT(route->ksnr_share_count == 0);
 550                        ksocknal_del_route_locked(route);
 551                }
 552
 553                list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
 554                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 555
 556                        ksocknal_close_conn_locked(conn, 0);
 557                }
 558        }
 559
 560        ksocknal_peer_decref(peer);
 561        /* NB peer unlinks itself when last conn/route is removed */
 562}
 563
 564static int
 565ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 566{
 567        LIST_HEAD(zombies);
 568        struct list_head *ptmp;
 569        struct list_head *pnxt;
 570        ksock_peer_t *peer;
 571        int lo;
 572        int hi;
 573        int i;
 574        int rc = -ENOENT;
 575
 576        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 577
 578        if (id.nid != LNET_NID_ANY)
 579                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
 580        else {
 581                lo = 0;
 582                hi = ksocknal_data.ksnd_peer_hash_size - 1;
 583        }
 584
 585        for (i = lo; i <= hi; i++) {
 586                list_for_each_safe(ptmp, pnxt,
 587                                        &ksocknal_data.ksnd_peers[i]) {
 588                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 589
 590                        if (peer->ksnp_ni != ni)
 591                                continue;
 592
 593                        if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
 594                              (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
 595                                continue;
 596
 597                        ksocknal_peer_addref(peer);     /* a ref for me... */
 598
 599                        ksocknal_del_peer_locked(peer, ip);
 600
 601                        if (peer->ksnp_closing &&
 602                            !list_empty(&peer->ksnp_tx_queue)) {
 603                                LASSERT(list_empty(&peer->ksnp_conns));
 604                                LASSERT(list_empty(&peer->ksnp_routes));
 605
 606                                list_splice_init(&peer->ksnp_tx_queue,
 607                                                     &zombies);
 608                        }
 609
 610                        ksocknal_peer_decref(peer);     /* ...till here */
 611
 612                        rc = 0;          /* matched! */
 613                }
 614        }
 615
 616        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 617
 618        ksocknal_txlist_done(ni, &zombies, 1);
 619
 620        return rc;
 621}
 622
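/* Return the index'th connection on @ni with a reference held for the
 * caller, or NULL if the index is out of range. */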
 623static ksock_conn_t *
 624ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
 625{
 626        ksock_peer_t *peer;
 627        struct list_head *ptmp;
 628        ksock_conn_t *conn;
 629        struct list_head *ctmp;
 630        int i;
 631
 632        read_lock(&ksocknal_data.ksnd_global_lock);
 633
 634        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 635                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
 636                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
 637
 638                        LASSERT(!peer->ksnp_closing);
 639
 640                        if (peer->ksnp_ni != ni)
 641                                continue;
 642
 643                        list_for_each(ctmp, &peer->ksnp_conns) {
 644                                if (index-- > 0)
 645                                        continue;
 646
 647                                conn = list_entry(ctmp, ksock_conn_t,
 648                                                       ksnc_list);
 649                                ksocknal_conn_addref(conn);
 650                                read_unlock(&ksocknal_data.ksnd_global_lock);
 651                                return conn;
 652                        }
 653                }
 654        }
 655
 656        read_unlock(&ksocknal_data.ksnd_global_lock);
 657        return NULL;
 658}
 659
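/* Return the scheduler in CPT @cpt that is currently handling the fewest
 * connections. */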
 660static ksock_sched_t *
 661ksocknal_choose_scheduler_locked(unsigned int cpt)
 662{
 663        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
 664        ksock_sched_t *sched;
 665        int i;
 666
 667        LASSERT(info->ksi_nthreads > 0);
 668
 669        sched = &info->ksi_scheds[0];
 670        /*
 671         * NB: it's safe so far, but info->ksi_nthreads could be changed
 672         * at runtime when we have dynamic LNet configuration, then we
 673         * need to take care of this.
 674         */
 675        for (i = 1; i < info->ksi_nthreads; i++) {
 676                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
 677                        sched = &info->ksi_scheds[i];
 678        }
 679
 680        return sched;
 681}
 682
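/* Fill @ipaddrs with this net's interface addresses and return how many
 * there are.  Returns 0 when there is only one interface, since additional
 * connections are only offered on multi-homed nodes. */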
 683static int
 684ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
 685{
 686        ksock_net_t *net = ni->ni_data;
 687        int i;
 688        int nip;
 689
 690        read_lock(&ksocknal_data.ksnd_global_lock);
 691
 692        nip = net->ksnn_ninterfaces;
 693        LASSERT(nip <= LNET_MAX_INTERFACES);
 694
 695        /* Only offer interfaces for additional connections if I have
 696         * more than one. */
 697        if (nip < 2) {
 698                read_unlock(&ksocknal_data.ksnd_global_lock);
 699                return 0;
 700        }
 701
 702        for (i = 0; i < nip; i++) {
 703                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
 704                LASSERT(ipaddrs[i] != 0);
 705        }
 706
 707        read_unlock(&ksocknal_data.ksnd_global_lock);
 708        return nip;
 709}
 710
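/* Return the index of the peer IP in @ips that best matches @iface,
 * preferring an address on the same subnet, then the smallest XOR
 * distance. */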
 711static int
 712ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
 713{
 714        int best_netmatch = 0;
 715        int best_xor      = 0;
 716        int best          = -1;
 717        int this_xor;
 718        int this_netmatch;
 719        int i;
 720
 721        for (i = 0; i < nips; i++) {
 722                if (ips[i] == 0)
 723                        continue;
 724
 725                this_xor = ips[i] ^ iface->ksni_ipaddr;
 726                this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
 727
 728                if (!(best < 0 ||
 729                      best_netmatch < this_netmatch ||
 730                      (best_netmatch == this_netmatch &&
 731                       best_xor > this_xor)))
 732                        continue;
 733
 734                best = i;
 735                best_netmatch = this_netmatch;
 736                best_xor = this_xor;
 737        }
 738
 739        LASSERT(best >= 0);
 740        return best;
 741}
 742
 743static int
 744ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 745{
 746        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
 747        ksock_net_t *net = peer->ksnp_ni->ni_data;
 748        ksock_interface_t *iface;
 749        ksock_interface_t *best_iface;
 750        int n_ips;
 751        int i;
 752        int j;
 753        int k;
 754        __u32 ip;
 755        __u32 xor;
 756        int this_netmatch;
 757        int best_netmatch;
 758        int best_npeers;
 759
 760        /* CAVEAT EMPTOR: We do all our interface matching with an
 761         * exclusive hold of global lock at IRQ priority.  We're only
 762         * expecting to be dealing with small numbers of interfaces, so the
 763         * O(n**3)-ness shouldn't matter */
 764
 765        /* Also note that I'm not going to return more than n_peerips
 766         * interfaces, even if I have more myself */
 767
 768        write_lock_bh(global_lock);
 769
 770        LASSERT(n_peerips <= LNET_MAX_INTERFACES);
 771        LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
 772
 773        /* Only match interfaces for additional connections
 774         * if I have > 1 interface */
 775        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
 776                min(n_peerips, net->ksnn_ninterfaces);
 777
 778        for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
 779                /*            ^ yes really... */
 780
 781                /* If we have any new interfaces, first tick off all the
 782                 * peer IPs that match old interfaces, then choose new
 783                 * interfaces to match the remaining peer IPs.
 784                 * We don't forget interfaces we've stopped using; we might
 785                 * start using them again... */
 786
 787                if (i < peer->ksnp_n_passive_ips) {
 788                        /* Old interface. */
 789                        ip = peer->ksnp_passive_ips[i];
 790                        best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
 791
 792                } else {
 793                        /* choose a new interface */
 794                        LASSERT(i == peer->ksnp_n_passive_ips);
 795
 796                        best_iface = NULL;
 797                        best_netmatch = 0;
 798                        best_npeers = 0;
 799
 800                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
 801                                iface = &net->ksnn_interfaces[j];
 802                                ip = iface->ksni_ipaddr;
 803
 804                                for (k = 0; k < peer->ksnp_n_passive_ips; k++)
 805                                        if (peer->ksnp_passive_ips[k] == ip)
 806                                                break;
 807
 808                                if (k < peer->ksnp_n_passive_ips) /* using it already */
 809                                        continue;
 810
 811                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
 812                                xor = ip ^ peerips[k];
 813                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
 814
 815                                if (!(best_iface == NULL ||
 816                                      best_netmatch < this_netmatch ||
 817                                      (best_netmatch == this_netmatch &&
 818                                       best_npeers > iface->ksni_npeers)))
 819                                        continue;
 820
 821                                best_iface = iface;
 822                                best_netmatch = this_netmatch;
 823                                best_npeers = iface->ksni_npeers;
 824                        }
 825
 826                        best_iface->ksni_npeers++;
 827                        ip = best_iface->ksni_ipaddr;
 828                        peer->ksnp_passive_ips[i] = ip;
 829                        peer->ksnp_n_passive_ips = i+1;
 830                }
 831
 832                /* mark the best matching peer IP used */
 833                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
 834                peerips[j] = 0;
 835        }
 836
 837        /* Overwrite input peer IP addresses */
 838        memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
 839
 840        write_unlock_bh(global_lock);
 841
 842        return n_ips;
 843}
 844
 845static void
 846ksocknal_create_routes(ksock_peer_t *peer, int port,
 847                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
 848{
 849        ksock_route_t *newroute = NULL;
 850        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
 851        lnet_ni_t *ni = peer->ksnp_ni;
 852        ksock_net_t *net = ni->ni_data;
 853        struct list_head *rtmp;
 854        ksock_route_t *route;
 855        ksock_interface_t *iface;
 856        ksock_interface_t *best_iface;
 857        int best_netmatch;
 858        int this_netmatch;
 859        int best_nroutes;
 860        int i;
 861        int j;
 862
 863        /* CAVEAT EMPTOR: We do all our interface matching with an
 864         * exclusive hold of global lock at IRQ priority.  We're only
 865         * expecting to be dealing with small numbers of interfaces, so the
 866         * O(n**3)-ness here shouldn't matter */
 867
 868        write_lock_bh(global_lock);
 869
 870        if (net->ksnn_ninterfaces < 2) {
 871                /* Only create additional connections
 872                 * if I have > 1 interface */
 873                write_unlock_bh(global_lock);
 874                return;
 875        }
 876
 877        LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
 878
 879        for (i = 0; i < npeer_ipaddrs; i++) {
 880                if (newroute != NULL) {
 881                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
 882                } else {
 883                        write_unlock_bh(global_lock);
 884
 885                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
 886                        if (newroute == NULL)
 887                                return;
 888
 889                        write_lock_bh(global_lock);
 890                }
 891
 892                if (peer->ksnp_closing) {
 893                        /* peer got closed under me */
 894                        break;
 895                }
 896
 897                /* Already got a route? */
 898                route = NULL;
 899                list_for_each(rtmp, &peer->ksnp_routes) {
 900                        route = list_entry(rtmp, ksock_route_t, ksnr_list);
 901
 902                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
 903                                break;
 904
 905                        route = NULL;
 906                }
 907                if (route != NULL)
 908                        continue;
 909
 910                best_iface = NULL;
 911                best_nroutes = 0;
 912                best_netmatch = 0;
 913
 914                LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
 915
 916                /* Select interface to connect from */
 917                for (j = 0; j < net->ksnn_ninterfaces; j++) {
 918                        iface = &net->ksnn_interfaces[j];
 919
 920                        /* Using this interface already? */
 921                        list_for_each(rtmp, &peer->ksnp_routes) {
 922                                route = list_entry(rtmp, ksock_route_t,
 923                                                       ksnr_list);
 924
 925                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
 926                                        break;
 927
 928                                route = NULL;
 929                        }
 930                        if (route != NULL)
 931                                continue;
 932
 933                        this_netmatch = (((iface->ksni_ipaddr ^
 934                                           newroute->ksnr_ipaddr) &
 935                                           iface->ksni_netmask) == 0) ? 1 : 0;
 936
 937                        if (!(best_iface == NULL ||
 938                              best_netmatch < this_netmatch ||
 939                              (best_netmatch == this_netmatch &&
 940                               best_nroutes > iface->ksni_nroutes)))
 941                                continue;
 942
 943                        best_iface = iface;
 944                        best_netmatch = this_netmatch;
 945                        best_nroutes = iface->ksni_nroutes;
 946                }
 947
 948                if (best_iface == NULL)
 949                        continue;
 950
 951                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
 952                best_iface->ksni_nroutes++;
 953
 954                ksocknal_add_route_locked(peer, newroute);
 955                newroute = NULL;
 956        }
 957
 958        write_unlock_bh(global_lock);
 959        if (newroute != NULL)
 960                ksocknal_route_decref(newroute);
 961}
 962
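/* Queue an incoming connection on @sock for a connection daemon to handle
 * in thread context; fails with -ENOMEM if the request can't be
 * allocated. */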
 963int
 964ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 965{
 966        ksock_connreq_t *cr;
 967        int rc;
 968        __u32 peer_ip;
 969        int peer_port;
 970
 971        rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
 972        LASSERT(rc == 0);                     /* we succeeded before */
 973
 974        LIBCFS_ALLOC(cr, sizeof(*cr));
 975        if (cr == NULL) {
 976                LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
 977                                   &peer_ip);
 978                return -ENOMEM;
 979        }
 980
 981        lnet_ni_addref(ni);
 982        cr->ksncr_ni   = ni;
 983        cr->ksncr_sock = sock;
 984
 985        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 986
 987        list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
 988        wake_up(&ksocknal_data.ksnd_connd_waitq);
 989
 990        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 991        return 0;
 992}
 993
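/* Return non-zero if @peer has a connection attempt in progress to
 * @ipaddr. */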
 994static int
 995ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
 996{
 997        ksock_route_t *route;
 998
 999        list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
1000
1001                if (route->ksnr_ipaddr == ipaddr)
1002                        return route->ksnr_connecting;
1003        }
1004        return 0;
1005}
1006
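/* Complete the setup of a new connection on @sock: exchange HELLOs,
 * find or create the peer, reject duplicate and stale connections, attach
 * the conn to a scheduler and finally enable the socket callbacks.
 * @route is non-NULL for active (outgoing) connections and NULL for
 * passive (accepted) ones. */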
1007int
1008ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
1009                      struct socket *sock, int type)
1010{
1011        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1012        LIST_HEAD(zombies);
1013        lnet_process_id_t peerid;
1014        struct list_head *tmp;
1015        __u64 incarnation;
1016        ksock_conn_t *conn;
1017        ksock_conn_t *conn2;
1018        ksock_peer_t *peer = NULL;
1019        ksock_peer_t *peer2;
1020        ksock_sched_t *sched;
1021        ksock_hello_msg_t *hello;
1022        int cpt;
1023        ksock_tx_t *tx;
1024        ksock_tx_t *txtmp;
1025        int rc;
1026        int active;
1027        char *warn = NULL;
1028
1029        active = (route != NULL);
1030
1031        LASSERT(active == (type != SOCKLND_CONN_NONE));
1032
1033        LIBCFS_ALLOC(conn, sizeof(*conn));
1034        if (conn == NULL) {
1035                rc = -ENOMEM;
1036                goto failed_0;
1037        }
1038
1039        conn->ksnc_peer = NULL;
1040        conn->ksnc_route = NULL;
1041        conn->ksnc_sock = sock;
1042        /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
1043         * being closed before the connection is established */
1044        atomic_set(&conn->ksnc_sock_refcount, 2);
1045        conn->ksnc_type = type;
1046        ksocknal_lib_save_callback(sock, conn);
1047        atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1048
1049        conn->ksnc_rx_ready = 0;
1050        conn->ksnc_rx_scheduled = 0;
1051
1052        INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1053        conn->ksnc_tx_ready = 0;
1054        conn->ksnc_tx_scheduled = 0;
1055        conn->ksnc_tx_carrier = NULL;
1056        atomic_set(&conn->ksnc_tx_nob, 0);
1057
1058        LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
1059                                     kshm_ips[LNET_MAX_INTERFACES]));
1060        if (hello == NULL) {
1061                rc = -ENOMEM;
1062                goto failed_1;
1063        }
1064
1065        /* stash conn's local and remote addrs */
1066        rc = ksocknal_lib_get_conn_addrs(conn);
1067        if (rc != 0)
1068                goto failed_1;
1069
1070        /* Find out/confirm peer's NID and connection type and get the
1071         * vector of interfaces she's willing to let me connect to.
1072         * Passive connections use the listener timeout since the peer sends
1073         * eagerly */
1074
1075        if (active) {
1076                peer = route->ksnr_peer;
1077                LASSERT(ni == peer->ksnp_ni);
1078
1079                /* Active connection sends HELLO eagerly */
1080                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1081                peerid = peer->ksnp_id;
1082
1083                write_lock_bh(global_lock);
1084                conn->ksnc_proto = peer->ksnp_proto;
1085                write_unlock_bh(global_lock);
1086
1087                if (conn->ksnc_proto == NULL) {
1088                        conn->ksnc_proto = &ksocknal_protocol_v3x;
1089#if SOCKNAL_VERSION_DEBUG
1090                        if (*ksocknal_tunables.ksnd_protocol == 2)
1091                                conn->ksnc_proto = &ksocknal_protocol_v2x;
1092                        else if (*ksocknal_tunables.ksnd_protocol == 1)
1093                                conn->ksnc_proto = &ksocknal_protocol_v1x;
1094#endif
1095                }
1096
1097                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1098                if (rc != 0)
1099                        goto failed_1;
1100        } else {
1101                peerid.nid = LNET_NID_ANY;
1102                peerid.pid = LNET_PID_ANY;
1103
1104                /* Passive, get protocol from peer */
1105                conn->ksnc_proto = NULL;
1106        }
1107
1108        rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
1109        if (rc < 0)
1110                goto failed_1;
1111
1112        LASSERT(rc == 0 || active);
1113        LASSERT(conn->ksnc_proto != NULL);
1114        LASSERT(peerid.nid != LNET_NID_ANY);
1115
1116        cpt = lnet_cpt_of_nid(peerid.nid);
1117
1118        if (active) {
1119                ksocknal_peer_addref(peer);
1120                write_lock_bh(global_lock);
1121        } else {
1122                rc = ksocknal_create_peer(&peer, ni, peerid);
1123                if (rc != 0)
1124                        goto failed_1;
1125
1126                write_lock_bh(global_lock);
1127
1128                /* called with a ref on ni, so shutdown can't have started */
1129                LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1130
1131                peer2 = ksocknal_find_peer_locked(ni, peerid);
1132                if (peer2 == NULL) {
1133                        /* NB this puts an "empty" peer in the peer
1134                         * table (which takes my ref) */
1135                        list_add_tail(&peer->ksnp_list,
1136                                          ksocknal_nid2peerlist(peerid.nid));
1137                } else {
1138                        ksocknal_peer_decref(peer);
1139                        peer = peer2;
1140                }
1141
1142                /* +1 ref for me */
1143                ksocknal_peer_addref(peer);
1144                peer->ksnp_accepting++;
1145
1146                /* Am I already connecting to this guy?  Resolve in
1147                 * favour of higher NID... */
1148                if (peerid.nid < ni->ni_nid &&
1149                    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1150                        rc = EALREADY;
1151                        warn = "connection race resolution";
1152                        goto failed_2;
1153                }
1154        }
1155
1156        if (peer->ksnp_closing ||
1157            (active && route->ksnr_deleted)) {
1158                /* peer/route got closed under me */
1159                rc = -ESTALE;
1160                warn = "peer/route removed";
1161                goto failed_2;
1162        }
1163
1164        if (peer->ksnp_proto == NULL) {
1165                /* Never connected before.
1166                 * NB recv_hello may have returned EPROTO to signal my peer
1167                 * wants a different protocol than the one I asked for.
1168                 */
1169                LASSERT(list_empty(&peer->ksnp_conns));
1170
1171                peer->ksnp_proto = conn->ksnc_proto;
1172                peer->ksnp_incarnation = incarnation;
1173        }
1174
1175        if (peer->ksnp_proto != conn->ksnc_proto ||
1176            peer->ksnp_incarnation != incarnation) {
1177                /* Peer rebooted or I've got the wrong protocol version */
1178                ksocknal_close_peer_conns_locked(peer, 0, 0);
1179
1180                peer->ksnp_proto = NULL;
1181                rc = ESTALE;
1182                warn = peer->ksnp_incarnation != incarnation ?
1183                       "peer rebooted" :
1184                       "wrong proto version";
1185                goto failed_2;
1186        }
1187
1188        switch (rc) {
1189        default:
1190                LBUG();
1191        case 0:
1192                break;
1193        case EALREADY:
1194                warn = "lost conn race";
1195                goto failed_2;
1196        case EPROTO:
1197                warn = "retry with different protocol version";
1198                goto failed_2;
1199        }
1200
1201        /* Refuse to duplicate an existing connection, unless this is a
1202         * loopback connection */
1203        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1204                list_for_each(tmp, &peer->ksnp_conns) {
1205                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1206
1207                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1208                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1209                            conn2->ksnc_type != conn->ksnc_type)
1210                                continue;
1211
1212                        /* Reply on a passive connection attempt so the peer
1213                         * realises we're connected. */
1214                        LASSERT(rc == 0);
1215                        if (!active)
1216                                rc = EALREADY;
1217
1218                        warn = "duplicate";
1219                        goto failed_2;
1220                }
1221        }
1222
1223        /* If the connection created by this route didn't bind to the IP
1224         * address the route connected to, the connection/route matching
1225         * code below probably isn't going to work. */
1226        if (active &&
1227            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1228                CERROR("Route %s %pI4h connected to %pI4h\n",
1229                       libcfs_id2str(peer->ksnp_id),
1230                       &route->ksnr_ipaddr,
1231                       &conn->ksnc_ipaddr);
1232        }
1233
1234        /* Search for a route corresponding to the new connection and
1235         * create an association.  This allows incoming connections created
1236         * by routes in my peer to match my own route entries so I don't
1237         * continually create duplicate routes. */
1238        list_for_each(tmp, &peer->ksnp_routes) {
1239                route = list_entry(tmp, ksock_route_t, ksnr_list);
1240
1241                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1242                        continue;
1243
1244                ksocknal_associate_route_conn_locked(route, conn);
1245                break;
1246        }
1247
1248        conn->ksnc_peer = peer;          /* conn takes my ref on peer */
1249        peer->ksnp_last_alive = cfs_time_current();
1250        peer->ksnp_send_keepalive = 0;
1251        peer->ksnp_error = 0;
1252
1253        sched = ksocknal_choose_scheduler_locked(cpt);
1254        sched->kss_nconns++;
1255        conn->ksnc_scheduler = sched;
1256
1257        conn->ksnc_tx_last_post = cfs_time_current();
1258        /* Set the deadline for the outgoing HELLO to drain */
1259        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1260        conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1261        mb();   /* order with adding to peer's conn list */
1262
1263        list_add(&conn->ksnc_list, &peer->ksnp_conns);
1264        ksocknal_conn_addref(conn);
1265
1266        ksocknal_new_packet(conn, 0);
1267
1268        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1269
1270        /* Take packets blocking for this connection. */
1271        list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1272                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
1273                        continue;
1274
1275                list_del(&tx->tx_list);
1276                ksocknal_queue_tx_locked(tx, conn);
1277        }
1278
1279        write_unlock_bh(global_lock);
1280
1281        /* We've now got a new connection.  Any errors from here on are just
1282         * like "normal" comms errors and we close the connection normally.
1283         * NB (a) we still have to send the reply HELLO for passive
1284         *      connections,
1285         *    (b) normal I/O on the conn is blocked until I setup and call the
1286         *      socket callbacks.
1287         */
1288
1289        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
1290               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1291               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1292               conn->ksnc_port, incarnation, cpt,
1293               (int)(sched - &sched->kss_info->ksi_scheds[0]));
1294
1295        if (active) {
1296                /* additional routes after interface exchange? */
1297                ksocknal_create_routes(peer, conn->ksnc_port,
1298                                       hello->kshm_ips, hello->kshm_nips);
1299        } else {
1300                hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1301                                                       hello->kshm_nips);
1302                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1303        }
1304
1305        LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1306                                    kshm_ips[LNET_MAX_INTERFACES]));
1307
1308        /* setup the socket AFTER I've received hello (it disables
1309         * SO_LINGER).  I might call back to the acceptor who may want
1310         * to send a protocol version response and then close the
1311         * socket; this ensures the socket only tears down after the
1312         * response has been sent. */
1313        if (rc == 0)
1314                rc = ksocknal_lib_setup_sock(sock);
1315
1316        write_lock_bh(global_lock);
1317
1318        /* NB my callbacks block while I hold ksnd_global_lock */
1319        ksocknal_lib_set_callback(sock, conn);
1320
1321        if (!active)
1322                peer->ksnp_accepting--;
1323
1324        write_unlock_bh(global_lock);
1325
1326        if (rc != 0) {
1327                write_lock_bh(global_lock);
1328                if (!conn->ksnc_closing) {
1329                        /* could be closed by another thread */
1330                        ksocknal_close_conn_locked(conn, rc);
1331                }
1332                write_unlock_bh(global_lock);
1333        } else if (ksocknal_connsock_addref(conn) == 0) {
1334                /* Allow I/O to proceed. */
1335                ksocknal_read_callback(conn);
1336                ksocknal_write_callback(conn);
1337                ksocknal_connsock_decref(conn);
1338        }
1339
1340        ksocknal_connsock_decref(conn);
1341        ksocknal_conn_decref(conn);
1342        return rc;
1343
1344 failed_2:
1345        if (!peer->ksnp_closing &&
1346            list_empty(&peer->ksnp_conns) &&
1347            list_empty(&peer->ksnp_routes)) {
1348                list_add(&zombies, &peer->ksnp_tx_queue);
1349                list_del_init(&peer->ksnp_tx_queue);
1350                ksocknal_unlink_peer_locked(peer);
1351        }
1352
1353        write_unlock_bh(global_lock);
1354
1355        if (warn != NULL) {
1356                if (rc < 0)
1357                        CERROR("Not creating conn %s type %d: %s\n",
1358                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1359                else
1360                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1361                              libcfs_id2str(peerid), conn->ksnc_type, warn);
1362        }
1363
1364        if (!active) {
1365                if (rc > 0) {
1366                        /* Request retry by replying with CONN_NONE;
1367                         * ksnc_proto has been set already */
1368                        conn->ksnc_type = SOCKLND_CONN_NONE;
1369                        hello->kshm_nips = 0;
1370                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
1371                }
1372
1373                write_lock_bh(global_lock);
1374                peer->ksnp_accepting--;
1375                write_unlock_bh(global_lock);
1376        }
1377
1378        ksocknal_txlist_done(ni, &zombies, 1);
1379        ksocknal_peer_decref(peer);
1380
1381failed_1:
1382        if (hello != NULL)
1383                LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1384                                            kshm_ips[LNET_MAX_INTERFACES]));
1385
1386        LIBCFS_FREE(conn, sizeof(*conn));
1387
1388failed_0:
1389        sock_release(sock);
1390        return rc;
1391}
1392
1393void
1394ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
1395{
1396        /* This just does the immediate housekeeping, and queues the
1397         * connection for the reaper to terminate.
1398         * Caller holds ksnd_global_lock exclusively in irq context */
1399        ksock_peer_t *peer = conn->ksnc_peer;
1400        ksock_route_t *route;
1401        ksock_conn_t *conn2;
1402        struct list_head *tmp;
1403
1404        LASSERT(peer->ksnp_error == 0);
1405        LASSERT(!conn->ksnc_closing);
1406        conn->ksnc_closing = 1;
1407
1408        /* ksnd_deathrow_conns takes over peer's ref */
1409        list_del(&conn->ksnc_list);
1410
1411        route = conn->ksnc_route;
1412        if (route != NULL) {
1413                /* dissociate conn from route... */
1414                LASSERT(!route->ksnr_deleted);
1415                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1416
1417                conn2 = NULL;
1418                list_for_each(tmp, &peer->ksnp_conns) {
1419                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1420
1421                        if (conn2->ksnc_route == route &&
1422                            conn2->ksnc_type == conn->ksnc_type)
1423                                break;
1424
1425                        conn2 = NULL;
1426                }
1427                if (conn2 == NULL)
1428                        route->ksnr_connected &= ~(1 << conn->ksnc_type);
1429
1430                conn->ksnc_route = NULL;
1431
1432#if 0      /* irrelevant with only eager routes */
1433                /* make route least favourite */
1434                list_del(&route->ksnr_list);
1435                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
1436#endif
1437                ksocknal_route_decref(route);     /* drop conn's ref on route */
1438        }
1439
1440        if (list_empty(&peer->ksnp_conns)) {
1441                /* No more connections to this peer */
1442
1443                if (!list_empty(&peer->ksnp_tx_queue)) {
1444                        ksock_tx_t *tx;
1445
1446                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1447
1448                        /* throw them to the last connection;
1449                         * these TXs will be sent to /dev/null by the scheduler */
1450                        list_for_each_entry(tx, &peer->ksnp_tx_queue,
1451                                                tx_list)
1452                                ksocknal_tx_prep(conn, tx);
1453
1454                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1455                        list_splice_init(&peer->ksnp_tx_queue,
1456                                             &conn->ksnc_tx_queue);
1457                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1458                }
1459
1460                peer->ksnp_proto = NULL;        /* renegotiate protocol version */
1461                peer->ksnp_error = error;       /* stash last conn close reason */
1462
1463                if (list_empty(&peer->ksnp_routes)) {
1464                        /* I've just closed last conn belonging to a
1465                         * peer with no routes to it */
1466                        ksocknal_unlink_peer_locked(peer);
1467                }
1468        }
1469
1470        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1471
1472        list_add_tail(&conn->ksnc_list,
1473                          &ksocknal_data.ksnd_deathrow_conns);
1474        wake_up(&ksocknal_data.ksnd_reaper_waitq);
1475
1476        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1477}
1478
1479void
1480ksocknal_peer_failed(ksock_peer_t *peer)
1481{
1482        int notify = 0;
1483        unsigned long last_alive = 0;
1484
1485        /* There has been a connection failure or comms error, but I'll only
1486         * tell LNET I think the peer is dead if it is a kernel peer and
1487         * there are no connections or connection attempts in existence. */
1488
1489        read_lock(&ksocknal_data.ksnd_global_lock);
1490
1491        if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1492            list_empty(&peer->ksnp_conns) &&
1493            peer->ksnp_accepting == 0 &&
1494            ksocknal_find_connecting_route_locked(peer) == NULL) {
1495                notify = 1;
1496                last_alive = peer->ksnp_last_alive;
1497        }
1498
1499        read_unlock(&ksocknal_data.ksnd_global_lock);
1500
1501        if (notify)
1502                lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
1503                             last_alive);
1504}
1505
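    /*
     * Abort any zero-copy requests still outstanding on 'conn': detach them
     * from the peer's ZC request list under ksnp_lock, mark them not-acked,
     * then drop their refs (and so finalize them) outside the lock.
     */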
1506void
1507ksocknal_finalize_zcreq(ksock_conn_t *conn)
1508{
1509        ksock_peer_t *peer = conn->ksnc_peer;
1510        ksock_tx_t *tx;
1511        ksock_tx_t *tmp;
1512        LIST_HEAD(zlist);
1513
1514        /* NB safe to finalize TXs because closing of socket will
1515         * abort all buffered data */
1516        LASSERT(conn->ksnc_sock == NULL);
1517
1518        spin_lock(&peer->ksnp_lock);
1519
1520        list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1521                if (tx->tx_conn != conn)
1522                        continue;
1523
1524                LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1525
1526                tx->tx_msg.ksm_zc_cookies[0] = 0;
1527                tx->tx_zc_aborted = 1; /* mark it as not-acked */
1528                list_del(&tx->tx_zc_list);
1529                list_add(&tx->tx_zc_list, &zlist);
1530        }
1531
1532        spin_unlock(&peer->ksnp_lock);
1533
1534        while (!list_empty(&zlist)) {
1535                tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1536
1537                list_del(&tx->tx_zc_list);
1538                ksocknal_tx_decref(tx);
1539        }
1540}
1541
1542void
1543ksocknal_terminate_conn(ksock_conn_t *conn)
1544{
1545        /* This gets called by the reaper (guaranteed thread context) to
1546         * disengage the socket from its callbacks and close it.
1547         * ksnc_refcount will eventually hit zero, and then the reaper will
1548         * destroy it. */
1549        ksock_peer_t *peer = conn->ksnc_peer;
1550        ksock_sched_t *sched = conn->ksnc_scheduler;
1551        int failed = 0;
1552
1553        LASSERT(conn->ksnc_closing);
1554
1555        /* wake up the scheduler to "send" all remaining packets to /dev/null */
1556        spin_lock_bh(&sched->kss_lock);
1557
1558        /* a closing conn is always ready to tx */
1559        conn->ksnc_tx_ready = 1;
1560
1561        if (!conn->ksnc_tx_scheduled &&
1562            !list_empty(&conn->ksnc_tx_queue)) {
1563                list_add_tail(&conn->ksnc_tx_list,
1564                               &sched->kss_tx_conns);
1565                conn->ksnc_tx_scheduled = 1;
1566                /* extra ref for scheduler */
1567                ksocknal_conn_addref(conn);
1568
1569                wake_up(&sched->kss_waitq);
1570        }
1571
1572        spin_unlock_bh(&sched->kss_lock);
1573
1574        /* serialise with callbacks */
1575        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1576
1577        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1578
1579        /* OK, so this conn may not be completely disengaged from its
1580         * scheduler yet, but it _has_ committed to terminate... */
1581        conn->ksnc_scheduler->kss_nconns--;
1582
1583        if (peer->ksnp_error != 0) {
1584                /* peer's last conn closed in error */
1585                LASSERT(list_empty(&peer->ksnp_conns));
1586                failed = 1;
1587                peer->ksnp_error = 0;     /* avoid multiple notifications */
1588        }
1589
1590        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1591
1592        if (failed)
1593                ksocknal_peer_failed(peer);
1594
1595        /* The socket is closed on the final put; either here, or in
1596         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1597         * when the connection was established, this will close the socket
1598         * immediately, aborting anything buffered in it. Any hung
1599         * zero-copy transmits will therefore complete in finite time. */
1600        ksocknal_connsock_decref(conn);
1601}
1602
1603void
1604ksocknal_queue_zombie_conn(ksock_conn_t *conn)
1605{
1606        /* Queue the conn for the reaper to destroy */
1607
1608        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1609        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1610
1611        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1612        wake_up(&ksocknal_data.ksnd_reaper_waitq);
1613
1614        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1615}
1616
1617void
1618ksocknal_destroy_conn(ksock_conn_t *conn)
1619{
1620        unsigned long last_rcv;
1621
1622        /* Final coup-de-grace of the reaper */
1623        CDEBUG(D_NET, "connection %p\n", conn);
1624
1625        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1626        LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
1627        LASSERT(conn->ksnc_sock == NULL);
1628        LASSERT(conn->ksnc_route == NULL);
1629        LASSERT(!conn->ksnc_tx_scheduled);
1630        LASSERT(!conn->ksnc_rx_scheduled);
1631        LASSERT(list_empty(&conn->ksnc_tx_queue));
1632
1633        /* complete current receive if any */
1634        switch (conn->ksnc_rx_state) {
1635        case SOCKNAL_RX_LNET_PAYLOAD:
1636                last_rcv = conn->ksnc_rx_deadline -
1637                           cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1638                CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
1639                       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1640                       &conn->ksnc_ipaddr, conn->ksnc_port,
1641                       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1642                       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
1643                                                     last_rcv)));
1644                lnet_finalize(conn->ksnc_peer->ksnp_ni,
1645                               conn->ksnc_cookie, -EIO);
1646                break;
1647        case SOCKNAL_RX_LNET_HEADER:
1648                if (conn->ksnc_rx_started)
1649                        CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1650                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1651                               &conn->ksnc_ipaddr, conn->ksnc_port,
1652                               conn->ksnc_proto->pro_version);
1653                break;
1654        case SOCKNAL_RX_KSM_HEADER:
1655                if (conn->ksnc_rx_started)
1656                        CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1657                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1658                               &conn->ksnc_ipaddr, conn->ksnc_port,
1659                               conn->ksnc_proto->pro_version);
1660                break;
1661        case SOCKNAL_RX_SLOP:
1662                if (conn->ksnc_rx_started)
1663                        CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
1664                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1665                               &conn->ksnc_ipaddr, conn->ksnc_port);
1666                break;
1667        default:
1668                LBUG();
1669                break;
1670        }
1671
1672        ksocknal_peer_decref(conn->ksnc_peer);
1673
1674        LIBCFS_FREE(conn, sizeof(*conn));
1675}
1676
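    /*
     * Close all of this peer's conns to 'ipaddr' (or every conn if 'ipaddr'
     * is 0).  Caller holds ksnd_global_lock in write mode.  Returns the
     * number of conns scheduled for close.
     */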
1677int
1678ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
1679{
1680        ksock_conn_t *conn;
1681        struct list_head *ctmp;
1682        struct list_head *cnxt;
1683        int count = 0;
1684
1685        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
1686                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1687
1688                if (ipaddr == 0 ||
1689                    conn->ksnc_ipaddr == ipaddr) {
1690                        count++;
1691                        ksocknal_close_conn_locked(conn, why);
1692                }
1693        }
1694
1695        return count;
1696}
1697
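    /*
     * Close 'conn' and any other conns to the same peer IP address, taking
     * and releasing ksnd_global_lock around the locked helper.  Returns the
     * number of conns closed.
     */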
1698int
1699ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
1700{
1701        ksock_peer_t *peer = conn->ksnc_peer;
1702        __u32 ipaddr = conn->ksnc_ipaddr;
1703        int count;
1704
1705        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1706
1707        count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
1708
1709        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1710
1711        return count;
1712}
1713
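    /*
     * Close conns matching the given process id and/or peer IP address;
     * LNET_NID_ANY, LNET_PID_ANY and ipaddr == 0 act as wildcards.  Wildcard
     * requests always succeed; a fully specified request returns -ENOENT if
     * nothing matched.
     */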
1714int
1715ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
1716{
1717        ksock_peer_t *peer;
1718        struct list_head *ptmp;
1719        struct list_head *pnxt;
1720        int lo;
1721        int hi;
1722        int i;
1723        int count = 0;
1724
1725        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1726
1727        if (id.nid != LNET_NID_ANY) {
1728                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1729        } else {
1730                lo = 0;
1731                hi = ksocknal_data.ksnd_peer_hash_size - 1;
1732        }
1733
1734        for (i = lo; i <= hi; i++) {
1735                list_for_each_safe(ptmp, pnxt,
1736                                        &ksocknal_data.ksnd_peers[i]) {
1737
1738                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
1739
1740                        if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1741                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1742                                continue;
1743
1744                        count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
1745                }
1746        }
1747
1748        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1749
1750        /* wildcards always succeed */
1751        if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1752                return 0;
1753
1754        if (count == 0)
1755                return -ENOENT;
1756        else
1757                return 0;
1758}
1759
1760void
1761ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1762{
1763        /* The router is telling me she's been notified of a change in
1764         * gateway state.... */
1765        lnet_process_id_t id = {0};
1766
1767        id.nid = gw_nid;
1768        id.pid = LNET_PID_ANY;
1769
1770        CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1771                alive ? "up" : "down");
1772
1773        if (!alive) {
1774                /* If the gateway crashed, close all open connections... */
1775                ksocknal_close_matching_conns(id, 0);
1776                return;
1777        }
1778
1779        /* ...otherwise do nothing.  We can only establish new connections
1780         * if we have autoroutes, and these connect on demand. */
1781}
1782
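    /*
     * lnd_query handler: report when 'nid' was last known alive (refreshing
     * ksnp_last_alive when the socket shows recently ACKed data) and, unless
     * the peer exists but has no connectable route, (re)create the peer and
     * launch connections to it.
     */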
1783void
1784ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1785{
1786        int connect = 1;
1787        unsigned long last_alive = 0;
1788        unsigned long now = cfs_time_current();
1789        ksock_peer_t *peer = NULL;
1790        rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1791        lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
1792
1793        read_lock(glock);
1794
1795        peer = ksocknal_find_peer_locked(ni, id);
1796        if (peer != NULL) {
1797                struct list_head *tmp;
1798                ksock_conn_t *conn;
1799                int bufnob;
1800
1801                list_for_each(tmp, &peer->ksnp_conns) {
1802                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1803                        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1804
1805                        if (bufnob < conn->ksnc_tx_bufnob) {
1806                                /* something got ACKed */
1807                                conn->ksnc_tx_deadline =
1808                                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1809                                peer->ksnp_last_alive = now;
1810                                conn->ksnc_tx_bufnob = bufnob;
1811                        }
1812                }
1813
1814                last_alive = peer->ksnp_last_alive;
1815                if (ksocknal_find_connectable_route_locked(peer) == NULL)
1816                        connect = 0;
1817        }
1818
1819        read_unlock(glock);
1820
1821        if (last_alive != 0)
1822                *when = last_alive;
1823
1824        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1825               libcfs_nid2str(nid), peer,
1826               last_alive ? cfs_duration_sec(now - last_alive) : -1,
1827               connect);
1828
1829        if (!connect)
1830                return;
1831
1832        ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1833
1834        write_lock_bh(glock);
1835
1836        peer = ksocknal_find_peer_locked(ni, id);
1837        if (peer != NULL)
1838                ksocknal_launch_all_connections_locked(peer);
1839
1840        write_unlock_bh(glock);
1841}
1842
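    /*
     * Push each of the peer's conns in turn via ksocknal_lib_push_conn(),
     * taking a ref on one conn at a time so the global lock is never held
     * across the push.
     */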
1843static void
1844ksocknal_push_peer(ksock_peer_t *peer)
1845{
1846        int index;
1847        int i;
1848        struct list_head *tmp;
1849        ksock_conn_t *conn;
1850
1851        for (index = 0; ; index++) {
1852                read_lock(&ksocknal_data.ksnd_global_lock);
1853
1854                i = 0;
1855                conn = NULL;
1856
1857                list_for_each(tmp, &peer->ksnp_conns) {
1858                        if (i++ == index) {
1859                                conn = list_entry(tmp, ksock_conn_t,
1860                                                       ksnc_list);
1861                                ksocknal_conn_addref(conn);
1862                                break;
1863                        }
1864                }
1865
1866                read_unlock(&ksocknal_data.ksnd_global_lock);
1867
1868                if (conn == NULL)
1869                        break;
1870
1871                ksocknal_lib_push_conn(conn);
1872                ksocknal_conn_decref(conn);
1873        }
1874}
1875
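    /*
     * Push every conn of every peer matching 'id' (LNET_NID_ANY and
     * LNET_PID_ANY are wildcards).  Peers are looked up one at a time by
     * hash-chain offset so the global lock is only held during the lookup.
     * Returns -ENOENT if no peer matched.
     */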
1876static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
1877{
1878        struct list_head *start;
1879        struct list_head *end;
1880        struct list_head *tmp;
1881        int rc = -ENOENT;
1882        unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
1883
1884        if (id.nid == LNET_NID_ANY) {
1885                start = &ksocknal_data.ksnd_peers[0];
1886                end = &ksocknal_data.ksnd_peers[hsize - 1];
1887        } else {
1888                start = end = ksocknal_nid2peerlist(id.nid);
1889        }
1890
1891        for (tmp = start; tmp <= end; tmp++) {
1892                int peer_off; /* searching offset in peer hash table */
1893
1894                for (peer_off = 0; ; peer_off++) {
1895                        ksock_peer_t *peer;
1896                        int i = 0;
1897
1898                        read_lock(&ksocknal_data.ksnd_global_lock);
1899                        list_for_each_entry(peer, tmp, ksnp_list) {
1900                                if (!((id.nid == LNET_NID_ANY ||
1901                                       id.nid == peer->ksnp_id.nid) &&
1902                                      (id.pid == LNET_PID_ANY ||
1903                                       id.pid == peer->ksnp_id.pid)))
1904                                        continue;
1905
1906                                if (i++ == peer_off) {
1907                                        ksocknal_peer_addref(peer);
1908                                        break;
1909                                }
1910                        }
1911                        read_unlock(&ksocknal_data.ksnd_global_lock);
1912
1913                        if (i <= peer_off) /* no peer at this offset */
1914                                break;
1915
1916                        rc = 0;
1917                        ksocknal_push_peer(peer);
1918                        ksocknal_peer_decref(peer);
1919                }
1920        }
1921        return rc;
1922}
1923
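    /*
     * Register a local interface (IP + netmask) with this NI.  Duplicates
     * are silently ignored; -ENOSPC if LNET_MAX_INTERFACES are already in
     * use.  The new interface's peer and route usage counts are seeded from
     * existing peers and routes that already use the address.
     */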
1924static int
1925ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1926{
1927        ksock_net_t *net = ni->ni_data;
1928        ksock_interface_t *iface;
1929        int rc;
1930        int i;
1931        int j;
1932        struct list_head *ptmp;
1933        ksock_peer_t *peer;
1934        struct list_head *rtmp;
1935        ksock_route_t *route;
1936
1937        if (ipaddress == 0 ||
1938            netmask == 0)
1939                return -EINVAL;
1940
1941        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1942
1943        iface = ksocknal_ip2iface(ni, ipaddress);
1944        if (iface != NULL) {
1945                /* silently ignore dups */
1946                rc = 0;
1947        } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1948                rc = -ENOSPC;
1949        } else {
1950                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1951
1952                iface->ksni_ipaddr = ipaddress;
1953                iface->ksni_netmask = netmask;
1954                iface->ksni_nroutes = 0;
1955                iface->ksni_npeers = 0;
1956
1957                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1958                        list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1959                                peer = list_entry(ptmp, ksock_peer_t,
1960                                                      ksnp_list);
1961
1962                                for (j = 0; j < peer->ksnp_n_passive_ips; j++)
1963                                        if (peer->ksnp_passive_ips[j] == ipaddress)
1964                                                iface->ksni_npeers++;
1965
1966                                list_for_each(rtmp, &peer->ksnp_routes) {
1967                                        route = list_entry(rtmp,
1968                                                               ksock_route_t,
1969                                                               ksnr_list);
1970
1971                                        if (route->ksnr_myipaddr == ipaddress)
1972                                                iface->ksni_nroutes++;
1973                                }
1974                        }
1975                }
1976
1977                rc = 0;
1978                /* NB only new connections will pay attention to the new interface! */
1979        }
1980
1981        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1982
1983        return rc;
1984}
1985
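    /*
     * Strip a local IP address from one peer: forget it as a passive IP,
     * unbind (or delete) routes bound to it and close conns using it.
     * Caller holds ksnd_global_lock in write mode.
     */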
1986static void
1987ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
1988{
1989        struct list_head *tmp;
1990        struct list_head *nxt;
1991        ksock_route_t *route;
1992        ksock_conn_t *conn;
1993        int i;
1994        int j;
1995
1996        for (i = 0; i < peer->ksnp_n_passive_ips; i++)
1997                if (peer->ksnp_passive_ips[i] == ipaddr) {
1998                        for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
1999                                peer->ksnp_passive_ips[j-1] =
2000                                        peer->ksnp_passive_ips[j];
2001                        peer->ksnp_n_passive_ips--;
2002                        break;
2003                }
2004
2005        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2006                route = list_entry(tmp, ksock_route_t, ksnr_list);
2007
2008                if (route->ksnr_myipaddr != ipaddr)
2009                        continue;
2010
2011                if (route->ksnr_share_count != 0) {
2012                        /* Manually created; keep, but unbind */
2013                        route->ksnr_myipaddr = 0;
2014                } else {
2015                        ksocknal_del_route_locked(route);
2016                }
2017        }
2018
2019        list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2020                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2021
2022                if (conn->ksnc_myipaddr == ipaddr)
2023                        ksocknal_close_conn_locked(conn, 0);
2024        }
2025}
2026
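    /*
     * Unregister a local interface (all interfaces if 'ipaddress' is 0) from
     * this NI and strip it from every peer on the NI.  Returns -ENOENT if no
     * such interface was configured.
     */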
2027static int
2028ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2029{
2030        ksock_net_t *net = ni->ni_data;
2031        int rc = -ENOENT;
2032        struct list_head *tmp;
2033        struct list_head *nxt;
2034        ksock_peer_t *peer;
2035        __u32 this_ip;
2036        int i;
2037        int j;
2038
2039        write_lock_bh(&ksocknal_data.ksnd_global_lock);
2040
2041        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2042                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2043
2044                if (!(ipaddress == 0 ||
2045                      ipaddress == this_ip))
2046                        continue;
2047
2048                rc = 0;
2049
2050                for (j = i+1; j < net->ksnn_ninterfaces; j++)
2051                        net->ksnn_interfaces[j-1] =
2052                                net->ksnn_interfaces[j];
2053
2054                net->ksnn_ninterfaces--;
2055
2056                for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2057                        list_for_each_safe(tmp, nxt,
2058                                               &ksocknal_data.ksnd_peers[j]) {
2059                                peer = list_entry(tmp, ksock_peer_t,
2060                                                      ksnp_list);
2061
2062                                if (peer->ksnp_ni != ni)
2063                                        continue;
2064
2065                                ksocknal_peer_del_interface_locked(peer, this_ip);
2066                        }
2067                }
2068        }
2069
2070        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2071
2072        return rc;
2073}
2074
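    /*
     * lnd_ctl handler: dispatch libcfs ioctls for interface, peer and conn
     * inspection and management on this NI.
     */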
2075int
2076ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2077{
2078        lnet_process_id_t id = {0};
2079        struct libcfs_ioctl_data *data = arg;
2080        int rc;
2081
2082        switch (cmd) {
2083        case IOC_LIBCFS_GET_INTERFACE: {
2084                ksock_net_t       *net = ni->ni_data;
2085                ksock_interface_t *iface;
2086
2087                read_lock(&ksocknal_data.ksnd_global_lock);
2088
2089                if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2090                        rc = -ENOENT;
2091                } else {
2092                        rc = 0;
2093                        iface = &net->ksnn_interfaces[data->ioc_count];
2094
2095                        data->ioc_u32[0] = iface->ksni_ipaddr;
2096                        data->ioc_u32[1] = iface->ksni_netmask;
2097                        data->ioc_u32[2] = iface->ksni_npeers;
2098                        data->ioc_u32[3] = iface->ksni_nroutes;
2099                }
2100
2101                read_unlock(&ksocknal_data.ksnd_global_lock);
2102                return rc;
2103        }
2104
2105        case IOC_LIBCFS_ADD_INTERFACE:
2106                return ksocknal_add_interface(ni,
2107                                              data->ioc_u32[0], /* IP address */
2108                                              data->ioc_u32[1]); /* net mask */
2109
2110        case IOC_LIBCFS_DEL_INTERFACE:
2111                return ksocknal_del_interface(ni,
2112                                              data->ioc_u32[0]); /* IP address */
2113
2114        case IOC_LIBCFS_GET_PEER: {
2115                __u32 myip = 0;
2116                __u32 ip = 0;
2117                int port = 0;
2118                int conn_count = 0;
2119                int share_count = 0;
2120
2121                rc = ksocknal_get_peer_info(ni, data->ioc_count,
2122                                            &id, &myip, &ip, &port,
2123                                            &conn_count,  &share_count);
2124                if (rc != 0)
2125                        return rc;
2126
2127                data->ioc_nid    = id.nid;
2128                data->ioc_count  = share_count;
2129                data->ioc_u32[0] = ip;
2130                data->ioc_u32[1] = port;
2131                data->ioc_u32[2] = myip;
2132                data->ioc_u32[3] = conn_count;
2133                data->ioc_u32[4] = id.pid;
2134                return 0;
2135        }
2136
2137        case IOC_LIBCFS_ADD_PEER:
2138                id.nid = data->ioc_nid;
2139                id.pid = LUSTRE_SRV_LNET_PID;
2140                return ksocknal_add_peer(ni, id,
2141                                          data->ioc_u32[0], /* IP */
2142                                          data->ioc_u32[1]); /* port */
2143
2144        case IOC_LIBCFS_DEL_PEER:
2145                id.nid = data->ioc_nid;
2146                id.pid = LNET_PID_ANY;
2147                return ksocknal_del_peer(ni, id,
2148                                          data->ioc_u32[0]); /* IP */
2149
2150        case IOC_LIBCFS_GET_CONN: {
2151                int txmem;
2152                int rxmem;
2153                int nagle;
2154                ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2155
2156                if (conn == NULL)
2157                        return -ENOENT;
2158
2159                ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2160
2161                data->ioc_count  = txmem;
2162                data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2163                data->ioc_flags  = nagle;
2164                data->ioc_u32[0] = conn->ksnc_ipaddr;
2165                data->ioc_u32[1] = conn->ksnc_port;
2166                data->ioc_u32[2] = conn->ksnc_myipaddr;
2167                data->ioc_u32[3] = conn->ksnc_type;
2168                data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2169                data->ioc_u32[5] = rxmem;
2170                data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2171                ksocknal_conn_decref(conn);
2172                return 0;
2173        }
2174
2175        case IOC_LIBCFS_CLOSE_CONNECTION:
2176                id.nid = data->ioc_nid;
2177                id.pid = LNET_PID_ANY;
2178                return ksocknal_close_matching_conns(id,
2179                                                      data->ioc_u32[0]);
2180
2181        case IOC_LIBCFS_REGISTER_MYNID:
2182                /* Ignore if this is a noop */
2183                if (data->ioc_nid == ni->ni_nid)
2184                        return 0;
2185
2186                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2187                       libcfs_nid2str(data->ioc_nid),
2188                       libcfs_nid2str(ni->ni_nid));
2189                return -EINVAL;
2190
2191        case IOC_LIBCFS_PUSH_CONNECTION:
2192                id.nid = data->ioc_nid;
2193                id.pid = LNET_PID_ANY;
2194                return ksocknal_push(ni, id);
2195
2196        default:
2197                return -EINVAL;
2198        }
2199        /* not reached */
2200}
2201
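    /*
     * Free memory allocated at base startup: the per-CPT scheduler arrays,
     * the peer hash table and any idle noop TX descriptors.
     */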
2202static void
2203ksocknal_free_buffers(void)
2204{
2205        LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2206
2207        if (ksocknal_data.ksnd_sched_info != NULL) {
2208                struct ksock_sched_info *info;
2209                int i;
2210
2211                cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2212                        if (info->ksi_scheds != NULL) {
2213                                LIBCFS_FREE(info->ksi_scheds,
2214                                            info->ksi_nthreads_max *
2215                                            sizeof(info->ksi_scheds[0]));
2216                        }
2217                }
2218                cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2219        }
2220
2221        LIBCFS_FREE(ksocknal_data.ksnd_peers,
2222                     sizeof(struct list_head) *
2223                     ksocknal_data.ksnd_peer_hash_size);
2224
2225        spin_lock(&ksocknal_data.ksnd_tx_lock);
2226
2227        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2228                struct list_head zlist;
2229                ksock_tx_t *tx;
2230
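                    /* move the whole idle-noop list onto the local zlist:
                     * link zlist into the chain, then unlink the old head */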
2231                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2232                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2233                spin_unlock(&ksocknal_data.ksnd_tx_lock);
2234
2235                while (!list_empty(&zlist)) {
2236                        tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2237                        list_del(&tx->tx_list);
2238                        LIBCFS_FREE(tx, tx->tx_desc_size);
2239                }
2240        } else {
2241                spin_unlock(&ksocknal_data.ksnd_tx_lock);
2242        }
2243}
2244
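    /*
     * Tear down global socknal state once the last net is gone: verify all
     * peer/conn lists are empty, flag ksnd_shuttingdown, wake the connd,
     * reaper and scheduler threads, wait for them to exit, then free the
     * global buffers.
     */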
2245static void
2246ksocknal_base_shutdown(void)
2247{
2248        struct ksock_sched_info *info;
2249        ksock_sched_t *sched;
2250        int i;
2251        int j;
2252
2253        LASSERT(ksocknal_data.ksnd_nnets == 0);
2254
2255        switch (ksocknal_data.ksnd_init) {
2256        default:
2257                LASSERT(0);
2258
2259        case SOCKNAL_INIT_ALL:
2260        case SOCKNAL_INIT_DATA:
2261                LASSERT(ksocknal_data.ksnd_peers != NULL);
2262                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2263                        LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2264
2265                LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2266                LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2267                LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2268                LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2269                LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2270
2271                if (ksocknal_data.ksnd_sched_info != NULL) {
2272                        cfs_percpt_for_each(info, i,
2273                                            ksocknal_data.ksnd_sched_info) {
2274                                if (info->ksi_scheds == NULL)
2275                                        continue;
2276
2277                                for (j = 0; j < info->ksi_nthreads_max; j++) {
2278
2279                                        sched = &info->ksi_scheds[j];
2280                                        LASSERT(list_empty(
2281                                                &sched->kss_tx_conns));
2282                                        LASSERT(list_empty(
2283                                                &sched->kss_rx_conns));
2284                                        LASSERT(list_empty(
2285                                                &sched->kss_zombie_noop_txs));
2286                                        LASSERT(sched->kss_nconns == 0);
2287                                }
2288                        }
2289                }
2290
2291                /* flag threads to terminate; wake and wait for them to die */
2292                ksocknal_data.ksnd_shuttingdown = 1;
2293                wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2294                wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2295
2296                if (ksocknal_data.ksnd_sched_info != NULL) {
2297                        cfs_percpt_for_each(info, i,
2298                                            ksocknal_data.ksnd_sched_info) {
2299                                if (info->ksi_scheds == NULL)
2300                                        continue;
2301
2302                                for (j = 0; j < info->ksi_nthreads_max; j++) {
2303                                        sched = &info->ksi_scheds[j];
2304                                        wake_up_all(&sched->kss_waitq);
2305                                }
2306                        }
2307                }
2308
2309                i = 4;
2310                read_lock(&ksocknal_data.ksnd_global_lock);
2311                while (ksocknal_data.ksnd_nthreads != 0) {
2312                        i++;
2313                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2314                               "waiting for %d threads to terminate\n",
2315                                ksocknal_data.ksnd_nthreads);
2316                        read_unlock(&ksocknal_data.ksnd_global_lock);
2317                        set_current_state(TASK_UNINTERRUPTIBLE);
2318                        schedule_timeout(cfs_time_seconds(1));
2319                        read_lock(&ksocknal_data.ksnd_global_lock);
2320                }
2321                read_unlock(&ksocknal_data.ksnd_global_lock);
2322
2323                ksocknal_free_buffers();
2324
2325                ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2326                break;
2327        }
2328
2329        module_put(THIS_MODULE);
2330}
2331
2332static __u64
2333ksocknal_new_incarnation(void)
2334{
2335
2336        /* The incarnation number is the time this module was loaded; it
2337         * identifies this particular instance of the socknal.
2338         */
2339        return ktime_get_ns();
2340}
2341
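    /*
     * Set up global socknal state on first use: allocate the peer hash
     * table, initialise locks, lists and per-CPT scheduler info, then start
     * the connd and reaper threads.  On any failure everything is unwound
     * via ksocknal_base_shutdown().
     */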
2342static int
2343ksocknal_base_startup(void)
2344{
2345        struct ksock_sched_info *info;
2346        int rc;
2347        int i;
2348
2349        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2350        LASSERT(ksocknal_data.ksnd_nnets == 0);
2351
2352        memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2353
2354        ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2355        LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2356                      sizeof(struct list_head) *
2357                      ksocknal_data.ksnd_peer_hash_size);
2358        if (ksocknal_data.ksnd_peers == NULL)
2359                return -ENOMEM;
2360
2361        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2362                INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2363
2364        rwlock_init(&ksocknal_data.ksnd_global_lock);
2365        INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2366
2367        spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2368        INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2369        INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2370        INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2371        init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2372
2373        spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2374        INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2375        INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2376        init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2377
2378        spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2379        INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2380
2381        /* NB the memset above zeros the whole of ksocknal_data */
2382
2383        /* flag lists/ptrs/locks initialised */
2384        ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2385        try_module_get(THIS_MODULE);
2386
2387        ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2388                                                         sizeof(*info));
2389        if (ksocknal_data.ksnd_sched_info == NULL)
2390                goto failed;
2391
2392        cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2393                ksock_sched_t *sched;
2394                int nthrs;
2395
2396                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2397                if (*ksocknal_tunables.ksnd_nscheds > 0) {
2398                        nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2399                } else {
2400                        /* cap at half of the CPUs; assume the other half
2401                         * should be reserved for upper-layer modules */
2402                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2403                }
2404
2405                info->ksi_nthreads_max = nthrs;
2406                info->ksi_cpt = i;
2407
2408                LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2409                                 info->ksi_nthreads_max * sizeof(*sched));
2410                if (info->ksi_scheds == NULL)
2411                        goto failed;
2412
2413                for (; nthrs > 0; nthrs--) {
2414                        sched = &info->ksi_scheds[nthrs - 1];
2415
2416                        sched->kss_info = info;
2417                        spin_lock_init(&sched->kss_lock);
2418                        INIT_LIST_HEAD(&sched->kss_rx_conns);
2419                        INIT_LIST_HEAD(&sched->kss_tx_conns);
2420                        INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2421                        init_waitqueue_head(&sched->kss_waitq);
2422                }
2423        }
2424
2425        ksocknal_data.ksnd_connd_starting       = 0;
2426        ksocknal_data.ksnd_connd_failed_stamp   = 0;
2427        ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2428        /* must have at least 2 connds to remain responsive to accepts while
2429         * connecting */
2430        if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2431                *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2432
2433        if (*ksocknal_tunables.ksnd_nconnds_max <
2434            *ksocknal_tunables.ksnd_nconnds) {
2435                ksocknal_tunables.ksnd_nconnds_max =
2436                        ksocknal_tunables.ksnd_nconnds;
2437        }
2438
2439        for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2440                char name[16];
2441
2442                spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2443                ksocknal_data.ksnd_connd_starting++;
2444                spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2445
2446                snprintf(name, sizeof(name), "socknal_cd%02d", i);
2447                rc = ksocknal_thread_start(ksocknal_connd,
2448                                           (void *)((ulong_ptr_t)i), name);
2449                if (rc != 0) {
2450                        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2451                        ksocknal_data.ksnd_connd_starting--;
2452                        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2453                        CERROR("Can't spawn socknal connd: %d\n", rc);
2454                        goto failed;
2455                }
2456        }
2457
2458        rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2459        if (rc != 0) {
2460                CERROR("Can't spawn socknal reaper: %d\n", rc);
2461                goto failed;
2462        }
2463
2464        /* flag everything initialised */
2465        ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2466
2467        return 0;
2468
2469 failed:
2470        ksocknal_base_shutdown();
2471        return -ENETDOWN;
2472}
2473
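    /*
     * Shutdown diagnostic: if a peer on this NI is still in the hash table,
     * dump its refcounts, routes and conns to the console.
     */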
2474static void
2475ksocknal_debug_peerhash(lnet_ni_t *ni)
2476{
2477        ksock_peer_t *peer = NULL;
2478        struct list_head *tmp;
2479        int i;
2480
2481        read_lock(&ksocknal_data.ksnd_global_lock);
2482
2483        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2484                list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2485                        peer = list_entry(tmp, ksock_peer_t, ksnp_list);
2486
2487                        if (peer->ksnp_ni == ni)
2488                                goto found;
2489
2490                        peer = NULL;
2491                }
2492        }
2493 found:
2494        if (peer != NULL) {
2495                ksock_route_t *route;
2496                ksock_conn_t  *conn;
2497
2498                CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2499                      libcfs_id2str(peer->ksnp_id),
2500                      atomic_read(&peer->ksnp_refcount),
2501                      peer->ksnp_sharecount, peer->ksnp_closing,
2502                      peer->ksnp_accepting, peer->ksnp_error,
2503                      peer->ksnp_zc_next_cookie,
2504                      !list_empty(&peer->ksnp_tx_queue),
2505                      !list_empty(&peer->ksnp_zc_req_list));
2506
2507                list_for_each(tmp, &peer->ksnp_routes) {
2508                        route = list_entry(tmp, ksock_route_t, ksnr_list);
2509                        CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2510                              atomic_read(&route->ksnr_refcount),
2511                              route->ksnr_scheduled, route->ksnr_connecting,
2512                              route->ksnr_connected, route->ksnr_deleted);
2513                }
2514
2515                list_for_each(tmp, &peer->ksnp_conns) {
2516                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2517                        CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2518                               atomic_read(&conn->ksnc_conn_refcount),
2519                               atomic_read(&conn->ksnc_sock_refcount),
2520                               conn->ksnc_type, conn->ksnc_closing);
2521                }
2522        }
2523
2524        read_unlock(&ksocknal_data.ksnd_global_lock);
2525        return;
2526}
2527
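    /*
     * lnd_shutdown handler: flag the net as shutting down, delete all its
     * peers, wait for the peer count to drain to zero, then release the net
     * and, if it was the last one, shut down the global socknal state.
     */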
2528void
2529ksocknal_shutdown(lnet_ni_t *ni)
2530{
2531        ksock_net_t *net = ni->ni_data;
2532        int i;
2533        lnet_process_id_t anyid = {0};
2534
2535        anyid.nid = LNET_NID_ANY;
2536        anyid.pid = LNET_PID_ANY;
2537
2538        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2539        LASSERT(ksocknal_data.ksnd_nnets > 0);
2540
2541        spin_lock_bh(&net->ksnn_lock);
2542        net->ksnn_shutdown = 1;          /* prevent new peers */
2543        spin_unlock_bh(&net->ksnn_lock);
2544
2545        /* Delete all peers */
2546        ksocknal_del_peer(ni, anyid, 0);
2547
2548        /* Wait for all peer state to clean up */
2549        i = 2;
2550        spin_lock_bh(&net->ksnn_lock);
2551        while (net->ksnn_npeers != 0) {
2552                spin_unlock_bh(&net->ksnn_lock);
2553
2554                i++;
2555                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2556                       "waiting for %d peers to disconnect\n",
2557                       net->ksnn_npeers);
2558                set_current_state(TASK_UNINTERRUPTIBLE);
2559                schedule_timeout(cfs_time_seconds(1));
2560
2561                ksocknal_debug_peerhash(ni);
2562
2563                spin_lock_bh(&net->ksnn_lock);
2564        }
2565        spin_unlock_bh(&net->ksnn_lock);
2566
2567        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2568                LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
2569                LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
2570        }
2571
2572        list_del(&net->ksnn_list);
2573        LIBCFS_FREE(net, sizeof(*net));
2574
2575        ksocknal_data.ksnd_nnets--;
2576        if (ksocknal_data.ksnd_nnets == 0)
2577                ksocknal_base_shutdown();
2578}
2579
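    /*
     * Autoconfigure interfaces: query every non-loopback interface that is
     * up and record it (up to LNET_MAX_INTERFACES) in the net.  Returns the
     * number of usable interfaces found.
     */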
2580static int
2581ksocknal_enumerate_interfaces(ksock_net_t *net)
2582{
2583        char **names;
2584        int i;
2585        int j;
2586        int rc;
2587        int n;
2588
2589        n = lnet_ipif_enumerate(&names);
2590        if (n <= 0) {
2591                CERROR("Can't enumerate interfaces: %d\n", n);
2592                return n;
2593        }
2594
2595        for (i = j = 0; i < n; i++) {
2596                int up;
2597                __u32 ip;
2598                __u32 mask;
2599
2600                if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2601                        continue;
2602
2603                rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2604                if (rc != 0) {
2605                        CWARN("Can't get interface %s info: %d\n",
2606                              names[i], rc);
2607                        continue;
2608                }
2609
2610                if (!up) {
2611                        CWARN("Ignoring interface %s (down)\n",
2612                              names[i]);
2613                        continue;
2614                }
2615
2616                if (j == LNET_MAX_INTERFACES) {
2617                        CWARN("Ignoring interface %s (too many interfaces)\n",
2618                              names[i]);
2619                        continue;
2620                }
2621
2622                net->ksnn_interfaces[j].ksni_ipaddr = ip;
2623                net->ksnn_interfaces[j].ksni_netmask = mask;
2624                strncpy(&net->ksnn_interfaces[j].ksni_name[0],
2625                        names[i], IFNAMSIZ);
2626                j++;
2627        }
2628
2629        lnet_ipif_free_enumeration(names, n);
2630
2631        if (j == 0)
2632                CERROR("Can't find any usable interfaces\n");
2633
2634        return j;
2635}
2636
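    /*
     * Count how many of this net's interfaces (comparing base names,
     * ignoring ":alias" suffixes) are not already used by a configured net;
     * extra scheduler threads are only started for such new interfaces.
     */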
2637static int
2638ksocknal_search_new_ipif(ksock_net_t *net)
2639{
2640        int new_ipif = 0;
2641        int i;
2642
2643        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2644                char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2645                char *colon = strchr(ifnam, ':');
2646                int found  = 0;
2647                ksock_net_t *tmp;
2648                int j;
2649
2650                if (colon != NULL) /* ignore alias device */
2651                        *colon = 0;
2652
2653                list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2654                                        ksnn_list) {
2655                        for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2656                                char *ifnam2 =
2657                                        &tmp->ksnn_interfaces[j].ksni_name[0];
2658                                char *colon2 = strchr(ifnam2, ':');
2659
2660                                if (colon2 != NULL)
2661                                        *colon2 = 0;
2662
2663                                found = strcmp(ifnam, ifnam2) == 0;
2664                                if (colon2 != NULL)
2665                                        *colon2 = ':';
2666                        }
2667                        if (found)
2668                                break;
2669                }
2670
2671                new_ipif += !found;
2672                if (colon != NULL)
2673                        *colon = ':';
2674        }
2675
2676        return new_ipif;
2677}
2678
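    /*
     * Start scheduler threads for one CPT: the full complement on first use,
     * or up to two extra threads when a new interface is added later.
     * Returns 0, or the error from the first thread that failed to start.
     */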
2679static int
2680ksocknal_start_schedulers(struct ksock_sched_info *info)
2681{
2682        int nthrs;
2683        int rc = 0;
2684        int i;
2685
2686        if (info->ksi_nthreads == 0) {
2687                if (*ksocknal_tunables.ksnd_nscheds > 0) {
2688                        nthrs = info->ksi_nthreads_max;
2689                } else {
2690                        nthrs = cfs_cpt_weight(lnet_cpt_table(),
2691                                               info->ksi_cpt);
2692                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2693                        nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2694                }
2695                nthrs = min(nthrs, info->ksi_nthreads_max);
2696        } else {
2697                LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2698                /* add up to two more threads if there is a new interface */
2699                nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2700        }
2701
2702        for (i = 0; i < nthrs; i++) {
2703                long id;
2704                char name[20];
2705                ksock_sched_t *sched;
2706
2707                id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2708                sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2709                snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2710                         info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2711
2712                rc = ksocknal_thread_start(ksocknal_scheduler,
2713                                           (void *)id, name);
2714                if (rc == 0)
2715                        continue;
2716
2717                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2718                       info->ksi_cpt, info->ksi_nthreads + i, rc);
2719                break;
2720        }
2721
2722        info->ksi_nthreads += i;
2723        return rc;
2724}
2725
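    /*
     * Make sure scheduler threads exist on every CPT this net will use,
     * starting (or topping up) them only when the net brings a new interface
     * or the CPT has no threads yet.
     */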
2726static int
2727ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2728{
2729        int newif = ksocknal_search_new_ipif(net);
2730        int rc;
2731        int i;
2732
2733        LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2734
2735        for (i = 0; i < ncpts; i++) {
2736                struct ksock_sched_info *info;
2737                int cpt = (cpts == NULL) ? i : cpts[i];
2738
2739                LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2740                info = ksocknal_data.ksnd_sched_info[cpt];
2741
2742                if (!newif && info->ksi_nthreads > 0)
2743                        continue;
2744
2745                rc = ksocknal_start_schedulers(info);
2746                if (rc != 0)
2747                        return rc;
2748        }
2749        return 0;
2750}
2751
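    /*
     * lnd_startup handler: bring up global state on first use, allocate and
     * initialise the per-NI ksock_net_t, bind it to the configured (or
     * autodetected) interfaces, start scheduler threads and derive the NID
     * from the first interface's IP address.
     */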
2752int
2753ksocknal_startup(lnet_ni_t *ni)
2754{
2755        ksock_net_t *net;
2756        int rc;
2757        int i;
2758
2759        LASSERT(ni->ni_lnd == &the_ksocklnd);
2760
2761        if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2762                rc = ksocknal_base_startup();
2763                if (rc != 0)
2764                        return rc;
2765        }
2766
2767        LIBCFS_ALLOC(net, sizeof(*net));
2768        if (net == NULL)
2769                goto fail_0;
2770
2771        spin_lock_init(&net->ksnn_lock);
2772        net->ksnn_incarnation = ksocknal_new_incarnation();
2773        ni->ni_data = net;
2774        ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
2775        ni->ni_maxtxcredits   = *ksocknal_tunables.ksnd_credits;
2776        ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
2777        ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2778
2779        if (ni->ni_interfaces[0] == NULL) {
2780                rc = ksocknal_enumerate_interfaces(net);
2781                if (rc <= 0)
2782                        goto fail_1;
2783
2784                net->ksnn_ninterfaces = 1;
2785        } else {
2786                for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2787                        int up;
2788
2789                        if (ni->ni_interfaces[i] == NULL)
2790                                break;
2791
2792                        rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2793                                &net->ksnn_interfaces[i].ksni_ipaddr,
2794                                &net->ksnn_interfaces[i].ksni_netmask);
2795
2796                        if (rc != 0) {
2797                                CERROR("Can't get interface %s info: %d\n",
2798                                       ni->ni_interfaces[i], rc);
2799                                goto fail_1;
2800                        }
2801
2802                        if (!up) {
2803                                CERROR("Interface %s is down\n",
2804                                       ni->ni_interfaces[i]);
2805                                goto fail_1;
2806                        }
2807
2808                        strncpy(&net->ksnn_interfaces[i].ksni_name[0],
2809                                ni->ni_interfaces[i], IFNAMSIZ);
2810                }
2811                net->ksnn_ninterfaces = i;
2812        }
2813
2814        /* call it before adding the net to ksocknal_data.ksnd_nets */
2815        rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2816        if (rc != 0)
2817                goto fail_1;
2818
2819        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2820                                net->ksnn_interfaces[0].ksni_ipaddr);
2821        list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2822
2823        ksocknal_data.ksnd_nnets++;
2824
2825        return 0;
2826
2827 fail_1:
2828        LIBCFS_FREE(net, sizeof(*net));
2829 fail_0:
2830        if (ksocknal_data.ksnd_nnets == 0)
2831                ksocknal_base_shutdown();
2832
2833        return -ENETDOWN;
2834}
2835
2836static void __exit
2837ksocknal_module_fini(void)
2838{
2839        lnet_unregister_lnd(&the_ksocklnd);
2840}
2841
2842static int __init
2843ksocknal_module_init(void)
2844{
2845        int rc;
2846
2847        /* check the ksnr_connected/connecting fields are large enough */
2848        CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2849        CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2850
2851        /* initialize the_ksocklnd */
2852        the_ksocklnd.lnd_type     = SOCKLND;
2853        the_ksocklnd.lnd_startup  = ksocknal_startup;
2854        the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2855        the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2856        the_ksocklnd.lnd_send     = ksocknal_send;
2857        the_ksocklnd.lnd_recv     = ksocknal_recv;
2858        the_ksocklnd.lnd_notify   = ksocknal_notify;
2859        the_ksocklnd.lnd_query    = ksocknal_query;
2860        the_ksocklnd.lnd_accept   = ksocknal_accept;
2861
2862        rc = ksocknal_tunables_init();
2863        if (rc != 0)
2864                return rc;
2865
2866        lnet_register_lnd(&the_ksocklnd);
2867
2868        return 0;
2869}
2870
2871MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2872MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
2873MODULE_LICENSE("GPL");
2874MODULE_VERSION("3.0.0");
2875
2876module_init(ksocknal_module_init);
2877module_exit(ksocknal_module_fini);
2878