linux/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lnet/klnds/socklnd/socklnd.c
  37 *
  38 * Author: Zach Brown <zab@zabbo.net>
  39 * Author: Peter J. Braam <braam@clusterfs.com>
  40 * Author: Phil Schwan <phil@clusterfs.com>
  41 * Author: Eric Barton <eric@bartonsoftware.com>
  42 */
  43
  44#include "socklnd.h"
  45
  46lnd_t              the_ksocklnd;
  47ksock_nal_data_t        ksocknal_data;
  48
  49ksock_interface_t *
  50ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
  51{
  52        ksock_net_t       *net = ni->ni_data;
  53        int             i;
  54        ksock_interface_t *iface;
  55
  56        for (i = 0; i < net->ksnn_ninterfaces; i++) {
  57                LASSERT(i < LNET_MAX_INTERFACES);
  58                iface = &net->ksnn_interfaces[i];
  59
  60                if (iface->ksni_ipaddr == ip)
  61                        return (iface);
  62        }
  63
  64        return (NULL);
  65}
  66
  67ksock_route_t *
  68ksocknal_create_route (__u32 ipaddr, int port)
  69{
  70        ksock_route_t *route;
  71
  72        LIBCFS_ALLOC (route, sizeof (*route));
  73        if (route == NULL)
  74                return (NULL);
  75
  76        atomic_set (&route->ksnr_refcount, 1);
  77        route->ksnr_peer = NULL;
  78        route->ksnr_retry_interval = 0;  /* OK to connect at any time */
  79        route->ksnr_ipaddr = ipaddr;
  80        route->ksnr_port = port;
  81        route->ksnr_scheduled = 0;
  82        route->ksnr_connecting = 0;
  83        route->ksnr_connected = 0;
  84        route->ksnr_deleted = 0;
  85        route->ksnr_conn_count = 0;
  86        route->ksnr_share_count = 0;
  87
  88        return (route);
  89}
  90
  91void
  92ksocknal_destroy_route (ksock_route_t *route)
  93{
  94        LASSERT (atomic_read(&route->ksnr_refcount) == 0);
  95
  96        if (route->ksnr_peer != NULL)
  97                ksocknal_peer_decref(route->ksnr_peer);
  98
  99        LIBCFS_FREE (route, sizeof (*route));
 100}
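
/* Route lifecycle sketch: ksocknal_create_route() returns a route holding a
 * single reference for the caller.  Once attached to a peer (see
 * ksocknal_add_route_locked() below) the route also pins that peer, and the
 * peer ref is only dropped here when the route's own refcount has fallen to
 * zero, presumably via ksocknal_route_decref() defined elsewhere. */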
 101
 102int
 103ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 104{
 105        ksock_net_t   *net = ni->ni_data;
 106        ksock_peer_t  *peer;
 107
 108        LASSERT (id.nid != LNET_NID_ANY);
 109        LASSERT (id.pid != LNET_PID_ANY);
 110        LASSERT (!in_interrupt());
 111
 112        LIBCFS_ALLOC (peer, sizeof (*peer));
 113        if (peer == NULL)
 114                return -ENOMEM;
 115
 116        memset (peer, 0, sizeof (*peer));       /* NULL pointers/clear flags etc */
 117
 118        peer->ksnp_ni = ni;
 119        peer->ksnp_id = id;
 120        atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
 121        peer->ksnp_closing = 0;
 122        peer->ksnp_accepting = 0;
 123        peer->ksnp_proto = NULL;
 124        peer->ksnp_last_alive = 0;
 125        peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 126
 127        INIT_LIST_HEAD (&peer->ksnp_conns);
 128        INIT_LIST_HEAD (&peer->ksnp_routes);
 129        INIT_LIST_HEAD (&peer->ksnp_tx_queue);
 130        INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
 131        spin_lock_init(&peer->ksnp_lock);
 132
 133        spin_lock_bh(&net->ksnn_lock);
 134
 135        if (net->ksnn_shutdown) {
 136                spin_unlock_bh(&net->ksnn_lock);
 137
 138                LIBCFS_FREE(peer, sizeof(*peer));
 139                CERROR("Can't create peer: network shutdown\n");
 140                return -ESHUTDOWN;
 141        }
 142
 143        net->ksnn_npeers++;
 144
 145        spin_unlock_bh(&net->ksnn_lock);
 146
 147        *peerp = peer;
 148        return 0;
 149}
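
/* Note on peer accounting: the new peer is handed back with one reference
 * for the caller, and net->ksnn_npeers is bumped under ksnn_lock while the
 * shutdown flag is still clear; if shutdown has already started the
 * allocation is undone and -ESHUTDOWN returned.  The counter presumably
 * lets the shutdown path elsewhere in this file wait until every peer has
 * been destroyed before the net goes away. */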
 150
 151void
 152ksocknal_destroy_peer (ksock_peer_t *peer)
 153{
 154        ksock_net_t    *net = peer->ksnp_ni->ni_data;
 155
 156        CDEBUG (D_NET, "peer %s %p deleted\n",
 157                libcfs_id2str(peer->ksnp_id), peer);
 158
 159        LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
 160        LASSERT (peer->ksnp_accepting == 0);
 161        LASSERT (list_empty (&peer->ksnp_conns));
 162        LASSERT (list_empty (&peer->ksnp_routes));
 163        LASSERT (list_empty (&peer->ksnp_tx_queue));
 164        LASSERT (list_empty (&peer->ksnp_zc_req_list));
 165
 166        LIBCFS_FREE (peer, sizeof (*peer));
 167
 168        /* NB a peer's connections and routes keep a reference on their peer
 169         * until they are destroyed, so we can be assured that _all_ state to
 170         * do with this peer has been cleaned up when its refcount drops to
 171         * zero. */
 172        spin_lock_bh(&net->ksnn_lock);
 173        net->ksnn_npeers--;
 174        spin_unlock_bh(&net->ksnn_lock);
 175}
 176
 177ksock_peer_t *
 178ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
 179{
 180        struct list_head       *peer_list = ksocknal_nid2peerlist(id.nid);
 181        struct list_head       *tmp;
 182        ksock_peer_t     *peer;
 183
 184        list_for_each (tmp, peer_list) {
 185
 186                peer = list_entry (tmp, ksock_peer_t, ksnp_list);
 187
 188                LASSERT (!peer->ksnp_closing);
 189
 190                if (peer->ksnp_ni != ni)
 191                        continue;
 192
 193                if (peer->ksnp_id.nid != id.nid ||
 194                    peer->ksnp_id.pid != id.pid)
 195                        continue;
 196
 197                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
 198                       peer, libcfs_id2str(id),
 199                       atomic_read(&peer->ksnp_refcount));
 200                return (peer);
 201        }
 202        return (NULL);
 203}
 204
 205ksock_peer_t *
 206ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
 207{
 208        ksock_peer_t     *peer;
 209
 210        read_lock(&ksocknal_data.ksnd_global_lock);
 211        peer = ksocknal_find_peer_locked(ni, id);
  212        if (peer != NULL)                       /* +1 ref for caller */
 213                ksocknal_peer_addref(peer);
 214        read_unlock(&ksocknal_data.ksnd_global_lock);
 215
 216        return (peer);
 217}
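
/* NB ksocknal_find_peer() returns the peer with an extra reference taken
 * under the global read lock, so callers are expected to pair it with a
 * decref, e.g. (sketch):
 *
 *      peer = ksocknal_find_peer(ni, id);
 *      if (peer != NULL) {
 *              ... use peer ...
 *              ksocknal_peer_decref(peer);
 *      }
 */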
 218
 219void
 220ksocknal_unlink_peer_locked (ksock_peer_t *peer)
 221{
 222        int             i;
 223        __u32         ip;
 224        ksock_interface_t *iface;
 225
 226        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
 227                LASSERT (i < LNET_MAX_INTERFACES);
 228                ip = peer->ksnp_passive_ips[i];
 229
 230                iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
 231                /* All IPs in peer->ksnp_passive_ips[] come from the
 232                 * interface list, therefore the call must succeed. */
 233                LASSERT (iface != NULL);
 234
 235                CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
 236                       peer, iface, iface->ksni_nroutes);
 237                iface->ksni_npeers--;
 238        }
 239
 240        LASSERT (list_empty(&peer->ksnp_conns));
 241        LASSERT (list_empty(&peer->ksnp_routes));
 242        LASSERT (!peer->ksnp_closing);
 243        peer->ksnp_closing = 1;
 244        list_del (&peer->ksnp_list);
 245        /* lose peerlist's ref */
 246        ksocknal_peer_decref(peer);
 247}
 248
 249int
 250ksocknal_get_peer_info (lnet_ni_t *ni, int index,
 251                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
 252                        int *port, int *conn_count, int *share_count)
 253{
 254        ksock_peer_t      *peer;
 255        struct list_head        *ptmp;
 256        ksock_route_t     *route;
 257        struct list_head        *rtmp;
 258        int             i;
 259        int             j;
 260        int             rc = -ENOENT;
 261
 262        read_lock(&ksocknal_data.ksnd_global_lock);
 263
 264        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 265
 266                list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
 267                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
 268
 269                        if (peer->ksnp_ni != ni)
 270                                continue;
 271
 272                        if (peer->ksnp_n_passive_ips == 0 &&
 273                            list_empty(&peer->ksnp_routes)) {
 274                                if (index-- > 0)
 275                                        continue;
 276
 277                                *id = peer->ksnp_id;
 278                                *myip = 0;
 279                                *peer_ip = 0;
 280                                *port = 0;
 281                                *conn_count = 0;
 282                                *share_count = 0;
 283                                rc = 0;
 284                                goto out;
 285                        }
 286
 287                        for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
 288                                if (index-- > 0)
 289                                        continue;
 290
 291                                *id = peer->ksnp_id;
 292                                *myip = peer->ksnp_passive_ips[j];
 293                                *peer_ip = 0;
 294                                *port = 0;
 295                                *conn_count = 0;
 296                                *share_count = 0;
 297                                rc = 0;
 298                                goto out;
 299                        }
 300
 301                        list_for_each (rtmp, &peer->ksnp_routes) {
 302                                if (index-- > 0)
 303                                        continue;
 304
 305                                route = list_entry(rtmp, ksock_route_t,
 306                                                       ksnr_list);
 307
 308                                *id = peer->ksnp_id;
 309                                *myip = route->ksnr_myipaddr;
 310                                *peer_ip = route->ksnr_ipaddr;
 311                                *port = route->ksnr_port;
 312                                *conn_count = route->ksnr_conn_count;
 313                                *share_count = route->ksnr_share_count;
 314                                rc = 0;
 315                                goto out;
 316                        }
 317                }
 318        }
 319 out:
 320        read_unlock(&ksocknal_data.ksnd_global_lock);
 321        return (rc);
 322}
 323
 324void
 325ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 326{
 327        ksock_peer_t      *peer = route->ksnr_peer;
 328        int             type = conn->ksnc_type;
 329        ksock_interface_t *iface;
 330
 331        conn->ksnc_route = route;
 332        ksocknal_route_addref(route);
 333
 334        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
 335                if (route->ksnr_myipaddr == 0) {
 336                        /* route wasn't bound locally yet (the initial route) */
 337                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
 338                               libcfs_id2str(peer->ksnp_id),
 339                               &route->ksnr_ipaddr,
 340                               &conn->ksnc_myipaddr);
 341                } else {
 342                        CDEBUG(D_NET, "Rebinding %s %pI4h from "
 343                               "%pI4h to %pI4h\n",
 344                               libcfs_id2str(peer->ksnp_id),
 345                               &route->ksnr_ipaddr,
 346                               &route->ksnr_myipaddr,
 347                               &conn->ksnc_myipaddr);
 348
 349                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 350                                                  route->ksnr_myipaddr);
 351                        if (iface != NULL)
 352                                iface->ksni_nroutes--;
 353                }
 354                route->ksnr_myipaddr = conn->ksnc_myipaddr;
 355                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 356                                          route->ksnr_myipaddr);
 357                if (iface != NULL)
 358                        iface->ksni_nroutes++;
 359        }
 360
 361        route->ksnr_connected |= (1<<type);
 362        route->ksnr_conn_count++;
 363
 364        /* Successful connection => further attempts can
 365         * proceed immediately */
 366        route->ksnr_retry_interval = 0;
 367}
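
/* NB ksnr_connected is used above as a bitmask with one bit per connection
 * type (1 << type), recording which typed connections this route currently
 * has, while ksnr_conn_count counts connections established via the route
 * for reporting in ksocknal_get_peer_info(). */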
 368
 369void
 370ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
 371{
 372        struct list_head        *tmp;
 373        ksock_conn_t      *conn;
 374        ksock_route_t     *route2;
 375
 376        LASSERT (!peer->ksnp_closing);
 377        LASSERT (route->ksnr_peer == NULL);
 378        LASSERT (!route->ksnr_scheduled);
 379        LASSERT (!route->ksnr_connecting);
 380        LASSERT (route->ksnr_connected == 0);
 381
 382        /* LASSERT(unique) */
 383        list_for_each(tmp, &peer->ksnp_routes) {
 384                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
 385
 386                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
 387                        CERROR("Duplicate route %s %pI4h\n",
 388                                libcfs_id2str(peer->ksnp_id),
 389                                &route->ksnr_ipaddr);
 390                        LBUG();
 391                }
 392        }
 393
 394        route->ksnr_peer = peer;
 395        ksocknal_peer_addref(peer);
 396        /* peer's routelist takes over my ref on 'route' */
 397        list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
 398
 399        list_for_each(tmp, &peer->ksnp_conns) {
 400                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 401
 402                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
 403                        continue;
 404
 405                ksocknal_associate_route_conn_locked(route, conn);
 406                /* keep going (typed routes) */
 407        }
 408}
 409
 410void
 411ksocknal_del_route_locked (ksock_route_t *route)
 412{
 413        ksock_peer_t      *peer = route->ksnr_peer;
 414        ksock_interface_t *iface;
 415        ksock_conn_t      *conn;
 416        struct list_head        *ctmp;
 417        struct list_head        *cnxt;
 418
 419        LASSERT (!route->ksnr_deleted);
 420
 421        /* Close associated conns */
 422        list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
 423                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 424
 425                if (conn->ksnc_route != route)
 426                        continue;
 427
 428                ksocknal_close_conn_locked (conn, 0);
 429        }
 430
 431        if (route->ksnr_myipaddr != 0) {
 432                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
 433                                          route->ksnr_myipaddr);
 434                if (iface != NULL)
 435                        iface->ksni_nroutes--;
 436        }
 437
 438        route->ksnr_deleted = 1;
 439        list_del (&route->ksnr_list);
 440        ksocknal_route_decref(route);        /* drop peer's ref */
 441
 442        if (list_empty (&peer->ksnp_routes) &&
 443            list_empty (&peer->ksnp_conns)) {
 444                /* I've just removed the last route to a peer with no active
 445                 * connections */
 446                ksocknal_unlink_peer_locked (peer);
 447        }
 448}
 449
 450int
 451ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 452{
 453        struct list_head        *tmp;
 454        ksock_peer_t      *peer;
 455        ksock_peer_t      *peer2;
 456        ksock_route_t     *route;
 457        ksock_route_t     *route2;
 458        int             rc;
 459
 460        if (id.nid == LNET_NID_ANY ||
 461            id.pid == LNET_PID_ANY)
 462                return (-EINVAL);
 463
 464        /* Have a brand new peer ready... */
 465        rc = ksocknal_create_peer(&peer, ni, id);
 466        if (rc != 0)
 467                return rc;
 468
 469        route = ksocknal_create_route (ipaddr, port);
 470        if (route == NULL) {
 471                ksocknal_peer_decref(peer);
 472                return (-ENOMEM);
 473        }
 474
 475        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 476
 477        /* always called with a ref on ni, so shutdown can't have started */
 478        LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
 479
 480        peer2 = ksocknal_find_peer_locked (ni, id);
 481        if (peer2 != NULL) {
 482                ksocknal_peer_decref(peer);
 483                peer = peer2;
 484        } else {
 485                /* peer table takes my ref on peer */
 486                list_add_tail (&peer->ksnp_list,
 487                                   ksocknal_nid2peerlist (id.nid));
 488        }
 489
 490        route2 = NULL;
 491        list_for_each (tmp, &peer->ksnp_routes) {
 492                route2 = list_entry(tmp, ksock_route_t, ksnr_list);
 493
 494                if (route2->ksnr_ipaddr == ipaddr)
 495                        break;
 496
 497                route2 = NULL;
 498        }
 499        if (route2 == NULL) {
 500                ksocknal_add_route_locked(peer, route);
 501                route->ksnr_share_count++;
 502        } else {
 503                ksocknal_route_decref(route);
 504                route2->ksnr_share_count++;
 505        }
 506
 507        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 508
 509        return (0);
 510}
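
/* ksnr_share_count tracks how many times a route was added explicitly
 * (presumably via the add-peer ioctl): re-adding an existing ip/port pair
 * just bumps the count on the route already there.  Routes left with a zero
 * share count are treated as auto-created entries and swept away in
 * ksocknal_del_peer_locked() below. */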
 511
 512void
 513ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
 514{
 515        ksock_conn_t     *conn;
 516        ksock_route_t    *route;
 517        struct list_head       *tmp;
 518        struct list_head       *nxt;
 519        int            nshared;
 520
 521        LASSERT (!peer->ksnp_closing);
 522
 523        /* Extra ref prevents peer disappearing until I'm done with it */
 524        ksocknal_peer_addref(peer);
 525
 526        list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
 527                route = list_entry(tmp, ksock_route_t, ksnr_list);
 528
 529                /* no match */
 530                if (!(ip == 0 || route->ksnr_ipaddr == ip))
 531                        continue;
 532
 533                route->ksnr_share_count = 0;
 534                /* This deletes associated conns too */
 535                ksocknal_del_route_locked (route);
 536        }
 537
 538        nshared = 0;
 539        list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
 540                route = list_entry(tmp, ksock_route_t, ksnr_list);
 541                nshared += route->ksnr_share_count;
 542        }
 543
 544        if (nshared == 0) {
 545                /* remove everything else if there are no explicit entries
 546                 * left */
 547
 548                list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
 549                        route = list_entry(tmp, ksock_route_t, ksnr_list);
 550
 551                        /* we should only be removing auto-entries */
 552                        LASSERT(route->ksnr_share_count == 0);
 553                        ksocknal_del_route_locked (route);
 554                }
 555
 556                list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
 557                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
 558
 559                        ksocknal_close_conn_locked(conn, 0);
 560                }
 561        }
 562
 563        ksocknal_peer_decref(peer);
 564        /* NB peer unlinks itself when last conn/route is removed */
 565}
 566
 567int
 568ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 569{
 570        LIST_HEAD     (zombies);
 571        struct list_head        *ptmp;
 572        struct list_head        *pnxt;
 573        ksock_peer_t      *peer;
 574        int             lo;
 575        int             hi;
 576        int             i;
 577        int             rc = -ENOENT;
 578
 579        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 580
 581        if (id.nid != LNET_NID_ANY)
 582                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
 583        else {
 584                lo = 0;
 585                hi = ksocknal_data.ksnd_peer_hash_size - 1;
 586        }
 587
 588        for (i = lo; i <= hi; i++) {
 589                list_for_each_safe (ptmp, pnxt,
 590                                        &ksocknal_data.ksnd_peers[i]) {
 591                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
 592
 593                        if (peer->ksnp_ni != ni)
 594                                continue;
 595
 596                        if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
 597                              (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
 598                                continue;
 599
 600                        ksocknal_peer_addref(peer);     /* a ref for me... */
 601
 602                        ksocknal_del_peer_locked (peer, ip);
 603
 604                        if (peer->ksnp_closing &&
 605                            !list_empty(&peer->ksnp_tx_queue)) {
 606                                LASSERT (list_empty(&peer->ksnp_conns));
 607                                LASSERT (list_empty(&peer->ksnp_routes));
 608
 609                                list_splice_init(&peer->ksnp_tx_queue,
 610                                                     &zombies);
 611                        }
 612
 613                        ksocknal_peer_decref(peer);     /* ...till here */
 614
 615                        rc = 0;          /* matched! */
 616                }
 617        }
 618
 619        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 620
 621        ksocknal_txlist_done(ni, &zombies, 1);
 622
 623        return (rc);
 624}
 625
 626ksock_conn_t *
 627ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
 628{
 629        ksock_peer_t      *peer;
 630        struct list_head        *ptmp;
 631        ksock_conn_t      *conn;
 632        struct list_head        *ctmp;
 633        int             i;
 634
 635        read_lock(&ksocknal_data.ksnd_global_lock);
 636
 637        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 638                list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
 639                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
 640
 641                        LASSERT (!peer->ksnp_closing);
 642
 643                        if (peer->ksnp_ni != ni)
 644                                continue;
 645
 646                        list_for_each (ctmp, &peer->ksnp_conns) {
 647                                if (index-- > 0)
 648                                        continue;
 649
 650                                conn = list_entry (ctmp, ksock_conn_t,
 651                                                       ksnc_list);
 652                                ksocknal_conn_addref(conn);
  653                                read_unlock(
  654                                        &ksocknal_data.ksnd_global_lock);
 655                                return (conn);
 656                        }
 657                }
 658        }
 659
 660        read_unlock(&ksocknal_data.ksnd_global_lock);
 661        return (NULL);
 662}
 663
 664ksock_sched_t *
 665ksocknal_choose_scheduler_locked(unsigned int cpt)
 666{
 667        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
 668        ksock_sched_t           *sched;
 669        int                     i;
 670
 671        LASSERT(info->ksi_nthreads > 0);
 672
 673        sched = &info->ksi_scheds[0];
 674        /*
 675         * NB: it's safe so far, but info->ksi_nthreads could be changed
 676         * at runtime when we have dynamic LNet configuration, then we
 677         * need to take care of this.
 678         */
 679        for (i = 1; i < info->ksi_nthreads; i++) {
 680                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
 681                        sched = &info->ksi_scheds[i];
 682        }
 683
 684        return sched;
 685}
 686
 687int
 688ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
 689{
 690        ksock_net_t       *net = ni->ni_data;
 691        int             i;
 692        int             nip;
 693
 694        read_lock(&ksocknal_data.ksnd_global_lock);
 695
 696        nip = net->ksnn_ninterfaces;
 697        LASSERT (nip <= LNET_MAX_INTERFACES);
 698
 699        /* Only offer interfaces for additional connections if I have
 700         * more than one. */
 701        if (nip < 2) {
 702                read_unlock(&ksocknal_data.ksnd_global_lock);
 703                return 0;
 704        }
 705
 706        for (i = 0; i < nip; i++) {
 707                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
 708                LASSERT (ipaddrs[i] != 0);
 709        }
 710
 711        read_unlock(&ksocknal_data.ksnd_global_lock);
 712        return (nip);
 713}
 714
 715int
 716ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
 717{
 718        int   best_netmatch = 0;
 719        int   best_xor      = 0;
 720        int   best        = -1;
 721        int   this_xor;
 722        int   this_netmatch;
 723        int   i;
 724
 725        for (i = 0; i < nips; i++) {
 726                if (ips[i] == 0)
 727                        continue;
 728
 729                this_xor = (ips[i] ^ iface->ksni_ipaddr);
 730                this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
 731
 732                if (!(best < 0 ||
 733                      best_netmatch < this_netmatch ||
 734                      (best_netmatch == this_netmatch &&
 735                       best_xor > this_xor)))
 736                        continue;
 737
 738                best = i;
 739                best_netmatch = this_netmatch;
 740                best_xor = this_xor;
 741        }
 742
 743        LASSERT (best >= 0);
 744        return (best);
 745}
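
/* Matching rule above: a peer IP on the interface's own subnet (netmask
 * match) always beats one that is not, and ties are broken by the smallest
 * XOR distance between the two addresses.  The LASSERT means callers must
 * supply at least one non-zero peer IP. */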
 746
 747int
 748ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 749{
 750        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
 751        ksock_net_t     *net = peer->ksnp_ni->ni_data;
 752        ksock_interface_t  *iface;
 753        ksock_interface_t  *best_iface;
 754        int              n_ips;
 755        int              i;
 756        int              j;
 757        int              k;
 758        __u32          ip;
 759        __u32          xor;
 760        int              this_netmatch;
 761        int              best_netmatch;
 762        int              best_npeers;
 763
 764        /* CAVEAT EMPTOR: We do all our interface matching with an
 765         * exclusive hold of global lock at IRQ priority.  We're only
 766         * expecting to be dealing with small numbers of interfaces, so the
 767         * O(n**3)-ness shouldn't matter */
 768
 769        /* Also note that I'm not going to return more than n_peerips
 770         * interfaces, even if I have more myself */
 771
 772        write_lock_bh(global_lock);
 773
 774        LASSERT (n_peerips <= LNET_MAX_INTERFACES);
 775        LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
 776
 777        /* Only match interfaces for additional connections
 778         * if I have > 1 interface */
 779        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
 780                MIN(n_peerips, net->ksnn_ninterfaces);
 781
 782        for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
 783                /*            ^ yes really... */
 784
 785                /* If we have any new interfaces, first tick off all the
 786                 * peer IPs that match old interfaces, then choose new
  787                 * interfaces to match the remaining peer IPs.
 788                 * We don't forget interfaces we've stopped using; we might
 789                 * start using them again... */
 790
 791                if (i < peer->ksnp_n_passive_ips) {
 792                        /* Old interface. */
 793                        ip = peer->ksnp_passive_ips[i];
 794                        best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
 795
 796                        /* peer passive ips are kept up to date */
 797                        LASSERT(best_iface != NULL);
 798                } else {
 799                        /* choose a new interface */
 800                        LASSERT (i == peer->ksnp_n_passive_ips);
 801
 802                        best_iface = NULL;
 803                        best_netmatch = 0;
 804                        best_npeers = 0;
 805
 806                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
 807                                iface = &net->ksnn_interfaces[j];
 808                                ip = iface->ksni_ipaddr;
 809
 810                                for (k = 0; k < peer->ksnp_n_passive_ips; k++)
 811                                        if (peer->ksnp_passive_ips[k] == ip)
 812                                                break;
 813
 814                                if (k < peer->ksnp_n_passive_ips) /* using it already */
 815                                        continue;
 816
 817                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
 818                                xor = (ip ^ peerips[k]);
 819                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
 820
 821                                if (!(best_iface == NULL ||
 822                                      best_netmatch < this_netmatch ||
 823                                      (best_netmatch == this_netmatch &&
 824                                       best_npeers > iface->ksni_npeers)))
 825                                        continue;
 826
 827                                best_iface = iface;
 828                                best_netmatch = this_netmatch;
 829                                best_npeers = iface->ksni_npeers;
 830                        }
 831
 832                        best_iface->ksni_npeers++;
 833                        ip = best_iface->ksni_ipaddr;
 834                        peer->ksnp_passive_ips[i] = ip;
 835                        peer->ksnp_n_passive_ips = i+1;
 836                }
 837
 838                LASSERT (best_iface != NULL);
 839
 840                /* mark the best matching peer IP used */
 841                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
 842                peerips[j] = 0;
 843        }
 844
 845        /* Overwrite input peer IP addresses */
 846        memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
 847
 848        write_unlock_bh(global_lock);
 849
 850        return (n_ips);
 851}
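
/* On return the caller's peerips[] has been overwritten with the local
 * interface addresses recorded in ksnp_passive_ips[], and the return value
 * is how many were paired up (zero when this node has fewer than two
 * interfaces).  The passive path in ksocknal_create_conn() below uses this
 * to tell the peer which local addresses to connect to. */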
 852
 853void
 854ksocknal_create_routes(ksock_peer_t *peer, int port,
 855                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
 856{
 857        ksock_route_t       *newroute = NULL;
 858        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
 859        lnet_ni_t          *ni = peer->ksnp_ni;
 860        ksock_net_t      *net = ni->ni_data;
 861        struct list_head          *rtmp;
 862        ksock_route_t       *route;
 863        ksock_interface_t   *iface;
 864        ksock_interface_t   *best_iface;
 865        int               best_netmatch;
 866        int               this_netmatch;
 867        int               best_nroutes;
 868        int               i;
 869        int               j;
 870
 871        /* CAVEAT EMPTOR: We do all our interface matching with an
 872         * exclusive hold of global lock at IRQ priority.  We're only
 873         * expecting to be dealing with small numbers of interfaces, so the
 874         * O(n**3)-ness here shouldn't matter */
 875
 876        write_lock_bh(global_lock);
 877
 878        if (net->ksnn_ninterfaces < 2) {
 879                /* Only create additional connections
 880                 * if I have > 1 interface */
 881                write_unlock_bh(global_lock);
 882                return;
 883        }
 884
 885        LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
 886
 887        for (i = 0; i < npeer_ipaddrs; i++) {
 888                if (newroute != NULL) {
 889                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
 890                } else {
 891                        write_unlock_bh(global_lock);
 892
 893                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
 894                        if (newroute == NULL)
 895                                return;
 896
 897                        write_lock_bh(global_lock);
 898                }
 899
 900                if (peer->ksnp_closing) {
 901                        /* peer got closed under me */
 902                        break;
 903                }
 904
 905                /* Already got a route? */
 906                route = NULL;
 907                list_for_each(rtmp, &peer->ksnp_routes) {
 908                        route = list_entry(rtmp, ksock_route_t, ksnr_list);
 909
 910                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
 911                                break;
 912
 913                        route = NULL;
 914                }
 915                if (route != NULL)
 916                        continue;
 917
 918                best_iface = NULL;
 919                best_nroutes = 0;
 920                best_netmatch = 0;
 921
 922                LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
 923
 924                /* Select interface to connect from */
 925                for (j = 0; j < net->ksnn_ninterfaces; j++) {
 926                        iface = &net->ksnn_interfaces[j];
 927
 928                        /* Using this interface already? */
 929                        list_for_each(rtmp, &peer->ksnp_routes) {
 930                                route = list_entry(rtmp, ksock_route_t,
 931                                                       ksnr_list);
 932
 933                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
 934                                        break;
 935
 936                                route = NULL;
 937                        }
 938                        if (route != NULL)
 939                                continue;
 940
 941                        this_netmatch = (((iface->ksni_ipaddr ^
 942                                           newroute->ksnr_ipaddr) &
 943                                           iface->ksni_netmask) == 0) ? 1 : 0;
 944
 945                        if (!(best_iface == NULL ||
 946                              best_netmatch < this_netmatch ||
 947                              (best_netmatch == this_netmatch &&
 948                               best_nroutes > iface->ksni_nroutes)))
 949                                continue;
 950
 951                        best_iface = iface;
 952                        best_netmatch = this_netmatch;
 953                        best_nroutes = iface->ksni_nroutes;
 954                }
 955
 956                if (best_iface == NULL)
 957                        continue;
 958
 959                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
 960                best_iface->ksni_nroutes++;
 961
 962                ksocknal_add_route_locked(peer, newroute);
 963                newroute = NULL;
 964        }
 965
 966        write_unlock_bh(global_lock);
 967        if (newroute != NULL)
 968                ksocknal_route_decref(newroute);
 969}
 970
 971int
 972ksocknal_accept (lnet_ni_t *ni, socket_t *sock)
 973{
 974        ksock_connreq_t    *cr;
 975        int              rc;
 976        __u32          peer_ip;
 977        int              peer_port;
 978
 979        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
 980        LASSERT (rc == 0);                    /* we succeeded before */
 981
 982        LIBCFS_ALLOC(cr, sizeof(*cr));
 983        if (cr == NULL) {
 984                LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
 985                                   "%pI4h: memory exhausted\n",
 986                                   &peer_ip);
 987                return -ENOMEM;
 988        }
 989
 990        lnet_ni_addref(ni);
 991        cr->ksncr_ni   = ni;
 992        cr->ksncr_sock = sock;
 993
 994        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 995
 996        list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
 997        wake_up(&ksocknal_data.ksnd_connd_waitq);
 998
 999        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1000        return 0;
1001}
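
/* The handshake itself is not performed here: the request is queued on
 * ksnd_connd_connreqs and a connection daemon woken through
 * ksnd_connd_waitq picks it up later, presumably calling
 * ksocknal_create_conn() and releasing the lnet_ni reference taken above
 * when it is done. */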
1002
1003int
1004ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
1005{
1006        ksock_route_t   *route;
1007
1008        list_for_each_entry (route, &peer->ksnp_routes, ksnr_list) {
1009
1010                if (route->ksnr_ipaddr == ipaddr)
1011                        return route->ksnr_connecting;
1012        }
1013        return 0;
1014}
1015
1016int
1017ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
1018                      socket_t *sock, int type)
1019{
1020        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
1021        LIST_HEAD     (zombies);
1022        lnet_process_id_t  peerid;
1023        struct list_head        *tmp;
1024        __u64         incarnation;
1025        ksock_conn_t      *conn;
1026        ksock_conn_t      *conn2;
1027        ksock_peer_t      *peer = NULL;
1028        ksock_peer_t      *peer2;
1029        ksock_sched_t     *sched;
1030        ksock_hello_msg_t *hello;
1031        int                cpt;
1032        ksock_tx_t      *tx;
1033        ksock_tx_t      *txtmp;
1034        int             rc;
1035        int             active;
1036        char          *warn = NULL;
1037
1038        active = (route != NULL);
1039
1040        LASSERT (active == (type != SOCKLND_CONN_NONE));
1041
1042        LIBCFS_ALLOC(conn, sizeof(*conn));
1043        if (conn == NULL) {
1044                rc = -ENOMEM;
1045                goto failed_0;
1046        }
1047
1048        memset (conn, 0, sizeof (*conn));
1049
1050        conn->ksnc_peer = NULL;
1051        conn->ksnc_route = NULL;
1052        conn->ksnc_sock = sock;
 1053        /* 2 refs: 1 for conn, plus an extra ref that prevents the socket
 1054         * being closed before the connection is established */
1055        atomic_set (&conn->ksnc_sock_refcount, 2);
1056        conn->ksnc_type = type;
1057        ksocknal_lib_save_callback(sock, conn);
1058        atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1059
1060        conn->ksnc_rx_ready = 0;
1061        conn->ksnc_rx_scheduled = 0;
1062
1063        INIT_LIST_HEAD (&conn->ksnc_tx_queue);
1064        conn->ksnc_tx_ready = 0;
1065        conn->ksnc_tx_scheduled = 0;
1066        conn->ksnc_tx_carrier = NULL;
1067        atomic_set (&conn->ksnc_tx_nob, 0);
1068
1069        LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
1070                                     kshm_ips[LNET_MAX_INTERFACES]));
1071        if (hello == NULL) {
1072                rc = -ENOMEM;
1073                goto failed_1;
1074        }
1075
1076        /* stash conn's local and remote addrs */
1077        rc = ksocknal_lib_get_conn_addrs (conn);
1078        if (rc != 0)
1079                goto failed_1;
1080
1081        /* Find out/confirm peer's NID and connection type and get the
1082         * vector of interfaces she's willing to let me connect to.
1083         * Passive connections use the listener timeout since the peer sends
1084         * eagerly */
1085
1086        if (active) {
1087                peer = route->ksnr_peer;
1088                LASSERT(ni == peer->ksnp_ni);
1089
1090                /* Active connection sends HELLO eagerly */
1091                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1092                peerid = peer->ksnp_id;
1093
1094                write_lock_bh(global_lock);
1095                conn->ksnc_proto = peer->ksnp_proto;
1096                write_unlock_bh(global_lock);
1097
1098                if (conn->ksnc_proto == NULL) {
1099                         conn->ksnc_proto = &ksocknal_protocol_v3x;
1100#if SOCKNAL_VERSION_DEBUG
1101                         if (*ksocknal_tunables.ksnd_protocol == 2)
1102                                 conn->ksnc_proto = &ksocknal_protocol_v2x;
1103                         else if (*ksocknal_tunables.ksnd_protocol == 1)
1104                                 conn->ksnc_proto = &ksocknal_protocol_v1x;
1105#endif
1106                }
1107
1108                rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
1109                if (rc != 0)
1110                        goto failed_1;
1111        } else {
1112                peerid.nid = LNET_NID_ANY;
1113                peerid.pid = LNET_PID_ANY;
1114
1115                /* Passive, get protocol from peer */
1116                conn->ksnc_proto = NULL;
1117        }
1118
1119        rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
1120        if (rc < 0)
1121                goto failed_1;
1122
1123        LASSERT (rc == 0 || active);
1124        LASSERT (conn->ksnc_proto != NULL);
1125        LASSERT (peerid.nid != LNET_NID_ANY);
1126
1127        cpt = lnet_cpt_of_nid(peerid.nid);
1128
1129        if (active) {
1130                ksocknal_peer_addref(peer);
1131                write_lock_bh(global_lock);
1132        } else {
1133                rc = ksocknal_create_peer(&peer, ni, peerid);
1134                if (rc != 0)
1135                        goto failed_1;
1136
1137                write_lock_bh(global_lock);
1138
1139                /* called with a ref on ni, so shutdown can't have started */
1140                LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
1141
1142                peer2 = ksocknal_find_peer_locked(ni, peerid);
1143                if (peer2 == NULL) {
1144                        /* NB this puts an "empty" peer in the peer
1145                         * table (which takes my ref) */
1146                        list_add_tail(&peer->ksnp_list,
1147                                          ksocknal_nid2peerlist(peerid.nid));
1148                } else {
1149                        ksocknal_peer_decref(peer);
1150                        peer = peer2;
1151                }
1152
1153                /* +1 ref for me */
1154                ksocknal_peer_addref(peer);
1155                peer->ksnp_accepting++;
1156
1157                /* Am I already connecting to this guy?  Resolve in
1158                 * favour of higher NID... */
1159                if (peerid.nid < ni->ni_nid &&
1160                    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1161                        rc = EALREADY;
1162                        warn = "connection race resolution";
1163                        goto failed_2;
1164                }
1165        }
1166
1167        if (peer->ksnp_closing ||
1168            (active && route->ksnr_deleted)) {
1169                /* peer/route got closed under me */
1170                rc = -ESTALE;
1171                warn = "peer/route removed";
1172                goto failed_2;
1173        }
1174
1175        if (peer->ksnp_proto == NULL) {
1176                /* Never connected before.
1177                 * NB recv_hello may have returned EPROTO to signal my peer
1178                 * wants a different protocol than the one I asked for.
1179                 */
1180                LASSERT (list_empty(&peer->ksnp_conns));
1181
1182                peer->ksnp_proto = conn->ksnc_proto;
1183                peer->ksnp_incarnation = incarnation;
1184        }
1185
1186        if (peer->ksnp_proto != conn->ksnc_proto ||
1187            peer->ksnp_incarnation != incarnation) {
1188                /* Peer rebooted or I've got the wrong protocol version */
1189                ksocknal_close_peer_conns_locked(peer, 0, 0);
1190
1191                peer->ksnp_proto = NULL;
1192                rc = ESTALE;
1193                warn = peer->ksnp_incarnation != incarnation ?
1194                       "peer rebooted" :
1195                       "wrong proto version";
1196                goto failed_2;
1197        }
1198
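        /* At this point a positive rc (EALREADY/EPROTO) marks a benign
         * "don't use this connection" condition rather than a hard failure:
         * the failed_2 path below replies to passive peers with a CONN_NONE
         * hello so they retry, whereas a negative rc is reported as a real
         * error. */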
1199        switch (rc) {
1200        default:
1201                LBUG();
1202        case 0:
1203                break;
1204        case EALREADY:
1205                warn = "lost conn race";
1206                goto failed_2;
1207        case EPROTO:
1208                warn = "retry with different protocol version";
1209                goto failed_2;
1210        }
1211
1212        /* Refuse to duplicate an existing connection, unless this is a
1213         * loopback connection */
1214        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1215                list_for_each(tmp, &peer->ksnp_conns) {
1216                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1217
1218                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1219                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1220                            conn2->ksnc_type != conn->ksnc_type)
1221                                continue;
1222
1223                        /* Reply on a passive connection attempt so the peer
1224                         * realises we're connected. */
1225                        LASSERT (rc == 0);
1226                        if (!active)
1227                                rc = EALREADY;
1228
1229                        warn = "duplicate";
1230                        goto failed_2;
1231                }
1232        }
1233
1234        /* If the connection created by this route didn't bind to the IP
1235         * address the route connected to, the connection/route matching
1236         * code below probably isn't going to work. */
1237        if (active &&
1238            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1239                CERROR("Route %s %pI4h connected to %pI4h\n",
1240                       libcfs_id2str(peer->ksnp_id),
1241                       &route->ksnr_ipaddr,
1242                       &conn->ksnc_ipaddr);
1243        }
1244
1245        /* Search for a route corresponding to the new connection and
1246         * create an association.  This allows incoming connections created
1247         * by routes in my peer to match my own route entries so I don't
1248         * continually create duplicate routes. */
1249        list_for_each (tmp, &peer->ksnp_routes) {
1250                route = list_entry(tmp, ksock_route_t, ksnr_list);
1251
1252                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1253                        continue;
1254
1255                ksocknal_associate_route_conn_locked(route, conn);
1256                break;
1257        }
1258
1259        conn->ksnc_peer = peer;          /* conn takes my ref on peer */
1260        peer->ksnp_last_alive = cfs_time_current();
1261        peer->ksnp_send_keepalive = 0;
1262        peer->ksnp_error = 0;
1263
1264        sched = ksocknal_choose_scheduler_locked(cpt);
1265        sched->kss_nconns++;
1266        conn->ksnc_scheduler = sched;
1267
1268        conn->ksnc_tx_last_post = cfs_time_current();
1269        /* Set the deadline for the outgoing HELLO to drain */
1270        conn->ksnc_tx_bufnob = cfs_sock_wmem_queued(sock);
1271        conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1272        mb();   /* order with adding to peer's conn list */
1273
1274        list_add (&conn->ksnc_list, &peer->ksnp_conns);
1275        ksocknal_conn_addref(conn);
1276
1277        ksocknal_new_packet(conn, 0);
1278
1279        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1280
1281        /* Take packets blocking for this connection. */
1282        list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1283                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
1284                                continue;
1285
1286                list_del (&tx->tx_list);
1287                ksocknal_queue_tx_locked (tx, conn);
1288        }
1289
1290        write_unlock_bh(global_lock);
1291
1292        /* We've now got a new connection.  Any errors from here on are just
1293         * like "normal" comms errors and we close the connection normally.
1294         * NB (a) we still have to send the reply HELLO for passive
1295         *      connections,
1296         *    (b) normal I/O on the conn is blocked until I setup and call the
1297         *      socket callbacks.
1298         */
1299
1300        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
1301               " incarnation:"LPD64" sched[%d:%d]\n",
1302               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1303               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1304               conn->ksnc_port, incarnation, cpt,
1305               (int)(sched - &sched->kss_info->ksi_scheds[0]));
1306
1307        if (active) {
1308                /* additional routes after interface exchange? */
1309                ksocknal_create_routes(peer, conn->ksnc_port,
1310                                       hello->kshm_ips, hello->kshm_nips);
1311        } else {
1312                hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1313                                                       hello->kshm_nips);
1314                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1315        }
1316
1317        LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1318                                    kshm_ips[LNET_MAX_INTERFACES]));
1319
1320        /* setup the socket AFTER I've received hello (it disables
1321         * SO_LINGER).  I might call back to the acceptor who may want
1322         * to send a protocol version response and then close the
1323         * socket; this ensures the socket only tears down after the
1324         * response has been sent. */
1325        if (rc == 0)
1326                rc = ksocknal_lib_setup_sock(sock);
1327
1328        write_lock_bh(global_lock);
1329
1330        /* NB my callbacks block while I hold ksnd_global_lock */
1331        ksocknal_lib_set_callback(sock, conn);
1332
1333        if (!active)
1334                peer->ksnp_accepting--;
1335
1336        write_unlock_bh(global_lock);
1337
1338        if (rc != 0) {
1339                write_lock_bh(global_lock);
1340                if (!conn->ksnc_closing) {
1341                        /* could be closed by another thread */
1342                        ksocknal_close_conn_locked(conn, rc);
1343                }
1344                write_unlock_bh(global_lock);
1345        } else if (ksocknal_connsock_addref(conn) == 0) {
1346                /* Allow I/O to proceed. */
1347                ksocknal_read_callback(conn);
1348                ksocknal_write_callback(conn);
1349                ksocknal_connsock_decref(conn);
1350        }
1351
1352        ksocknal_connsock_decref(conn);
1353        ksocknal_conn_decref(conn);
1354        return rc;
1355
1356 failed_2:
1357        if (!peer->ksnp_closing &&
1358            list_empty (&peer->ksnp_conns) &&
1359            list_empty (&peer->ksnp_routes)) {
1360                list_add(&zombies, &peer->ksnp_tx_queue);
1361                list_del_init(&peer->ksnp_tx_queue);
1362                ksocknal_unlink_peer_locked(peer);
1363        }
1364
1365        write_unlock_bh(global_lock);
1366
1367        if (warn != NULL) {
1368                if (rc < 0)
1369                        CERROR("Not creating conn %s type %d: %s\n",
1370                               libcfs_id2str(peerid), conn->ksnc_type, warn);
1371                else
1372                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1373                              libcfs_id2str(peerid), conn->ksnc_type, warn);
1374        }
1375
1376        if (!active) {
1377                if (rc > 0) {
 1378                        /* Request retry by replying with CONN_NONE;
 1379                         * ksnc_proto has been set already */
1380                        conn->ksnc_type = SOCKLND_CONN_NONE;
1381                        hello->kshm_nips = 0;
1382                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
1383                }
1384
1385                write_lock_bh(global_lock);
1386                peer->ksnp_accepting--;
1387                write_unlock_bh(global_lock);
1388        }
1389
1390        ksocknal_txlist_done(ni, &zombies, 1);
1391        ksocknal_peer_decref(peer);
1392
1393 failed_1:
1394        if (hello != NULL)
1395                LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1396                                            kshm_ips[LNET_MAX_INTERFACES]));
1397
1398        LIBCFS_FREE (conn, sizeof(*conn));
1399
1400 failed_0:
1401        libcfs_sock_release(sock);
1402        return rc;
1403}
1404
1405void
1406ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
1407{
 1408        /* This just does the immediate housekeeping, and queues the
1409         * connection for the reaper to terminate.
1410         * Caller holds ksnd_global_lock exclusively in irq context */
1411        ksock_peer_t      *peer = conn->ksnc_peer;
1412        ksock_route_t     *route;
1413        ksock_conn_t      *conn2;
1414        struct list_head        *tmp;
1415
1416        LASSERT (peer->ksnp_error == 0);
1417        LASSERT (!conn->ksnc_closing);
1418        conn->ksnc_closing = 1;
1419
1420        /* ksnd_deathrow_conns takes over peer's ref */
1421        list_del (&conn->ksnc_list);
1422
1423        route = conn->ksnc_route;
1424        if (route != NULL) {
1425                /* dissociate conn from route... */
1426                LASSERT (!route->ksnr_deleted);
1427                LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
1428
1429                conn2 = NULL;
1430                list_for_each(tmp, &peer->ksnp_conns) {
1431                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
1432
1433                        if (conn2->ksnc_route == route &&
1434                            conn2->ksnc_type == conn->ksnc_type)
1435                                break;
1436
1437                        conn2 = NULL;
1438                }
1439                if (conn2 == NULL)
1440                        route->ksnr_connected &= ~(1 << conn->ksnc_type);
1441
1442                conn->ksnc_route = NULL;
1443
1444#if 0      /* irrelevant with only eager routes */
1445                /* make route least favourite */
1446                list_del (&route->ksnr_list);
1447                list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
1448#endif
1449                ksocknal_route_decref(route);     /* drop conn's ref on route */
1450        }
1451
1452        if (list_empty (&peer->ksnp_conns)) {
1453                /* No more connections to this peer */
1454
1455                if (!list_empty(&peer->ksnp_tx_queue)) {
1456                        ksock_tx_t *tx;
1457
1458                        LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
1459
 1460                        /* throw them onto the last connection...;
 1461                         * these TXs will be sent to /dev/null by the scheduler */
1462                        list_for_each_entry(tx, &peer->ksnp_tx_queue,
1463                                                tx_list)
1464                                ksocknal_tx_prep(conn, tx);
1465
1466                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1467                        list_splice_init(&peer->ksnp_tx_queue,
1468                                             &conn->ksnc_tx_queue);
1469                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1470                }
1471
1472                peer->ksnp_proto = NULL;        /* renegotiate protocol version */
1473                peer->ksnp_error = error;       /* stash last conn close reason */
1474
1475                if (list_empty (&peer->ksnp_routes)) {
1476                        /* I've just closed the last conn belonging to a
1477                         * peer with no routes to it */
1478                        ksocknal_unlink_peer_locked (peer);
1479                }
1480        }
1481
1482        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1483
1484        list_add_tail(&conn->ksnc_list,
1485                          &ksocknal_data.ksnd_deathrow_conns);
1486        wake_up(&ksocknal_data.ksnd_reaper_waitq);
1487
1488        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1489}
1490
1491void
1492ksocknal_peer_failed (ksock_peer_t *peer)
1493{
1494        int     notify = 0;
1495        cfs_time_t last_alive = 0;
1496
1497        /* There has been a connection failure or comms error; but I'll only
1498         * tell LNET I think the peer is dead if it's to another kernel and
1499         * there are no connections or connection attempts in existence. */
1500
1501        read_lock(&ksocknal_data.ksnd_global_lock);
1502
1503        if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1504            list_empty(&peer->ksnp_conns) &&
1505            peer->ksnp_accepting == 0 &&
1506            ksocknal_find_connecting_route_locked(peer) == NULL) {
1507                notify = 1;
1508                last_alive = peer->ksnp_last_alive;
1509        }
1510
1511        read_unlock(&ksocknal_data.ksnd_global_lock);
1512
1513        if (notify)
1514                lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
1515                             last_alive);
1516}
1517
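    /* Abort any zero-copy requests still outstanding on a dying conn:
     * clear their cookies, mark them as not-acked and drop the reference
     * that was holding them so they can be finalized. */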
1518void
1519ksocknal_finalize_zcreq(ksock_conn_t *conn)
1520{
1521        ksock_peer_t     *peer = conn->ksnc_peer;
1522        ksock_tx_t       *tx;
1523        ksock_tx_t       *tmp;
1524        LIST_HEAD    (zlist);
1525
1526        /* NB safe to finalize TXs because closing of socket will
1527         * abort all buffered data */
1528        LASSERT (conn->ksnc_sock == NULL);
1529
1530        spin_lock(&peer->ksnp_lock);
1531
1532        list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1533                if (tx->tx_conn != conn)
1534                        continue;
1535
1536                LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
1537
1538                tx->tx_msg.ksm_zc_cookies[0] = 0;
1539                tx->tx_zc_aborted = 1; /* mark it as not-acked */
1540                list_del(&tx->tx_zc_list);
1541                list_add(&tx->tx_zc_list, &zlist);
1542        }
1543
1544        spin_unlock(&peer->ksnp_lock);
1545
1546        while (!list_empty(&zlist)) {
1547                tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1548
1549                list_del(&tx->tx_zc_list);
1550                ksocknal_tx_decref(tx);
1551        }
1552}
1553
1554void
1555ksocknal_terminate_conn (ksock_conn_t *conn)
1556{
1557        /* This gets called by the reaper (guaranteed thread context) to
1558         * disengage the socket from its callbacks and close it.
1559         * ksnc_refcount will eventually hit zero, and then the reaper will
1560         * destroy it. */
1561        ksock_peer_t     *peer = conn->ksnc_peer;
1562        ksock_sched_t    *sched = conn->ksnc_scheduler;
1563        int            failed = 0;
1564
1565        LASSERT(conn->ksnc_closing);
1566
1567        /* wake up the scheduler to "send" all remaining packets to /dev/null */
1568        spin_lock_bh(&sched->kss_lock);
1569
1570        /* a closing conn is always ready to tx */
1571        conn->ksnc_tx_ready = 1;
1572
1573        if (!conn->ksnc_tx_scheduled &&
1574            !list_empty(&conn->ksnc_tx_queue)){
1575                list_add_tail (&conn->ksnc_tx_list,
1576                               &sched->kss_tx_conns);
1577                conn->ksnc_tx_scheduled = 1;
1578                /* extra ref for scheduler */
1579                ksocknal_conn_addref(conn);
1580
1581                wake_up (&sched->kss_waitq);
1582        }
1583
1584        spin_unlock_bh(&sched->kss_lock);
1585
1586        /* serialise with callbacks */
1587        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1588
1589        ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1590
1591        /* OK, so this conn may not be completely disengaged from its
1592         * scheduler yet, but it _has_ committed to terminate... */
1593        conn->ksnc_scheduler->kss_nconns--;
1594
1595        if (peer->ksnp_error != 0) {
1596                /* peer's last conn closed in error */
1597                LASSERT (list_empty (&peer->ksnp_conns));
1598                failed = 1;
1599                peer->ksnp_error = 0;     /* avoid multiple notifications */
1600        }
1601
1602        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1603
1604        if (failed)
1605                ksocknal_peer_failed(peer);
1606
1607        /* The socket is closed on the final put; either here, or in
1608         * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1609         * when the connection was established, this will close the socket
1610         * immediately, aborting anything buffered in it. Any hung
1611         * zero-copy transmits will therefore complete in finite time. */
1612        ksocknal_connsock_decref(conn);
1613}
1614
1615void
1616ksocknal_queue_zombie_conn (ksock_conn_t *conn)
1617{
1618        /* Queue the conn for the reaper to destroy */
1619
1620        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1621        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1622
1623        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1624        wake_up(&ksocknal_data.ksnd_reaper_waitq);
1625
1626        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1627}
1628
1629void
1630ksocknal_destroy_conn (ksock_conn_t *conn)
1631{
1632        cfs_time_t      last_rcv;
1633
1634        /* Final coup-de-grace of the reaper */
1635        CDEBUG (D_NET, "connection %p\n", conn);
1636
1637        LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
1638        LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
1639        LASSERT (conn->ksnc_sock == NULL);
1640        LASSERT (conn->ksnc_route == NULL);
1641        LASSERT (!conn->ksnc_tx_scheduled);
1642        LASSERT (!conn->ksnc_rx_scheduled);
1643        LASSERT (list_empty(&conn->ksnc_tx_queue));
1644
1645        /* complete current receive if any */
1646        switch (conn->ksnc_rx_state) {
1647        case SOCKNAL_RX_LNET_PAYLOAD:
1648                last_rcv = conn->ksnc_rx_deadline -
1649                           cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1650                CERROR("Completing partial receive from %s[%d]"
1651                       ", ip %pI4h:%d, with error, wanted: %d, left: %d, "
1652                       "last alive is %ld secs ago\n",
1653                       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1654                       &conn->ksnc_ipaddr, conn->ksnc_port,
1655                       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1656                       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
1657                                        last_rcv)));
1658                lnet_finalize (conn->ksnc_peer->ksnp_ni,
1659                               conn->ksnc_cookie, -EIO);
1660                break;
1661        case SOCKNAL_RX_LNET_HEADER:
1662                if (conn->ksnc_rx_started)
1663                        CERROR("Incomplete receive of lnet header from %s"
1664                               ", ip %pI4h:%d, with error, protocol: %d.x.\n",
1665                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1666                               &conn->ksnc_ipaddr, conn->ksnc_port,
1667                               conn->ksnc_proto->pro_version);
1668                break;
1669        case SOCKNAL_RX_KSM_HEADER:
1670                if (conn->ksnc_rx_started)
1671                        CERROR("Incomplete receive of ksock message from %s"
1672                               ", ip %pI4h:%d, with error, protocol: %d.x.\n",
1673                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1674                               &conn->ksnc_ipaddr, conn->ksnc_port,
1675                               conn->ksnc_proto->pro_version);
1676                break;
1677        case SOCKNAL_RX_SLOP:
1678                if (conn->ksnc_rx_started)
1679                        CERROR("Incomplete receive of slops from %s"
1680                               ", ip %pI4h:%d, with error\n",
1681                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
1682                               &conn->ksnc_ipaddr, conn->ksnc_port);
1683                break;
1684        default:
1685                LBUG ();
1686                break;
1687        }
1688
1689        ksocknal_peer_decref(conn->ksnc_peer);
1690
1691        LIBCFS_FREE (conn, sizeof (*conn));
1692}
1693
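    /* Close all of @peer's conns to @ipaddr (0 matches any address) and
     * return how many were closed.  Caller holds ksnd_global_lock in
     * write mode. */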
1694int
1695ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
1696{
1697        ksock_conn_t       *conn;
1698        struct list_head         *ctmp;
1699        struct list_head         *cnxt;
1700        int              count = 0;
1701
1702        list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
1703                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
1704
1705                if (ipaddr == 0 ||
1706                    conn->ksnc_ipaddr == ipaddr) {
1707                        count++;
1708                        ksocknal_close_conn_locked (conn, why);
1709                }
1710        }
1711
1712        return (count);
1713}
1714
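    /* Close @conn and every other conn to the same peer address, taking
     * the global write lock itself.  Returns the number of conns closed. */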
1715int
1716ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
1717{
1718        ksock_peer_t     *peer = conn->ksnc_peer;
1719        __u32        ipaddr = conn->ksnc_ipaddr;
1720        int            count;
1721
1722        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1723
1724        count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
1725
1726        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1727
1728        return (count);
1729}
1730
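    /* Close conns to peers matching @id (LNET_NID_ANY / LNET_PID_ANY are
     * wildcards) and @ipaddr (0 matches any).  Wildcard requests always
     * succeed; a fully specified request returns -ENOENT if nothing
     * matched. */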
1731int
1732ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
1733{
1734        ksock_peer_t       *peer;
1735        struct list_head         *ptmp;
1736        struct list_head         *pnxt;
1737        int              lo;
1738        int              hi;
1739        int              i;
1740        int              count = 0;
1741
1742        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1743
1744        if (id.nid != LNET_NID_ANY)
1745                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1746        else {
1747                lo = 0;
1748                hi = ksocknal_data.ksnd_peer_hash_size - 1;
1749        }
1750
1751        for (i = lo; i <= hi; i++) {
1752                list_for_each_safe (ptmp, pnxt,
1753                                        &ksocknal_data.ksnd_peers[i]) {
1754
1755                        peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
1756
1757                        if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1758                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1759                                continue;
1760
1761                        count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
1762                }
1763        }
1764
1765        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1766
1767        /* wildcards always succeed */
1768        if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1769                return (0);
1770
1771        return (count == 0 ? -ENOENT : 0);
1772}
1773
1774void
1775ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1776{
1777        /* The router is telling me she's been notified of a change in
1778         * gateway state.... */
1779        lnet_process_id_t  id = {0};
1780
1781        id.nid = gw_nid;
1782        id.pid = LNET_PID_ANY;
1783
1784        CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1785                alive ? "up" : "down");
1786
1787        if (!alive) {
1788                /* If the gateway crashed, close all open connections... */
1789                ksocknal_close_matching_conns (id, 0);
1790                return;
1791        }
1792
1793        /* ...otherwise do nothing.  We can only establish new connections
1794         * if we have autoroutes, and these connect on demand. */
1795}
1796
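    /* lnd_query() handler: report in *when the last time @nid is known to
     * have been alive (data ACKed on one of its conns) and, unless the
     * peer already exists but has no connectable route, (re)create the
     * peer and launch connection attempts to refresh its liveness. */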
1797void
1798ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
1799{
1800        int             connect = 1;
1801        cfs_time_t       last_alive = 0;
1802        cfs_time_t       now = cfs_time_current();
1803        ksock_peer_t      *peer = NULL;
1804        rwlock_t                *glock = &ksocknal_data.ksnd_global_lock;
1805        lnet_process_id_t  id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
1806
1807        read_lock(glock);
1808
1809        peer = ksocknal_find_peer_locked(ni, id);
1810        if (peer != NULL) {
1811                struct list_head       *tmp;
1812                ksock_conn_t     *conn;
1813                int            bufnob;
1814
1815                list_for_each (tmp, &peer->ksnp_conns) {
1816                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1817                        bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
1818
1819                        if (bufnob < conn->ksnc_tx_bufnob) {
1820                                /* something got ACKed */
1821                                conn->ksnc_tx_deadline =
1822                                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1823                                peer->ksnp_last_alive = now;
1824                                conn->ksnc_tx_bufnob = bufnob;
1825                        }
1826                }
1827
1828                last_alive = peer->ksnp_last_alive;
1829                if (ksocknal_find_connectable_route_locked(peer) == NULL)
1830                        connect = 0;
1831        }
1832
1833        read_unlock(glock);
1834
1835        if (last_alive != 0)
1836                *when = last_alive;
1837
1838        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1839               libcfs_nid2str(nid), peer,
1840               last_alive ? cfs_duration_sec(now - last_alive) : -1,
1841               connect);
1842
1843        if (!connect)
1844                return;
1845
1846        ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1847
1848        write_lock_bh(glock);
1849
1850        peer = ksocknal_find_peer_locked(ni, id);
1851        if (peer != NULL)
1852                ksocknal_launch_all_connections_locked(peer);
1853
1854        write_unlock_bh(glock);
1855        return;
1856}
1857
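    /* Push each of @peer's conns in turn via ksocknal_lib_push_conn(),
     * taking a ref on the conn under the read lock so the lock can be
     * dropped while the push is in progress. */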
1858void
1859ksocknal_push_peer (ksock_peer_t *peer)
1860{
1861        int            index;
1862        int            i;
1863        struct list_head       *tmp;
1864        ksock_conn_t     *conn;
1865
1866        for (index = 0; ; index++) {
1867                read_lock(&ksocknal_data.ksnd_global_lock);
1868
1869                i = 0;
1870                conn = NULL;
1871
1872                list_for_each (tmp, &peer->ksnp_conns) {
1873                        if (i++ == index) {
1874                                conn = list_entry (tmp, ksock_conn_t,
1875                                                       ksnc_list);
1876                                ksocknal_conn_addref(conn);
1877                                break;
1878                        }
1879                }
1880
1881                read_unlock(&ksocknal_data.ksnd_global_lock);
1882
1883                if (conn == NULL)
1884                        break;
1885
1886                ksocknal_lib_push_conn (conn);
1887                ksocknal_conn_decref(conn);
1888        }
1889}
1890
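    /* Push every peer matching @id (LNET_NID_ANY / LNET_PID_ANY are
     * wildcards).  Returns 0 if at least one peer matched, -ENOENT
     * otherwise. */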
1891int
1892ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
1893{
1894        ksock_peer_t      *peer;
1895        struct list_head        *tmp;
1896        int             index;
1897        int             i;
1898        int             j;
1899        int             rc = -ENOENT;
1900
1901        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1902                for (j = 0; ; j++) {
1903                        read_lock(&ksocknal_data.ksnd_global_lock);
1904
1905                        index = 0;
1906                        peer = NULL;
1907
1908                        list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
1909                                peer = list_entry(tmp, ksock_peer_t,
1910                                                      ksnp_list);
1911
1912                                if (!((id.nid == LNET_NID_ANY ||
1913                                       id.nid == peer->ksnp_id.nid) &&
1914                                      (id.pid == LNET_PID_ANY ||
1915                                       id.pid == peer->ksnp_id.pid))) {
1916                                        peer = NULL;
1917                                        continue;
1918                                }
1919
1920                                if (index++ == j) {
1921                                        ksocknal_peer_addref(peer);
1922                                        break;
1923                                }
1924                        }
1925
1926                        read_unlock(&ksocknal_data.ksnd_global_lock);
1927
1928                        if (peer == NULL)
                                    break;  /* no more matching peers in this chain */

1929                        rc = 0;
1930                        ksocknal_push_peer (peer);
1931                        ksocknal_peer_decref(peer);
1933                }
1934
1935        }
1936
1937        return (rc);
1938}
1939
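    /* Register local interface @ipaddress/@netmask with @ni's net,
     * counting existing peers and routes that already use the address
     * against the new entry.  Duplicates are silently ignored; -ENOSPC
     * if the interface table is full. */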
1940int
1941ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1942{
1943        ksock_net_t       *net = ni->ni_data;
1944        ksock_interface_t *iface;
1945        int             rc;
1946        int             i;
1947        int             j;
1948        struct list_head        *ptmp;
1949        ksock_peer_t      *peer;
1950        struct list_head        *rtmp;
1951        ksock_route_t     *route;
1952
1953        if (ipaddress == 0 ||
1954            netmask == 0)
1955                return (-EINVAL);
1956
1957        write_lock_bh(&ksocknal_data.ksnd_global_lock);
1958
1959        iface = ksocknal_ip2iface(ni, ipaddress);
1960        if (iface != NULL) {
1961                /* silently ignore dups */
1962                rc = 0;
1963        } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1964                rc = -ENOSPC;
1965        } else {
1966                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1967
1968                iface->ksni_ipaddr = ipaddress;
1969                iface->ksni_netmask = netmask;
1970                iface->ksni_nroutes = 0;
1971                iface->ksni_npeers = 0;
1972
1973                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1974                        list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1975                                peer = list_entry(ptmp, ksock_peer_t,
1976                                                      ksnp_list);
1977
1978                                for (j = 0; j < peer->ksnp_n_passive_ips; j++)
1979                                        if (peer->ksnp_passive_ips[j] == ipaddress)
1980                                                iface->ksni_npeers++;
1981
1982                                list_for_each(rtmp, &peer->ksnp_routes) {
1983                                        route = list_entry(rtmp,
1984                                                               ksock_route_t,
1985                                                               ksnr_list);
1986
1987                                        if (route->ksnr_myipaddr == ipaddress)
1988                                                iface->ksni_nroutes++;
1989                                }
1990                        }
1991                }
1992
1993                rc = 0;
1994                /* NB only new connections will pay attention to the new interface! */
1995        }
1996
1997        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1998
1999        return (rc);
2000}
2001
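    /* Strip local address @ipaddr from @peer: forget it as a passive IP,
     * unbind (or delete) routes using it and close conns bound to it.
     * Caller holds ksnd_global_lock in write mode. */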
2002void
2003ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
2004{
2005        struct list_head         *tmp;
2006        struct list_head         *nxt;
2007        ksock_route_t      *route;
2008        ksock_conn_t       *conn;
2009        int              i;
2010        int              j;
2011
2012        for (i = 0; i < peer->ksnp_n_passive_ips; i++)
2013                if (peer->ksnp_passive_ips[i] == ipaddr) {
2014                        for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
2015                                peer->ksnp_passive_ips[j-1] =
2016                                        peer->ksnp_passive_ips[j];
2017                        peer->ksnp_n_passive_ips--;
2018                        break;
2019                }
2020
2021        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2022                route = list_entry (tmp, ksock_route_t, ksnr_list);
2023
2024                if (route->ksnr_myipaddr != ipaddr)
2025                        continue;
2026
2027                if (route->ksnr_share_count != 0) {
2028                        /* Manually created; keep, but unbind */
2029                        route->ksnr_myipaddr = 0;
2030                } else {
2031                        ksocknal_del_route_locked(route);
2032                }
2033        }
2034
2035        list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2036                conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2037
2038                if (conn->ksnc_myipaddr == ipaddr)
2039                        ksocknal_close_conn_locked (conn, 0);
2040        }
2041}
2042
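    /* Remove local interface @ipaddress (0 means every interface) from
     * @ni's net and scrub it from all peers on that net.  Returns -ENOENT
     * if no interface matched. */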
2043int
2044ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2045{
2046        ksock_net_t       *net = ni->ni_data;
2047        int             rc = -ENOENT;
2048        struct list_head        *tmp;
2049        struct list_head        *nxt;
2050        ksock_peer_t      *peer;
2051        __u32         this_ip;
2052        int             i;
2053        int             j;
2054
2055        write_lock_bh(&ksocknal_data.ksnd_global_lock);
2056
2057        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2058                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2059
2060                if (!(ipaddress == 0 ||
2061                      ipaddress == this_ip))
2062                        continue;
2063
2064                rc = 0;
2065
2066                for (j = i+1; j < net->ksnn_ninterfaces; j++)
2067                        net->ksnn_interfaces[j-1] =
2068                                net->ksnn_interfaces[j];
2069
2070                net->ksnn_ninterfaces--;
2071
2072                for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2073                        list_for_each_safe(tmp, nxt,
2074                                               &ksocknal_data.ksnd_peers[j]) {
2075                                peer = list_entry(tmp, ksock_peer_t,
2076                                                      ksnp_list);
2077
2078                                if (peer->ksnp_ni != ni)
2079                                        continue;
2080
2081                                ksocknal_peer_del_interface_locked(peer, this_ip);
2082                        }
2083                }
2084        }
2085
2086        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2087
2088        return (rc);
2089}
2090
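    /* lnd_ctl() handler: dispatch the IOC_LIBCFS_* ioctls for interface,
     * peer and connection management. */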
2091int
2092ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2093{
2094        lnet_process_id_t id = {0};
2095        struct libcfs_ioctl_data *data = arg;
2096        int rc;
2097
2098        switch(cmd) {
2099        case IOC_LIBCFS_GET_INTERFACE: {
2100                ksock_net_t       *net = ni->ni_data;
2101                ksock_interface_t *iface;
2102
2103                read_lock(&ksocknal_data.ksnd_global_lock);
2104
2105                if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2106                        rc = -ENOENT;
2107                } else {
2108                        rc = 0;
2109                        iface = &net->ksnn_interfaces[data->ioc_count];
2110
2111                        data->ioc_u32[0] = iface->ksni_ipaddr;
2112                        data->ioc_u32[1] = iface->ksni_netmask;
2113                        data->ioc_u32[2] = iface->ksni_npeers;
2114                        data->ioc_u32[3] = iface->ksni_nroutes;
2115                }
2116
2117                read_unlock(&ksocknal_data.ksnd_global_lock);
2118                return rc;
2119        }
2120
2121        case IOC_LIBCFS_ADD_INTERFACE:
2122                return ksocknal_add_interface(ni,
2123                                              data->ioc_u32[0], /* IP address */
2124                                              data->ioc_u32[1]); /* net mask */
2125
2126        case IOC_LIBCFS_DEL_INTERFACE:
2127                return ksocknal_del_interface(ni,
2128                                              data->ioc_u32[0]); /* IP address */
2129
2130        case IOC_LIBCFS_GET_PEER: {
2131                __u32       myip = 0;
2132                __u32       ip = 0;
2133                int           port = 0;
2134                int           conn_count = 0;
2135                int           share_count = 0;
2136
2137                rc = ksocknal_get_peer_info(ni, data->ioc_count,
2138                                            &id, &myip, &ip, &port,
2139                                            &conn_count,  &share_count);
2140                if (rc != 0)
2141                        return rc;
2142
2143                data->ioc_nid    = id.nid;
2144                data->ioc_count  = share_count;
2145                data->ioc_u32[0] = ip;
2146                data->ioc_u32[1] = port;
2147                data->ioc_u32[2] = myip;
2148                data->ioc_u32[3] = conn_count;
2149                data->ioc_u32[4] = id.pid;
2150                return 0;
2151        }
2152
2153        case IOC_LIBCFS_ADD_PEER:
2154                id.nid = data->ioc_nid;
2155                id.pid = LUSTRE_SRV_LNET_PID;
2156                return ksocknal_add_peer (ni, id,
2157                                          data->ioc_u32[0], /* IP */
2158                                          data->ioc_u32[1]); /* port */
2159
2160        case IOC_LIBCFS_DEL_PEER:
2161                id.nid = data->ioc_nid;
2162                id.pid = LNET_PID_ANY;
2163                return ksocknal_del_peer (ni, id,
2164                                          data->ioc_u32[0]); /* IP */
2165
2166        case IOC_LIBCFS_GET_CONN: {
2167                int        txmem;
2168                int        rxmem;
2169                int        nagle;
2170                ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
2171
2172                if (conn == NULL)
2173                        return -ENOENT;
2174
2175                ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2176
2177                data->ioc_count  = txmem;
2178                data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2179                data->ioc_flags  = nagle;
2180                data->ioc_u32[0] = conn->ksnc_ipaddr;
2181                data->ioc_u32[1] = conn->ksnc_port;
2182                data->ioc_u32[2] = conn->ksnc_myipaddr;
2183                data->ioc_u32[3] = conn->ksnc_type;
2184                data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2185                data->ioc_u32[5] = rxmem;
2186                data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2187                ksocknal_conn_decref(conn);
2188                return 0;
2189        }
2190
2191        case IOC_LIBCFS_CLOSE_CONNECTION:
2192                id.nid = data->ioc_nid;
2193                id.pid = LNET_PID_ANY;
2194                return ksocknal_close_matching_conns (id,
2195                                                      data->ioc_u32[0]);
2196
2197        case IOC_LIBCFS_REGISTER_MYNID:
2198                /* Ignore if this is a noop */
2199                if (data->ioc_nid == ni->ni_nid)
2200                        return 0;
2201
2202                CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2203                       libcfs_nid2str(data->ioc_nid),
2204                       libcfs_nid2str(ni->ni_nid));
2205                return -EINVAL;
2206
2207        case IOC_LIBCFS_PUSH_CONNECTION:
2208                id.nid = data->ioc_nid;
2209                id.pid = LNET_PID_ANY;
2210                return ksocknal_push(ni, id);
2211
2212        default:
2213                return -EINVAL;
2214        }
2215        /* not reached */
2216}
2217
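    /* Free the state allocated at startup: the per-CPT scheduler arrays,
     * the peer hash table and any idle noop txs still cached. */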
2218void
2219ksocknal_free_buffers (void)
2220{
2221        LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2222
2223        if (ksocknal_data.ksnd_sched_info != NULL) {
2224                struct ksock_sched_info *info;
2225                int                     i;
2226
2227                cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2228                        if (info->ksi_scheds != NULL) {
2229                                LIBCFS_FREE(info->ksi_scheds,
2230                                            info->ksi_nthreads_max *
2231                                            sizeof(info->ksi_scheds[0]));
2232                        }
2233                }
2234                cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2235        }
2236
2237        LIBCFS_FREE (ksocknal_data.ksnd_peers,
2238                     sizeof (struct list_head) *
2239                     ksocknal_data.ksnd_peer_hash_size);
2240
2241        spin_lock(&ksocknal_data.ksnd_tx_lock);
2242
2243        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2244                struct list_head        zlist;
2245                ksock_tx_t      *tx;
2246
2247                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2248                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2249                spin_unlock(&ksocknal_data.ksnd_tx_lock);
2250
2251                while (!list_empty(&zlist)) {
2252                        tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2253                        list_del(&tx->tx_list);
2254                        LIBCFS_FREE(tx, tx->tx_desc_size);
2255                }
2256        } else {
2257                spin_unlock(&ksocknal_data.ksnd_tx_lock);
2258        }
2259}
2260
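    /* Tear down the module-global state: check everything is already
     * idle, flag all threads to terminate, wait for them to exit, then
     * free the remaining buffers. */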
2261void
2262ksocknal_base_shutdown(void)
2263{
2264        struct ksock_sched_info *info;
2265        ksock_sched_t           *sched;
2266        int                     i;
2267        int                     j;
2268
2269        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2270               atomic_read (&libcfs_kmemory));
2271        LASSERT (ksocknal_data.ksnd_nnets == 0);
2272
2273        switch (ksocknal_data.ksnd_init) {
2274        default:
2275                LASSERT (0);
2276
2277        case SOCKNAL_INIT_ALL:
2278        case SOCKNAL_INIT_DATA:
2279                LASSERT (ksocknal_data.ksnd_peers != NULL);
2280                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2281                        LASSERT (list_empty (&ksocknal_data.ksnd_peers[i]));
2282                }
2283
2284                LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2285                LASSERT (list_empty (&ksocknal_data.ksnd_enomem_conns));
2286                LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns));
2287                LASSERT (list_empty (&ksocknal_data.ksnd_connd_connreqs));
2288                LASSERT (list_empty (&ksocknal_data.ksnd_connd_routes));
2289
2290                if (ksocknal_data.ksnd_sched_info != NULL) {
2291                        cfs_percpt_for_each(info, i,
2292                                            ksocknal_data.ksnd_sched_info) {
2293                                if (info->ksi_scheds == NULL)
2294                                        continue;
2295
2296                                for (j = 0; j < info->ksi_nthreads_max; j++) {
2297
2298                                        sched = &info->ksi_scheds[j];
2299                                        LASSERT(list_empty(&sched->kss_tx_conns));
2301                                        LASSERT(list_empty(&sched->kss_rx_conns));
2303                                        LASSERT(list_empty(&sched->kss_zombie_noop_txs));
2305                                        LASSERT(sched->kss_nconns == 0);
2306                                }
2307                        }
2308                }
2309
2310                /* flag threads to terminate; wake and wait for them to die */
2311                ksocknal_data.ksnd_shuttingdown = 1;
2312                wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2313                wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2314
2315                if (ksocknal_data.ksnd_sched_info != NULL) {
2316                        cfs_percpt_for_each(info, i,
2317                                            ksocknal_data.ksnd_sched_info) {
2318                                if (info->ksi_scheds == NULL)
2319                                        continue;
2320
2321                                for (j = 0; j < info->ksi_nthreads_max; j++) {
2322                                        sched = &info->ksi_scheds[j];
2323                                        wake_up_all(&sched->kss_waitq);
2324                                }
2325                        }
2326                }
2327
2328                i = 4;
2329                read_lock(&ksocknal_data.ksnd_global_lock);
2330                while (ksocknal_data.ksnd_nthreads != 0) {
2331                        i++;
2332                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2333                               "waiting for %d threads to terminate\n",
2334                                ksocknal_data.ksnd_nthreads);
2335                        read_unlock(&ksocknal_data.ksnd_global_lock);
2336                        cfs_pause(cfs_time_seconds(1));
2337                        read_lock(&ksocknal_data.ksnd_global_lock);
2338                }
2339                read_unlock(&ksocknal_data.ksnd_global_lock);
2340
2341                ksocknal_free_buffers();
2342
2343                ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2344                break;
2345        }
2346
2347        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2348               atomic_read (&libcfs_kmemory));
2349
2350        module_put(THIS_MODULE);
2351}
2352
2353__u64
2354ksocknal_new_incarnation (void)
2355{
2356        struct timeval tv;
2357
2358        /* The incarnation number is the time this module loaded and it
2359         * identifies this particular instance of the socknal.  Hopefully
2360         * we won't be able to reboot more frequently than 1MHz for the
2361         * foreseeable future :) */
2362
2363        do_gettimeofday(&tv);
2364
2365        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
2366}
2367
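    /* Set up the module-global state when the first NI comes up: the peer
     * hash table, locks and queues, the per-CPT scheduler info, and the
     * connd and reaper threads. */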
2368int
2369ksocknal_base_startup(void)
2370{
2371        struct ksock_sched_info *info;
2372        int                     rc;
2373        int                     i;
2374
2375        LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2376        LASSERT (ksocknal_data.ksnd_nnets == 0);
2377
2378        memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
2379
2380        ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2381        LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
2382                      sizeof (struct list_head) *
2383                      ksocknal_data.ksnd_peer_hash_size);
2384        if (ksocknal_data.ksnd_peers == NULL)
2385                return -ENOMEM;
2386
2387        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2388                INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2389
2390        rwlock_init(&ksocknal_data.ksnd_global_lock);
2391        INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2392
2393        spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2394        INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
2395        INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
2396        INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
2397        init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2398
2399        spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2400        INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
2401        INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
2402        init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2403
2404        spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2405        INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
2406
2407        /* NB memset above zeros whole of ksocknal_data */
2408
2409        /* flag lists/ptrs/locks initialised */
2410        ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2411        try_module_get(THIS_MODULE);
2412
2413        ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2414                                                         sizeof(*info));
2415        if (ksocknal_data.ksnd_sched_info == NULL)
2416                goto failed;
2417
2418        cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2419                ksock_sched_t   *sched;
2420                int             nthrs;
2421
2422                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2423                if (*ksocknal_tunables.ksnd_nscheds > 0) {
2424                        nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2425                } else {
2426                        /* cap at half of the CPUs; assume the other half
2427                         * should be reserved for upper layer modules */
2428                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2429                }
2430
2431                info->ksi_nthreads_max = nthrs;
2432                info->ksi_cpt = i;
2433
2434                LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2435                                 info->ksi_nthreads_max * sizeof(*sched));
2436                if (info->ksi_scheds == NULL)
2437                        goto failed;
2438
2439                for (; nthrs > 0; nthrs--) {
2440                        sched = &info->ksi_scheds[nthrs - 1];
2441
2442                        sched->kss_info = info;
2443                        spin_lock_init(&sched->kss_lock);
2444                        INIT_LIST_HEAD(&sched->kss_rx_conns);
2445                        INIT_LIST_HEAD(&sched->kss_tx_conns);
2446                        INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2447                        init_waitqueue_head(&sched->kss_waitq);
2448                }
2449        }
2450
2451        ksocknal_data.ksnd_connd_starting        = 0;
2452        ksocknal_data.ksnd_connd_failed_stamp     = 0;
2453        ksocknal_data.ksnd_connd_starting_stamp   = cfs_time_current_sec();
2454        /* must have at least 2 connds to remain responsive to accepts while
2455         * connecting */
2456        if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2457                *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2458
2459        if (*ksocknal_tunables.ksnd_nconnds_max <
2460            *ksocknal_tunables.ksnd_nconnds) {
2461                *ksocknal_tunables.ksnd_nconnds_max =
2462                        *ksocknal_tunables.ksnd_nconnds;
2463        }
2464
2465        for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2466                char name[16];
2467                spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2468                ksocknal_data.ksnd_connd_starting++;
2469                spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2470
2472                snprintf(name, sizeof(name), "socknal_cd%02d", i);
2473                rc = ksocknal_thread_start(ksocknal_connd,
2474                                           (void *)((ulong_ptr_t)i), name);
2475                if (rc != 0) {
2476                        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2477                        ksocknal_data.ksnd_connd_starting--;
2478                        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2479                        CERROR("Can't spawn socknal connd: %d\n", rc);
2480                        goto failed;
2481                }
2482        }
2483
2484        rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2485        if (rc != 0) {
2486                CERROR ("Can't spawn socknal reaper: %d\n", rc);
2487                goto failed;
2488        }
2489
2490        /* flag everything initialised */
2491        ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2492
2493        return 0;
2494
2495 failed:
2496        ksocknal_base_shutdown();
2497        return -ENETDOWN;
2498}
2499
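    /* Dump the state of one remaining peer on @ni (with its routes and
     * conns) to the console; used while shutdown is waiting for peers to
     * disappear. */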
2500void
2501ksocknal_debug_peerhash (lnet_ni_t *ni)
2502{
2503        ksock_peer_t    *peer = NULL;
2504        struct list_head        *tmp;
2505        int             i;
2506
2507        read_lock(&ksocknal_data.ksnd_global_lock);
2508
2509        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2510                list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
2511                        peer = list_entry (tmp, ksock_peer_t, ksnp_list);
2512
2513                        if (peer->ksnp_ni == ni)
                                    break;
2514
2515                        peer = NULL;
2516                }
2517        }
2518
2519        if (peer != NULL) {
2520                ksock_route_t *route;
2521                ksock_conn_t  *conn;
2522
2523                CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
2524                       "closing %d, accepting %d, err %d, zcookie "LPU64", "
2525                       "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
2526                       atomic_read(&peer->ksnp_refcount),
2527                       peer->ksnp_sharecount, peer->ksnp_closing,
2528                       peer->ksnp_accepting, peer->ksnp_error,
2529                       peer->ksnp_zc_next_cookie,
2530                       !list_empty(&peer->ksnp_tx_queue),
2531                       !list_empty(&peer->ksnp_zc_req_list));
2532
2533                list_for_each (tmp, &peer->ksnp_routes) {
2534                        route = list_entry(tmp, ksock_route_t, ksnr_list);
2535                        CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
2536                               "del %d\n", atomic_read(&route->ksnr_refcount),
2537                               route->ksnr_scheduled, route->ksnr_connecting,
2538                               route->ksnr_connected, route->ksnr_deleted);
2539                }
2540
2541                list_for_each (tmp, &peer->ksnp_conns) {
2542                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2543                        CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
2544                               atomic_read(&conn->ksnc_conn_refcount),
2545                               atomic_read(&conn->ksnc_sock_refcount),
2546                               conn->ksnc_type, conn->ksnc_closing);
2547                }
2548        }
2549
2550        read_unlock(&ksocknal_data.ksnd_global_lock);
2551        return;
2552}
2553
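    /* lnd_shutdown() handler: delete all peers on @ni, wait for their
     * state to drain, then release the net.  The last net down also shuts
     * down the module-global state. */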
2554void
2555ksocknal_shutdown (lnet_ni_t *ni)
2556{
2557        ksock_net_t      *net = ni->ni_data;
2558        int            i;
2559        lnet_process_id_t anyid = {0};
2560
2561        anyid.nid =  LNET_NID_ANY;
2562        anyid.pid =  LNET_PID_ANY;
2563
2564        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2565        LASSERT(ksocknal_data.ksnd_nnets > 0);
2566
2567        spin_lock_bh(&net->ksnn_lock);
2568        net->ksnn_shutdown = 1;          /* prevent new peers */
2569        spin_unlock_bh(&net->ksnn_lock);
2570
2571        /* Delete all peers */
2572        ksocknal_del_peer(ni, anyid, 0);
2573
2574        /* Wait for all peer state to clean up */
2575        i = 2;
2576        spin_lock_bh(&net->ksnn_lock);
2577        while (net->ksnn_npeers != 0) {
2578                spin_unlock_bh(&net->ksnn_lock);
2579
2580                i++;
2581                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2582                       "waiting for %d peers to disconnect\n",
2583                       net->ksnn_npeers);
2584                cfs_pause(cfs_time_seconds(1));
2585
2586                ksocknal_debug_peerhash(ni);
2587
2588                spin_lock_bh(&net->ksnn_lock);
2589        }
2590        spin_unlock_bh(&net->ksnn_lock);
2591
2592        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2593                LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
2594                LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
2595        }
2596
2597        list_del(&net->ksnn_list);
2598        LIBCFS_FREE(net, sizeof(*net));
2599
2600        ksocknal_data.ksnd_nnets--;
2601        if (ksocknal_data.ksnd_nnets == 0)
2602                ksocknal_base_shutdown();
2603}
2604
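    /* Populate @net's interface table from the node's IP interfaces,
     * skipping loopback and interfaces that are down.  Returns the number
     * of usable interfaces found. */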
2605int
2606ksocknal_enumerate_interfaces(ksock_net_t *net)
2607{
2608        char      **names;
2609        int      i;
2610        int      j;
2611        int      rc;
2612        int      n;
2613
2614        n = libcfs_ipif_enumerate(&names);
2615        if (n <= 0) {
2616                CERROR("Can't enumerate interfaces: %d\n", n);
2617                return n;
2618        }
2619
2620        for (i = j = 0; i < n; i++) {
2621                int     up;
2622                __u32      ip;
2623                __u32      mask;
2624
2625                if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2626                        continue;
2627
2628                rc = libcfs_ipif_query(names[i], &up, &ip, &mask);
2629                if (rc != 0) {
2630                        CWARN("Can't get interface %s info: %d\n",
2631                              names[i], rc);
2632                        continue;
2633                }
2634
2635                if (!up) {
2636                        CWARN("Ignoring interface %s (down)\n",
2637                              names[i]);
2638                        continue;
2639                }
2640
2641                if (j == LNET_MAX_INTERFACES) {
2642                        CWARN("Ignoring interface %s (too many interfaces)\n",
2643                              names[i]);
2644                        continue;
2645                }
2646
2647                net->ksnn_interfaces[j].ksni_ipaddr = ip;
2648                net->ksnn_interfaces[j].ksni_netmask = mask;
2649                strncpy(&net->ksnn_interfaces[j].ksni_name[0],
2650                        names[i], IFNAMSIZ);
2651                j++;
2652        }
2653
2654        libcfs_ipif_free_enumeration(names, n);
2655
2656        if (j == 0)
2657                CERROR("Can't find any usable interfaces\n");
2658
2659        return j;
2660}
2661
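    /* Count how many of @net's interfaces are not already used by another
     * configured net.  Alias devices ("eth0:1") are compared by their
     * base name. */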
2662int
2663ksocknal_search_new_ipif(ksock_net_t *net)
2664{
2665        int     new_ipif = 0;
2666        int     i;
2667
2668        for (i = 0; i < net->ksnn_ninterfaces; i++) {
2669                char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2670                char            *colon = strchr(ifnam, ':');
2671                int             found  = 0;
2672                ksock_net_t     *tmp;
2673                int             j;
2674
2675                if (colon != NULL) /* ignore alias device */
2676                        *colon = 0;
2677
2678                list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2679                                        ksnn_list) {
2680                        for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2681                                char *ifnam2 =
2682                                        &tmp->ksnn_interfaces[j].ksni_name[0];
2683                                char *colon2 = strchr(ifnam2, ':');
2684
2685                                if (colon2 != NULL)
2686                                        *colon2 = 0;
2687
2688                                found = strcmp(ifnam, ifnam2) == 0;
2689                                if (colon2 != NULL)
2690                                        *colon2 = ':';
2691                        }
2692                        if (found)
2693                                break;
2694                }
2695
2696                new_ipif += !found;
2697                if (colon != NULL)
2698                        *colon = ':';
2699        }
2700
2701        return new_ipif;
2702}
2703
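    /* Start scheduler threads for one CPT: the full complement on first
     * use, or up to two extra threads when a new interface is added
     * later. */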
2704int
2705ksocknal_start_schedulers(struct ksock_sched_info *info)
2706{
2707        int     nthrs;
2708        int     rc = 0;
2709        int     i;
2710
2711        if (info->ksi_nthreads == 0) {
2712                if (*ksocknal_tunables.ksnd_nscheds > 0) {
2713                        nthrs = info->ksi_nthreads_max;
2714                } else {
2715                        nthrs = cfs_cpt_weight(lnet_cpt_table(),
2716                                               info->ksi_cpt);
2717                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2718                        nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2719                }
2720                nthrs = min(nthrs, info->ksi_nthreads_max);
2721        } else {
2722                LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2723                /* start up to two more threads if there is a new interface */
2724                nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2725        }
2726
2727        for (i = 0; i < nthrs; i++) {
2728                long            id;
2729                char            name[20];
2730                ksock_sched_t   *sched;
2731                id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2732                sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2733                snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2734                         info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2735
2736                rc = ksocknal_thread_start(ksocknal_scheduler,
2737                                           (void *)id, name);
2738                if (rc == 0)
2739                        continue;
2740
2741                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2742                       info->ksi_cpt, info->ksi_nthreads + i, rc);
2743                break;
2744        }
2745
2746        info->ksi_nthreads += i;
2747        return rc;
2748}
2749
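    /* Ensure scheduler threads are running on every CPT @net will use,
     * starting extra threads when the net brings in a new interface. */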
2750int
2751ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2752{
2753        int     newif = ksocknal_search_new_ipif(net);
2754        int     rc;
2755        int     i;
2756
2757        LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2758
2759        for (i = 0; i < ncpts; i++) {
2760                struct ksock_sched_info *info;
2761                int cpt = (cpts == NULL) ? i : cpts[i];
2762
2763                LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2764                info = ksocknal_data.ksnd_sched_info[cpt];
2765
2766                if (!newif && info->ksi_nthreads > 0)
2767                        continue;
2768
2769                rc = ksocknal_start_schedulers(info);
2770                if (rc != 0)
2771                        return rc;
2772        }
2773        return 0;
2774}
2775
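    /* lnd_startup() handler: bring up the module-global state if needed,
     * allocate and configure the per-NI ksock_net_t (interfaces, credits),
     * start scheduler threads and derive the NID from the first
     * interface's address. */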
2776int
2777ksocknal_startup (lnet_ni_t *ni)
2778{
2779        ksock_net_t  *net;
2780        int        rc;
2781        int        i;
2782
2783        LASSERT (ni->ni_lnd == &the_ksocklnd);
2784
2785        if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2786                rc = ksocknal_base_startup();
2787                if (rc != 0)
2788                        return rc;
2789        }
2790
2791        LIBCFS_ALLOC(net, sizeof(*net));
2792        if (net == NULL)
2793                goto fail_0;
2794
2795        spin_lock_init(&net->ksnn_lock);
2796        net->ksnn_incarnation = ksocknal_new_incarnation();
2797        ni->ni_data = net;
2798        ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
2799        ni->ni_maxtxcredits   = *ksocknal_tunables.ksnd_credits;
2800        ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
2801        ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2802
2803        if (ni->ni_interfaces[0] == NULL) {
2804                rc = ksocknal_enumerate_interfaces(net);
2805                if (rc <= 0)
2806                        goto fail_1;
2807
2808                net->ksnn_ninterfaces = 1;
2809        } else {
2810                for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2811                        int    up;
2812
2813                        if (ni->ni_interfaces[i] == NULL)
2814                                break;
2815
2816                        rc = libcfs_ipif_query(
2817                                ni->ni_interfaces[i], &up,
2818                                &net->ksnn_interfaces[i].ksni_ipaddr,
2819                                &net->ksnn_interfaces[i].ksni_netmask);
2820
2821                        if (rc != 0) {
2822                                CERROR("Can't get interface %s info: %d\n",
2823                                       ni->ni_interfaces[i], rc);
2824                                goto fail_1;
2825                        }
2826
2827                        if (!up) {
2828                                CERROR("Interface %s is down\n",
2829                                       ni->ni_interfaces[i]);
2830                                goto fail_1;
2831                        }
2832
2833                        strncpy(&net->ksnn_interfaces[i].ksni_name[0],
2834                                ni->ni_interfaces[i], IFNAMSIZ);
2835                }
2836                net->ksnn_ninterfaces = i;
2837        }
2838
2839        /* call it before adding the net to ksocknal_data.ksnd_nets */
2840        rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2841        if (rc != 0)
2842                goto fail_1;
2843
2844        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2845                                net->ksnn_interfaces[0].ksni_ipaddr);
2846        list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2847
2848        ksocknal_data.ksnd_nnets++;
2849
2850        return 0;
2851
2852 fail_1:
2853        LIBCFS_FREE(net, sizeof(*net));
2854 fail_0:
2855        if (ksocknal_data.ksnd_nnets == 0)
2856                ksocknal_base_shutdown();
2857
2858        return -ENETDOWN;
2859}
2860
2861
2862void __exit
2863ksocknal_module_fini (void)
2864{
2865        lnet_unregister_lnd(&the_ksocklnd);
2866        ksocknal_tunables_fini();
2867}
2868
2869int __init
2870ksocknal_module_init (void)
2871{
2872        int    rc;
2873
2874        /* check that the ksnr_connected/connecting fields are large enough */
2875        CLASSERT (SOCKLND_CONN_NTYPES <= 4);
2876        CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2877
2878        /* initialize the_ksocklnd */
2879        the_ksocklnd.lnd_type     = SOCKLND;
2880        the_ksocklnd.lnd_startup  = ksocknal_startup;
2881        the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2882        the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2883        the_ksocklnd.lnd_send     = ksocknal_send;
2884        the_ksocklnd.lnd_recv     = ksocknal_recv;
2885        the_ksocklnd.lnd_notify   = ksocknal_notify;
2886        the_ksocklnd.lnd_query    = ksocknal_query;
2887        the_ksocklnd.lnd_accept   = ksocknal_accept;
2888
2889        rc = ksocknal_tunables_init();
2890        if (rc != 0)
2891                return rc;
2892
2893        lnet_register_lnd(&the_ksocklnd);
2894
2895        return 0;
2896}
2897
2898MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2899MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
2900MODULE_LICENSE("GPL");
2901MODULE_VERSION("3.0.0");
2902
2903module_init(ksocknal_module_init);
2904module_exit(ksocknal_module_fini);
2905