linux/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 *   Author: Zach Brown <zab@zabbo.net>
 *   Author: Peter J. Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 *   This file is part of Lustre, http://www.lustre.org
 *
 *   Portals is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Portals is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Portals; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_PORTAL_ALLOC
#define DEBUG_SUBSYSTEM S_LND

#include "socklnd_lib-linux.h"

#include <linux/libcfs/libcfs.h>
#include <linux/lnet/lnet.h>
#include <linux/lnet/lib-lnet.h>
#include <linux/lnet/socklnd.h>
#include <linux/lnet/lnet-sysctl.h>

#define SOCKNAL_PEER_HASH_SIZE  101         /* # peer lists */
#define SOCKNAL_RESCHED         100         /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000        /* connd is retrying the same reconnection endlessly */
#define SOCKNAL_ENOMEM_RETRY    CFS_TICK    /* jiffies between retries */

#define SOCKNAL_SINGLE_FRAG_TX      0       /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX      0       /* disable multi-fragment receives */

#define SOCKNAL_VERSION_DEBUG       0       /* enable protocol version debugging */

/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
 * There is no risk if we're not running on a CONFIG_HIGHMEM platform. */
#ifdef CONFIG_HIGHMEM
# define SOCKNAL_RISK_KMAP_DEADLOCK  0
#else
# define SOCKNAL_RISK_KMAP_DEADLOCK  1
#endif

struct ksock_sched_info;

typedef struct                                  /* per scheduler state */
{
        spinlock_t              kss_lock;       /* serialise */
        struct list_head        kss_rx_conns;   /* conn waiting to be read */
        /* conn waiting to be written */
        struct list_head        kss_tx_conns;
        /* zombie noop tx list */
        struct list_head        kss_zombie_noop_txs;
        wait_queue_head_t       kss_waitq;      /* where scheduler sleeps */
        /* # connections assigned to this scheduler */
        int                     kss_nconns;
        struct ksock_sched_info *kss_info;      /* owner of it */
        struct page             *kss_rx_scratch_pgs[LNET_MAX_IOV];
        struct iovec            kss_scratch_iov[LNET_MAX_IOV];
} ksock_sched_t;

struct ksock_sched_info {
        int                     ksi_nthreads_max; /* max allowed threads */
        int                     ksi_nthreads;   /* number of threads */
        int                     ksi_cpt;        /* CPT id */
        ksock_sched_t           *ksi_scheds;    /* array of schedulers */
};

#define KSOCK_CPT_SHIFT                 16
#define KSOCK_THREAD_ID(cpt, sid)       (((cpt) << KSOCK_CPT_SHIFT) | (sid))
#define KSOCK_THREAD_CPT(id)            ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id)            ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
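
/*
 * Illustrative example (added for clarity, not from the original source):
 * with KSOCK_CPT_SHIFT at 16, KSOCK_THREAD_ID(2, 5) packs CPT 2 and
 * scheduler index 5 into 0x20005; KSOCK_THREAD_CPT(0x20005) recovers 2 and
 * KSOCK_THREAD_SID(0x20005) recovers 5.
 */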

typedef struct                                  /* in-use interface */
{
        __u32           ksni_ipaddr;            /* interface's IP address */
        __u32           ksni_netmask;           /* interface's network mask */
        int             ksni_nroutes;           /* # routes using (active) */
        int             ksni_npeers;            /* # peers using (passive) */
        char            ksni_name[IFNAMSIZ];    /* interface name */
} ksock_interface_t;

typedef struct
{
        /* "stuck" socket timeout (seconds) */
        int             *ksnd_timeout;
        /* # scheduler threads in each pool while starting */
        int             *ksnd_nscheds;
        int             *ksnd_nconnds;          /* # connection daemons */
        int             *ksnd_nconnds_max;      /* max # connection daemons */
        int             *ksnd_min_reconnectms;  /* first connection retry after (ms)... */
        int             *ksnd_max_reconnectms;  /* ...exponentially increasing to this */
        int             *ksnd_eager_ack;        /* make TCP ack eagerly? */
        int             *ksnd_typed_conns;      /* drive sockets by type? */
        int             *ksnd_min_bulk;         /* smallest "large" message */
        int             *ksnd_tx_buffer_size;   /* socket tx buffer size */
        int             *ksnd_rx_buffer_size;   /* socket rx buffer size */
        int             *ksnd_nagle;            /* enable Nagle? */
        int             *ksnd_round_robin;      /* round robin for multiple interfaces */
        int             *ksnd_keepalive;        /* # secs for sending keepalive NOOP */
        int             *ksnd_keepalive_idle;   /* # idle secs before 1st probe */
        int             *ksnd_keepalive_count;  /* # probes */
        int             *ksnd_keepalive_intvl;  /* time between probes */
        int             *ksnd_credits;          /* # concurrent sends */
        int             *ksnd_peertxcredits;    /* # concurrent sends to 1 peer */
        int             *ksnd_peerrtrcredits;   /* # per-peer router buffer credits */
        int             *ksnd_peertimeout;      /* seconds to consider peer dead */
        int             *ksnd_enable_csum;      /* enable checksum */
        int             *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
        int             *ksnd_nonblk_zcack;     /* always send zc-ack on non-blocking connection */
        unsigned int    *ksnd_zc_min_payload;   /* minimum zero copy payload size */
        int             *ksnd_zc_recv;          /* enable ZC receive (for Chelsio TOE) */
        int             *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
        ctl_table_header_t *ksnd_sysctl;        /* sysctl interface */
#endif
} ksock_tunables_t;
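
/*
 * Note (added for clarity, not in the original header): every tunable above
 * is a pointer rather than a value; each field is expected to point at the
 * corresponding module-parameter / sysctl storage, so reading through the
 * pointer always sees the currently configured setting.
 */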

typedef struct
{
        __u64             ksnn_incarnation;     /* my epoch */
        spinlock_t        ksnn_lock;            /* serialise */
        struct list_head  ksnn_list;            /* chain on global list */
        int               ksnn_npeers;          /* # peers */
        int               ksnn_shutdown;        /* shutting down? */
        int               ksnn_ninterfaces;     /* IP interfaces */
        ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
} ksock_net_t;

/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT  120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV     1

typedef struct
{
        int                     ksnd_init;      /* initialisation state */
        int                     ksnd_nnets;     /* # networks set up */
        struct list_head        ksnd_nets;      /* list of nets */
        /* stabilize peer/conn ops */
        rwlock_t                ksnd_global_lock;
        /* hash table of all my known peers */
        struct list_head        *ksnd_peers;
        int                     ksnd_peer_hash_size; /* size of ksnd_peers */

        int                     ksnd_nthreads;  /* # live threads */
        int                     ksnd_shuttingdown; /* tell threads to exit */
        /* schedulers information */
        struct ksock_sched_info **ksnd_sched_info;

        atomic_t                ksnd_nactive_txs;    /* # active txs */

        struct list_head        ksnd_deathrow_conns; /* conns to close: reaper_lock */
        struct list_head        ksnd_zombie_conns;   /* conns to free: reaper_lock */
        struct list_head        ksnd_enomem_conns;   /* conns to retry: reaper_lock */
        wait_queue_head_t       ksnd_reaper_waitq;   /* reaper sleeps here */
        cfs_time_t              ksnd_reaper_waketime;/* when reaper will wake */
        spinlock_t              ksnd_reaper_lock;    /* serialise */

        int                     ksnd_enomem_tx;      /* test ENOMEM sender */
        int                     ksnd_stall_tx;       /* test sluggish sender */
        int                     ksnd_stall_rx;       /* test sluggish receiver */

        struct list_head        ksnd_connd_connreqs; /* incoming connection requests */
        struct list_head        ksnd_connd_routes;   /* routes waiting to be connected */
        wait_queue_head_t       ksnd_connd_waitq;    /* connds sleep here */
        int                     ksnd_connd_connecting;/* # connds connecting */
        /** time stamp of the last failed connecting attempt */
        long                    ksnd_connd_failed_stamp;
        /** # starting connd */
        unsigned                ksnd_connd_starting;
        /** time stamp of the last starting connd */
        long                    ksnd_connd_starting_stamp;
        /** # running connd */
        unsigned                ksnd_connd_running;
        spinlock_t              ksnd_connd_lock;     /* serialise */

        struct list_head        ksnd_idle_noop_txs;  /* list head for freed noop tx */
        spinlock_t              ksnd_tx_lock;        /* serialise, g_lock unsafe */

} ksock_nal_data_t;

#define SOCKNAL_INIT_NOTHING    0
#define SOCKNAL_INIT_DATA       1
#define SOCKNAL_INIT_ALL        2

/* A packet just assembled for transmission is represented by 1 or more
 * struct iovec fragments (the first frag contains the portals header),
 * followed by 0 or more lnet_kiov_t fragments.
 *
 * On the receive side, initially 1 struct iovec fragment is posted for
 * receive (the header).  Once the header has been received, the payload is
 * received into either struct iovec or lnet_kiov_t fragments, depending on
 * what the header matched or whether the message needs forwarding. */
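
/* For example (illustrative note, not in the original comment): a message
 * carrying a paged payload is built with tx_frags.paged below, i.e. a single
 * struct iovec for the header plus one lnet_kiov_t per payload page, while a
 * small immediate message uses tx_frags.virt with header and payload carried
 * in iovecs. */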

struct ksock_conn;                              /* forward ref */
struct ksock_peer;                              /* forward ref */
struct ksock_route;                             /* forward ref */
struct ksock_proto;                             /* forward ref */

typedef struct                                  /* transmit packet */
{
        struct list_head   tx_list;        /* queue on conn for transmission etc */
        struct list_head   tx_zc_list;     /* queue on peer for ZC request */
        atomic_t           tx_refcount;    /* tx reference count */
        int                tx_nob;         /* # packet bytes */
        int                tx_resid;       /* residual bytes */
        int                tx_niov;        /* # packet iovec frags */
        struct iovec      *tx_iov;         /* packet iovec frags */
        int                tx_nkiov;       /* # packet page frags */
        unsigned short     tx_zc_aborted;  /* aborted ZC request */
        unsigned short     tx_zc_capable:1; /* payload is large enough for ZC */
        unsigned short     tx_zc_checked:1; /* Have I checked if I should ZC? */
        unsigned short     tx_nonblk:1;    /* it's a non-blocking ACK */
        lnet_kiov_t       *tx_kiov;        /* packet page frags */
        struct ksock_conn *tx_conn;        /* owning conn */
        lnet_msg_t        *tx_lnetmsg;     /* lnet message for lnet_finalize() */
        cfs_time_t         tx_deadline;    /* when (in jiffies) tx times out */
        ksock_msg_t        tx_msg;         /* socklnd message buffer */
        int                tx_desc_size;   /* size of this descriptor */
        union {
                struct {
                        struct iovec iov;       /* virt hdr */
                        lnet_kiov_t  kiov[0];   /* paged payload */
                }                 paged;
                struct {
                        struct iovec iov[1];    /* virt hdr + payload */
                }                 virt;
        }                  tx_frags;
} ksock_tx_t;

#define KSOCK_NOOP_TX_SIZE  ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
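
/* Clarifying note (added): a NOOP tx carries no payload pages, so only the
 * descriptor up to the zero-length kiov[] array is needed; KSOCK_NOOP_TX_SIZE
 * is therefore the descriptor size expected for NOOP / ZC-ACK messages. */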

/* network zero copy callback descriptor embedded in ksock_tx_t */

/* space for the rx frag descriptors; we either read a single contiguous
 * header, or up to LNET_MAX_IOV frags of payload of either type. */
typedef union {
        struct iovec     iov[LNET_MAX_IOV];
        lnet_kiov_t      kiov[LNET_MAX_IOV];
} ksock_rxiovspace_t;

#define SOCKNAL_RX_KSM_HEADER   1               /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER  2               /* reading lnet message header */
#define SOCKNAL_RX_PARSE        3               /* Calling lnet_parse() */
#define SOCKNAL_RX_PARSE_WAIT   4               /* waiting to be told to read the body */
#define SOCKNAL_RX_LNET_PAYLOAD 5               /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP         6               /* skipping body */

typedef struct ksock_conn
{
        struct ksock_peer  *ksnc_peer;          /* owning peer */
        struct ksock_route *ksnc_route;         /* owning route */
        struct list_head    ksnc_list;          /* stash on peer's conn list */
        socket_t           *ksnc_sock;          /* actual socket */
        void               *ksnc_saved_data_ready; /* socket's original data_ready() callback */
        void               *ksnc_saved_write_space; /* socket's original write_space() callback */
        atomic_t            ksnc_conn_refcount; /* conn refcount */
        atomic_t            ksnc_sock_refcount; /* sock refcount */
        ksock_sched_t      *ksnc_scheduler;     /* who schedules this connection */
        __u32               ksnc_myipaddr;      /* my IP */
        __u32               ksnc_ipaddr;        /* peer's IP */
        int                 ksnc_port;          /* peer's port */
        signed int          ksnc_type:3;        /* type of connection,
                                                 * should be signed value */
        unsigned int        ksnc_closing:1;     /* being shut down */
        unsigned int        ksnc_flip:1;        /* flip or not, only for V2.x */
        unsigned int        ksnc_zc_capable:1;  /* zero copy enabled */
        struct ksock_proto *ksnc_proto;         /* protocol for the connection */

        /* reader */
        struct list_head    ksnc_rx_list;       /* where I enq waiting input or a forwarding descriptor */
        cfs_time_t          ksnc_rx_deadline;   /* when (in jiffies) receive times out */
        __u8                ksnc_rx_started;    /* started receiving a message */
        __u8                ksnc_rx_ready;      /* data ready to read */
        __u8                ksnc_rx_scheduled;  /* being progressed */
        __u8                ksnc_rx_state;      /* what is being read */
        int                 ksnc_rx_nob_left;   /* # bytes to next hdr/body */
        int                 ksnc_rx_nob_wanted; /* bytes actually wanted */
        int                 ksnc_rx_niov;       /* # iovec frags */
        struct iovec       *ksnc_rx_iov;        /* the iovec frags */
        int                 ksnc_rx_nkiov;      /* # page frags */
        lnet_kiov_t        *ksnc_rx_kiov;       /* the page frags */
        ksock_rxiovspace_t  ksnc_rx_iov_space;  /* space for frag descriptors */
        __u32               ksnc_rx_csum;       /* partial checksum for incoming data */
        void               *ksnc_cookie;        /* rx lnet_finalize passthru arg */
        ksock_msg_t         ksnc_msg;           /* incoming message buffer:
                                                 * V2.x message takes the
                                                 * whole struct
                                                 * V1.x message is a bare
                                                 * lnet_hdr_t, it's stored in
                                                 * ksnc_msg.ksm_u.lnetmsg */

        /* WRITER */
        struct list_head    ksnc_tx_list;       /* where I enq waiting for output space */
        struct list_head    ksnc_tx_queue;      /* packets waiting to be sent */
        ksock_tx_t         *ksnc_tx_carrier;    /* next TX that can carry a LNet message or ZC-ACK */
        cfs_time_t          ksnc_tx_deadline;   /* when (in jiffies) tx times out */
        int                 ksnc_tx_bufnob;     /* send buffer marker */
        atomic_t            ksnc_tx_nob;        /* # bytes queued */
        int                 ksnc_tx_ready;      /* write space */
        int                 ksnc_tx_scheduled;  /* being progressed */
        cfs_time_t          ksnc_tx_last_post;  /* time stamp of the last posted TX */
} ksock_conn_t;

typedef struct ksock_route
{
        struct list_head    ksnr_list;          /* chain on peer route list */
        struct list_head    ksnr_connd_list;    /* chain on ksnr_connd_routes */
        struct ksock_peer  *ksnr_peer;          /* owning peer */
        atomic_t            ksnr_refcount;      /* # users */
        cfs_time_t          ksnr_timeout;       /* when (in jiffies) reconnection can happen next */
        cfs_duration_t      ksnr_retry_interval;/* how long between retries */
        __u32               ksnr_myipaddr;      /* my IP */
        __u32               ksnr_ipaddr;        /* IP address to connect to */
        int                 ksnr_port;          /* port to connect to */
        unsigned int        ksnr_scheduled:1;   /* scheduled for attention */
        unsigned int        ksnr_connecting:1;  /* connection establishment in progress */
        unsigned int        ksnr_connected:4;   /* connections established by type */
        unsigned int        ksnr_deleted:1;     /* been removed from peer? */
        unsigned int        ksnr_share_count;   /* created explicitly? */
        int                 ksnr_conn_count;    /* # conns established by this route */
} ksock_route_t;

#define SOCKNAL_KEEPALIVE_PING    1             /* cookie for keepalive ping */

typedef struct ksock_peer
{
        struct list_head    ksnp_list;          /* stash on global peer list */
        cfs_time_t          ksnp_last_alive;    /* when (in jiffies) I was last alive */
        lnet_process_id_t   ksnp_id;            /* who's on the other end(s) */
        atomic_t            ksnp_refcount;      /* # users */
        int                 ksnp_sharecount;    /* lconf usage counter */
        int                 ksnp_closing;       /* being closed */
        int                 ksnp_accepting;     /* # passive connections pending */
        int                 ksnp_error;         /* errno on closing last conn */
        __u64               ksnp_zc_next_cookie;/* ZC completion cookie */
        __u64               ksnp_incarnation;   /* latest known peer incarnation */
        struct ksock_proto *ksnp_proto;         /* latest known peer protocol */
        struct list_head    ksnp_conns;         /* all active connections */
        struct list_head    ksnp_routes;        /* routes */
        struct list_head    ksnp_tx_queue;      /* waiting packets */
        spinlock_t          ksnp_lock;          /* serialize, g_lock unsafe */
        struct list_head    ksnp_zc_req_list;   /* zero copy requests wait for ACK */
        cfs_time_t          ksnp_send_keepalive;/* time to send keepalive */
        lnet_ni_t          *ksnp_ni;            /* which network */
        int                 ksnp_n_passive_ips; /* # of... */
        __u32               ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
} ksock_peer_t;

typedef struct ksock_connreq
{
        struct list_head    ksncr_list;         /* stash on ksnd_connd_connreqs */
        lnet_ni_t          *ksncr_ni;           /* chosen NI */
        socket_t           *ksncr_sock;         /* accepted socket */
} ksock_connreq_t;

extern ksock_nal_data_t ksocknal_data;
extern ksock_tunables_t ksocknal_tunables;

#define SOCKNAL_MATCH_NO        0       /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES       1       /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY       2       /* TX can be sent on the connection, but not preferred */

typedef struct ksock_proto
{
        int          pro_version;                                              /* version number of protocol */
        int        (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);     /* handshake function */
        int        (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);/* handshake function */
        void       (*pro_pack)(ksock_tx_t *);                                  /* message pack */
        void       (*pro_unpack)(ksock_msg_t *);                               /* message unpack */
        ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);         /* queue tx on the connection */
        int        (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */
        int        (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);            /* handle ZC request */
        int        (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);          /* handle ZC ACK */
        int        (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);         /* msg type matches the connection type:
                                                                                 * return value:
                                                                                 *   return MATCH_NO  : no
                                                                                 *   return MATCH_YES : matching type
                                                                                 *   return MATCH_MAY : can be backup */
} ksock_proto_t;

extern ksock_proto_t ksocknal_protocol_v1x;
extern ksock_proto_t ksocknal_protocol_v2x;
extern ksock_proto_t ksocknal_protocol_v3x;

#define KSOCK_PROTO_V1_MAJOR    LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR    LNET_PROTO_TCP_VERSION_MINOR
#define KSOCK_PROTO_V1          KSOCK_PROTO_V1_MAJOR

#ifndef CPU_MASK_NONE
#define CPU_MASK_NONE   0UL
#endif

static inline int
ksocknal_route_mask(void)
{
        if (!*ksocknal_tunables.ksnd_typed_conns)
                return (1 << SOCKLND_CONN_ANY);

        return ((1 << SOCKLND_CONN_CONTROL) |
                (1 << SOCKLND_CONN_BULK_IN) |
                (1 << SOCKLND_CONN_BULK_OUT));
}
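
/* Illustrative note (added, assuming the usual SOCKLND_CONN_* values of
 * ANY=0, CONTROL=1, BULK_IN=2, BULK_OUT=3 from <linux/lnet/socklnd.h>):
 * with typed connections disabled the mask is 0x1 (ANY only); with them
 * enabled it is 0xe (CONTROL | BULK_IN | BULK_OUT). */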

static inline struct list_head *
ksocknal_nid2peerlist (lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;

        return &ksocknal_data.ksnd_peers[hash];
}
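
/* Added note: peers are hashed into ksnd_peers simply by NID modulo
 * ksnd_peer_hash_size (SOCKNAL_PEER_HASH_SIZE, 101 buckets by default). */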

static inline void
ksocknal_conn_addref (ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        atomic_inc(&conn->ksnc_conn_refcount);
}

extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);

static inline void
ksocknal_conn_decref (ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
                ksocknal_queue_zombie_conn(conn);
}
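
/* Added note on the refcount pattern used throughout this header: *_addref()
 * may only be called while a reference is already held (hence the LASSERTs),
 * and the *_decref() that drops the last reference hands the object to its
 * destructor, e.g. a connection is queued on ksnd_zombie_conns for the reaper
 * thread to free. */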

static inline int
ksocknal_connsock_addref (ksock_conn_t *conn)
{
        int   rc = -ESHUTDOWN;

        read_lock(&ksocknal_data.ksnd_global_lock);
        if (!conn->ksnc_closing) {
                LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
                atomic_inc(&conn->ksnc_sock_refcount);
                rc = 0;
        }
        read_unlock(&ksocknal_data.ksnd_global_lock);

        return (rc);
}

static inline void
ksocknal_connsock_decref (ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
                LASSERT(conn->ksnc_closing);
                libcfs_sock_release(conn->ksnc_sock);
                conn->ksnc_sock = NULL;
                ksocknal_finalize_zcreq(conn);
        }
}

static inline void
ksocknal_tx_addref (ksock_tx_t *tx)
{
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        atomic_inc(&tx->tx_refcount);
}

extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx);
extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);

static inline void
ksocknal_tx_decref (ksock_tx_t *tx)
{
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        if (atomic_dec_and_test(&tx->tx_refcount))
                ksocknal_tx_done(NULL, tx);
}

static inline void
ksocknal_route_addref (ksock_route_t *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        atomic_inc(&route->ksnr_refcount);
}

extern void ksocknal_destroy_route (ksock_route_t *route);

static inline void
ksocknal_route_decref (ksock_route_t *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        if (atomic_dec_and_test(&route->ksnr_refcount))
                ksocknal_destroy_route(route);
}

static inline void
ksocknal_peer_addref (ksock_peer_t *peer)
{
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        atomic_inc(&peer->ksnp_refcount);
}

extern void ksocknal_destroy_peer (ksock_peer_t *peer);

static inline void
ksocknal_peer_decref (ksock_peer_t *peer)
{
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        if (atomic_dec_and_test(&peer->ksnp_refcount))
                ksocknal_destroy_peer(peer);
}

int ksocknal_startup (lnet_ni_t *ni);
void ksocknal_shutdown (lnet_ni_t *ni);
int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int ksocknal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                  int delayed, unsigned int niov,
                  struct iovec *iov, lnet_kiov_t *kiov,
                  unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(lnet_ni_t *ni, socket_t *sock);

extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
extern ksock_peer_t *ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id);
extern ksock_peer_t *ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id);
extern void ksocknal_peer_failed (ksock_peer_t *peer);
extern int ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                                 socket_t *sock, int type);
extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why);
extern void ksocknal_terminate_conn (ksock_conn_t *conn);
extern void ksocknal_destroy_conn (ksock_conn_t *conn);
extern int  ksocknal_close_peer_conns_locked (ksock_peer_t *peer,
                                              __u32 ipaddr, int why);
extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why);
extern int ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr);
extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
                                               ksock_tx_t *tx, int nonblk);

extern int  ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
                                   lnet_process_id_t id);
extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
extern void ksocknal_free_tx (ksock_tx_t *tx);
extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
                                  int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer);
extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_t *peer);
extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_t *peer);
extern int ksocknal_new_packet (ksock_conn_t *conn, int skip);
extern int ksocknal_scheduler (void *arg);
extern int ksocknal_connd (void *arg);
extern int ksocknal_reaper (void *arg);
extern int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
                                lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
                                ksock_hello_msg_t *hello, lnet_process_id_t *id,
                                __u64 *incarnation);
extern void ksocknal_read_callback(ksock_conn_t *conn);
extern void ksocknal_write_callback(ksock_conn_t *conn);

extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
extern void ksocknal_lib_save_callback(socket_t *sock, ksock_conn_t *conn);
extern void ksocknal_lib_set_callback(socket_t *sock,  ksock_conn_t *conn);
extern void ksocknal_lib_reset_callback(socket_t *sock, ksock_conn_t *conn);
extern void ksocknal_lib_push_conn (ksock_conn_t *conn);
extern int ksocknal_lib_get_conn_addrs (ksock_conn_t *conn);
extern int ksocknal_lib_setup_sock (socket_t *so);
extern int ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx);
extern int ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx);
extern void ksocknal_lib_eager_ack (ksock_conn_t *conn);
extern int ksocknal_lib_recv_iov (ksock_conn_t *conn);
extern int ksocknal_lib_recv_kiov (ksock_conn_t *conn);
extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem,
                                           int *rxmem, int *nagle);

extern int ksocknal_tunables_init(void);
extern void ksocknal_tunables_fini(void);
extern int ksocknal_lib_tunables_init(void);
extern void ksocknal_lib_tunables_fini(void);

extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);

extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
extern int ksocknal_lib_bind_thread_to_cpu(int id);