/* linux/drivers/staging/lustre/include/linux/lnet/lib-types.h */
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.gnu.org/licenses/gpl-2.0.html
  19 *
  20 * GPL HEADER END
  21 */
  22/*
  23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  24 * Use is subject to license terms.
  25 *
  26 * Copyright (c) 2012, 2015, Intel Corporation.
  27 */
  28/*
  29 * This file is part of Lustre, http://www.lustre.org/
  30 * Lustre is a trademark of Seagate, Inc.
  31 *
  32 * lnet/include/lnet/lib-types.h
  33 */
  34
  35#ifndef __LNET_LIB_TYPES_H__
  36#define __LNET_LIB_TYPES_H__
  37
  38#include <linux/kthread.h>
  39#include <linux/uio.h>
  40#include <linux/types.h>
  41
  42#include "types.h"
  43#include "lnetctl.h"
  44
/* Max payload size */
#define LNET_MAX_PAYLOAD      CONFIG_LNET_MAX_PAYLOAD
/*
 * Sanity-check the configured payload at compile time: it must hold at
 * least one LNET MTU, and must not exceed what a single scatter/gather
 * descriptor can address (LNET_MAX_IOV pages).
 */
#if (LNET_MAX_PAYLOAD < LNET_MTU)
# error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb"
#elif (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV))
# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb"
#endif
  52
/* forward refs */
struct lnet_libmd;

/*
 * An in-flight LNET message.  Tracks routing/credit state via the bitfields
 * below, plus the payload descriptor (iov/kiov fragments) and the wire
 * header/event used when the message completes.
 */
typedef struct lnet_msg {
        struct list_head        msg_activelist;
        struct list_head        msg_list;          /* Q for credits/MD */

        lnet_process_id_t       msg_target;
        /* where is it from, it's only for building event */
        lnet_nid_t              msg_from;
        __u32                   msg_type;

        /* committed for sending */
        unsigned int            msg_tx_committed:1;
        /* CPT # this message committed for sending */
        unsigned int            msg_tx_cpt:15;
        /* committed for receiving */
        unsigned int            msg_rx_committed:1;
        /* CPT # this message committed for receiving */
        unsigned int            msg_rx_cpt:15;
        /* queued for tx credit */
        unsigned int            msg_tx_delayed:1;
        /* queued for RX buffer */
        unsigned int            msg_rx_delayed:1;
        /* ready for pending on RX delay list */
        unsigned int            msg_rx_ready_delay:1;

        unsigned int    msg_vmflush:1;          /* VM trying to free memory */
        unsigned int    msg_target_is_router:1; /* sending to a router */
        unsigned int    msg_routing:1;          /* being forwarded */
        unsigned int    msg_ack:1;              /* ack on finalize (PUT) */
        unsigned int    msg_sending:1;          /* outgoing message */
        unsigned int    msg_receiving:1;        /* being received */
        unsigned int    msg_txcredit:1;         /* taken an NI send credit */
        unsigned int    msg_peertxcredit:1;     /* taken a peer send credit */
        unsigned int    msg_rtrcredit:1;        /* taken a global router credit */
        unsigned int    msg_peerrtrcredit:1;    /* taken a peer router credit */
        unsigned int    msg_onactivelist:1;     /* on the activelist */
        unsigned int    msg_rdma_get:1;

        struct lnet_peer        *msg_txpeer;     /* peer I'm sending to */
        struct lnet_peer        *msg_rxpeer;     /* peer I received from */

        void                    *msg_private;
        struct lnet_libmd       *msg_md;

        /* payload accounting: total length, bytes wanted, and start offset */
        unsigned int             msg_len;
        unsigned int             msg_wanted;
        unsigned int             msg_offset;
        unsigned int             msg_niov;      /* # fragments in iov/kiov */
        struct kvec             *msg_iov;       /* virtual-memory fragments */
        lnet_kiov_t             *msg_kiov;      /* page fragments */

        lnet_event_t             msg_ev;        /* event built for completion */
        lnet_hdr_t               msg_hdr;       /* wire header */
} lnet_msg_t;
 109
 110typedef struct lnet_libhandle {
 111        struct list_head        lh_hash_chain;
 112        __u64                   lh_cookie;
 113} lnet_libhandle_t;
 114
 115#define lh_entry(ptr, type, member) \
 116        ((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
 117
/*
 * Event queue: a fixed-size ring of lnet_event_t.  Producers advance
 * eq_enq_seq, consumers advance eq_deq_seq; eq_callback (if set) is
 * invoked on enqueue.  eq_refs is a per-CPT refcount array.
 */
typedef struct lnet_eq {
        struct list_head          eq_list;
        lnet_libhandle_t          eq_lh;
        lnet_seq_t                eq_enq_seq;   /* enqueue sequence number */
        lnet_seq_t                eq_deq_seq;   /* dequeue sequence number */
        unsigned int              eq_size;      /* # slots in eq_events */
        lnet_eq_handler_t         eq_callback;
        lnet_event_t             *eq_events;
        int                     **eq_refs;      /* percpt refcount for EQ */
} lnet_eq_t;
 128
/*
 * Match entry: one entry on a portal's match list.  An incoming message
 * matches when its match bits equal me_match_bits after masking with
 * me_ignore_bits and its source matches me_match_id.
 */
typedef struct lnet_me {
        struct list_head         me_list;
        lnet_libhandle_t         me_lh;
        lnet_process_id_t        me_match_id;   /* allowed source */
        unsigned int             me_portal;
        unsigned int             me_pos;        /* hash offset in mt_hash */
        __u64                    me_match_bits;
        __u64                    me_ignore_bits;
        lnet_unlink_t            me_unlink;     /* auto-unlink behaviour */
        struct lnet_libmd       *me_md;         /* attached MD, if any */
} lnet_me_t;
 140
/*
 * Memory descriptor: describes a user buffer (as kvec or kiov fragments)
 * that LNET reads from or writes into.  md_threshold counts remaining
 * operations before auto-unlink; md_refcount tracks in-flight users.
 */
typedef struct lnet_libmd {
        struct list_head         md_list;
        lnet_libhandle_t         md_lh;
        lnet_me_t               *md_me;         /* ME this MD is attached to */
        char                    *md_start;      /* buffer base address */
        unsigned int             md_offset;     /* current local offset */
        unsigned int             md_length;     /* total buffer length */
        unsigned int             md_max_size;   /* max size of one operation */
        int                      md_threshold;  /* ops left before unlink */
        int                      md_refcount;   /* in-flight references */
        unsigned int             md_options;
        unsigned int             md_flags;      /* LNET_MD_FLAG_* below */
        void                    *md_user_ptr;   /* opaque user cookie */
        lnet_eq_t               *md_eq;         /* EQ for events on this MD */
        unsigned int             md_niov;       /* # frags */
        union {
                struct kvec     iov[LNET_MAX_IOV];
                lnet_kiov_t     kiov[LNET_MAX_IOV];
        } md_iov;
} lnet_libmd_t;

/* md_flags bits */
#define LNET_MD_FLAG_ZOMBIE             (1 << 0)
#define LNET_MD_FLAG_AUTO_UNLINK        (1 << 1)
#define LNET_MD_FLAG_ABORTED            (1 << 2)
 165
typedef struct {
        /* info about peers we are trying to fail */
        struct list_head        tp_list;        /* ln_test_peers */
        lnet_nid_t              tp_nid;         /* matching nid */
        unsigned int            tp_threshold;   /* # failures to simulate */
} lnet_test_peer_t;

/*
 * Handle cookies encode the resource type (MD/ME/EQ) in their low
 * LNET_COOKIE_TYPE_BITS bits; LNET_COOKIE_MASK extracts the type.
 */
#define LNET_COOKIE_TYPE_MD     1
#define LNET_COOKIE_TYPE_ME     2
#define LNET_COOKIE_TYPE_EQ     3
#define LNET_COOKIE_TYPE_BITS   2
#define LNET_COOKIE_MASK        ((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
 178
struct lnet_ni;                 /* forward ref */

/*
 * LND (Lustre Network Driver) operations table: one per network type.
 * The LND fills in lnd_type and the function pointers; the list linkage
 * and refcount are managed by the LNET core.
 */
typedef struct lnet_lnd {
        /* fields managed by portals */
        struct list_head        lnd_list;       /* stash in the LND table */
        int                     lnd_refcount;   /* # active instances */

        /* fields initialised by the LND */
        __u32                   lnd_type;

        int  (*lnd_startup)(struct lnet_ni *ni);
        void (*lnd_shutdown)(struct lnet_ni *ni);
        int  (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);

        /*
         * In data movement APIs below, payload buffers are described as a set
         * of 'niov' fragments which are...
         * EITHER
         *    in virtual memory (struct kvec *iov != NULL)
         * OR
         *    in pages (kernel only: lnet_kiov_t *kiov != NULL).
         * The LND may NOT overwrite these fragment descriptors.
         * An 'offset' may specify a byte offset within the set of
         * fragments to start from
         */

        /*
         * Start sending a preformatted message.  'private' is NULL for PUT and
         * GET messages; otherwise this is a response to an incoming message
         * and 'private' is the 'private' passed to lnet_parse().  Return
         * non-zero for immediate failure, otherwise complete later with
         * lnet_finalize()
         */
        int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);

        /*
         * Start receiving 'mlen' bytes of payload data, skipping the following
         * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
         * lnet_parse().  Return non-zero for immediate failure, otherwise
         * complete later with lnet_finalize().  This also gives back a receive
         * credit if the LND does flow control.
         */
        int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
                        int delayed, unsigned int niov,
                        struct kvec *iov, lnet_kiov_t *kiov,
                        unsigned int offset, unsigned int mlen,
                        unsigned int rlen);

        /*
         * lnet_parse() has had to delay processing of this message
         * (e.g. waiting for a forwarding buffer or send credits).  Give the
         * LND a chance to free urgently needed resources.  If called, return 0
         * for success and do NOT give back a receive credit; that has to wait
         * until lnd_recv() gets called.  On failure return < 0 and
         * release resources; lnd_recv() will not be called.
         */
        int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
                              lnet_msg_t *msg, void **new_privatep);

        /* notification of peer health */
        void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);

        /* query of peer aliveness */
        void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer,
                          unsigned long *when);

        /* accept a new connection */
        int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
} lnd_t;
 248
/* Per-CPT transmit credit pool and its queue of credit-blocked messages. */
struct lnet_tx_queue {
        int                     tq_credits;     /* # tx credits free */
        int                     tq_credits_min; /* lowest it's been */
        int                     tq_credits_max; /* total # tx credits */
        struct list_head        tq_delayed;     /* delayed TXs */
};
 255
/*
 * Network interface instance: binds a NID to an LND, with per-CPT TX
 * queues and reference counts.
 */
typedef struct lnet_ni {
        spinlock_t                ni_lock;
        struct list_head          ni_list;      /* chain on ln_nis */
        struct list_head          ni_cptlist;   /* chain on ln_nis_cpt */
        int                       ni_maxtxcredits; /* # tx credits  */
        /* # per-peer send credits */
        int                       ni_peertxcredits;
        /* # per-peer router buffer credits */
        int                       ni_peerrtrcredits;
        /* seconds to consider peer dead */
        int                       ni_peertimeout;
        int                       ni_ncpts;     /* number of CPTs */
        __u32                    *ni_cpts;      /* bond NI on some CPTs */
        lnet_nid_t                ni_nid;       /* interface's NID */
        void                     *ni_data;      /* instance-specific data */
        lnd_t                    *ni_lnd;       /* procedural interface */
        struct lnet_tx_queue    **ni_tx_queues; /* percpt TX queues */
        int                     **ni_refs;      /* percpt reference count */
        time64_t                  ni_last_alive;/* when I was last alive */
        lnet_ni_status_t         *ni_status;    /* my health status */
        /* equivalent interfaces to use */
        char                     *ni_interfaces[LNET_MAX_INTERFACES];
} lnet_ni_t;

#define LNET_PROTO_PING_MATCHBITS       0x8000000000000000LL

/*
 * NB: value of these features equal to LNET_PROTO_PING_VERSION_x
 * of old LNet, so there shouldn't be any compatibility issue
 */
#define LNET_PING_FEAT_INVAL            (0)             /* no feature */
#define LNET_PING_FEAT_BASE             (1 << 0)        /* just a ping */
#define LNET_PING_FEAT_NI_STATUS        (1 << 1)        /* return NI status */
/*
 * NOTE(review): name says "disabled" but the original comment said
 * "Routing enabled" — verify intended polarity against the router checker.
 */
#define LNET_PING_FEAT_RTE_DISABLED     (1 << 2)

#define LNET_PING_FEAT_MASK             (LNET_PING_FEAT_BASE | \
                                         LNET_PING_FEAT_NI_STATUS)
 293
/* router checker data, per router */
#define LNET_MAX_RTR_NIS   16
#define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
typedef struct {
        /* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
        struct list_head         rcd_list;
        lnet_handle_md_t         rcd_mdh;       /* ping buffer MD */
        struct lnet_peer        *rcd_gateway;   /* reference to gateway */
        lnet_ping_info_t        *rcd_pinginfo;  /* ping buffer */
} lnet_rc_data_t;
 304
/*
 * Per-peer state: credit accounting, aliveness tracking used by the
 * router checker, and the message queues that block on peer credits.
 */
typedef struct lnet_peer {
        struct list_head         lp_hashlist;   /* chain on peer hash */
        struct list_head         lp_txq;        /* messages blocking for
                                                   tx credits */
        struct list_head         lp_rtrq;       /* messages blocking for
                                                   router credits */
        struct list_head         lp_rtr_list;   /* chain on router list */
        int                      lp_txcredits;  /* # tx credits available */
        int                      lp_mintxcredits;  /* low water mark */
        int                      lp_rtrcredits;    /* # router credits */
        int                      lp_minrtrcredits; /* low water mark */
        unsigned int             lp_alive:1;       /* alive/dead? */
        unsigned int             lp_notify:1;   /* notification outstanding? */
        unsigned int             lp_notifylnd:1;/* outstanding notification
                                                   for LND? */
        unsigned int             lp_notifying:1; /* some thread is handling
                                                    notification */
        unsigned int             lp_ping_notsent;/* SEND event outstanding
                                                    from ping */
        int                      lp_alive_count; /* # times router went
                                                    dead<->alive */
        long                     lp_txqnob;      /* bytes queued for sending */
        unsigned long            lp_timestamp;   /* time of last aliveness
                                                    news */
        unsigned long            lp_ping_timestamp;/* time of last ping
                                                      attempt */
        unsigned long            lp_ping_deadline; /* != 0 if ping reply
                                                      expected */
        unsigned long            lp_last_alive; /* when I was last alive */
        unsigned long            lp_last_query; /* when lp_ni was queried
                                                   last time */
        lnet_ni_t               *lp_ni;         /* interface peer is on */
        lnet_nid_t               lp_nid;        /* peer's NID */
        int                      lp_refcount;   /* # refs */
        int                      lp_cpt;        /* CPT this peer attached on */
        /* # refs from lnet_route_t::lr_gateway */
        int                      lp_rtr_refcount;
        /* returned RC ping features */
        unsigned int             lp_ping_feats;
        struct list_head         lp_routes;     /* routers on this peer */
        lnet_rc_data_t          *lp_rcd;        /* router checker state */
} lnet_peer_t;

/* peer hash size */
#define LNET_PEER_HASH_BITS     9
#define LNET_PEER_HASH_SIZE     (1 << LNET_PEER_HASH_BITS)
 351
/* peer hash table */
struct lnet_peer_table {
        int                      pt_version;    /* /proc validity stamp */
        int                      pt_number;     /* # peers extant */
        /* # zombies to go to deathrow (and not there yet) */
        int                      pt_zombies;
        struct list_head         pt_deathrow;   /* zombie peers */
        struct list_head        *pt_hash;       /* NID->peer hash */
};

/*
 * peer aliveness is enabled only on routers for peers in a network where the
 * lnet_ni_t::ni_peertimeout has been set to a positive value
 */
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
                                         (lp)->lp_ni->ni_peertimeout > 0)
 368
/* A route to a remote network through a gateway peer. */
typedef struct {
        struct list_head         lr_list;       /* chain on net */
        struct list_head         lr_gwlist;     /* chain on gateway */
        lnet_peer_t             *lr_gateway;    /* router node */
        __u32                    lr_net;        /* remote network number */
        int                      lr_seq;        /* sequence for round-robin */
        unsigned int             lr_downis;     /* number of down NIs */
        __u32                    lr_hops;       /* how far I am */
        unsigned int             lr_priority;   /* route priority */
} lnet_route_t;

/* remote-network hash sizing; actual size chosen at runtime */
#define LNET_REMOTE_NETS_HASH_DEFAULT   (1U << 7)
#define LNET_REMOTE_NETS_HASH_MAX       (1U << 16)
#define LNET_REMOTE_NETS_HASH_SIZE      (1 << the_lnet.ln_remote_nets_hbits)
 383
/* A remote network and the list of routes that reach it. */
typedef struct {
        struct list_head        lrn_list;       /* chain on
                                                   ln_remote_nets_hash */
        struct list_head        lrn_routes;     /* routes to me */
        __u32                   lrn_net;        /* my net number */
} lnet_remotenet_t;

/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK          0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT        1
 395
/* Pool of router forwarding buffers of a single size class. */
typedef struct {
        struct list_head        rbp_bufs;       /* my free buffer pool */
        struct list_head        rbp_msgs;       /* messages blocking
                                                   for a buffer */
        int                     rbp_npages;     /* # pages in each buffer */
        /* requested number of buffers */
        int                     rbp_req_nbuffers;
        /* # buffers actually allocated */
        int                     rbp_nbuffers;
        int                     rbp_credits;    /* # free buffers /
                                                     blocked messages */
        int                     rbp_mincredits; /* low water mark */
} lnet_rtrbufpool_t;
 409
 410typedef struct {
 411        struct list_head         rb_list;       /* chain on rbp_bufs */
 412        lnet_rtrbufpool_t       *rb_pool;       /* owning pool */
 413        lnet_kiov_t              rb_kiov[0];    /* the buffer space */
 414} lnet_rtrbuf_t;
 415
 416#define LNET_PEER_HASHSIZE      503     /* prime! */
 417
 418#define LNET_TINY_BUF_IDX       0
 419#define LNET_SMALL_BUF_IDX      1
 420#define LNET_LARGE_BUF_IDX      2
 421
 422/* # different router buffer pools */
 423#define LNET_NRBPOOLS           (LNET_LARGE_BUF_IDX + 1)
 424
/* Result flags of matching an incoming message against a portal's MEs. */
enum {
        /* Didn't match anything */
        LNET_MATCHMD_NONE       = (1 << 0),
        /* Matched OK */
        LNET_MATCHMD_OK         = (1 << 1),
        /* Must be discarded */
        LNET_MATCHMD_DROP       = (1 << 2),
        /* match and buffer is exhausted */
        LNET_MATCHMD_EXHAUSTED  = (1 << 3),
        /* match or drop */
        LNET_MATCHMD_FINISH     = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
 437
/* Options for lnet_portal_t::ptl_options */
#define LNET_PTL_LAZY           (1 << 0)
#define LNET_PTL_MATCH_UNIQUE   (1 << 1)        /* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD (1 << 2)        /* wildcard match,
                                                   request portal */

/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
        __u64                   mi_mbits;       /* match bits */
        lnet_process_id_t       mi_id;          /* source process id */
        unsigned int            mi_opc;         /* operation: GET or PUT */
        unsigned int            mi_portal;      /* target portal index */
        unsigned int            mi_rlength;     /* requested length */
        unsigned int            mi_roffset;     /* requested offset */
};
 453
/* ME hash of RDMA portal */
#define LNET_MT_HASH_BITS               8
#define LNET_MT_HASH_SIZE               (1 << LNET_MT_HASH_BITS)
#define LNET_MT_HASH_MASK               (LNET_MT_HASH_SIZE - 1)
/*
 * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
 * the last entry is reserved for MEs with ignore-bits
 */
#define LNET_MT_HASH_IGNORE             LNET_MT_HASH_SIZE
/*
 * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
 * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
 * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE]
 */
#define LNET_MT_BITS_U64                6       /* 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS          (LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP          ((1 << LNET_MT_EXHAUSTED_BITS) + 1)

/* portal match table */
struct lnet_match_table {
        /* reserved for upcoming patches, CPU partition ID */
        unsigned int             mt_cpt;
        unsigned int             mt_portal;     /* portal index */
        /*
         * match table is set as "enabled" if there's non-exhausted MD
         * attached on mt_mhash, it's only valid for wildcard portal
         */
        unsigned int             mt_enabled;
        /* bitmap to flag whether MEs on mt_hash are exhausted or not */
        __u64                    mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
        struct list_head        *mt_mhash;      /* matching hash */
};
 486
 487/* these are only useful for wildcard portal */
 488/* Turn off message rotor for wildcard portals */
 489#define LNET_PTL_ROTOR_OFF      0
 490/* round-robin dispatch all PUT messages for wildcard portals */
 491#define LNET_PTL_ROTOR_ON       1
 492/* round-robin dispatch routed PUT message for wildcard portals */
 493#define LNET_PTL_ROTOR_RR_RT    2
 494/* dispatch routed PUT message by hashing source NID for wildcard portals */
 495#define LNET_PTL_ROTOR_HASH_RT  3
 496
 497typedef struct lnet_portal {
 498        spinlock_t                ptl_lock;
 499        unsigned int              ptl_index;    /* portal ID, reserved */
 500        /* flags on this portal: lazy, unique... */
 501        unsigned int              ptl_options;
 502        /* list of messages which are stealing buffer */
 503        struct list_head          ptl_msg_stealing;
 504        /* messages blocking for MD */
 505        struct list_head          ptl_msg_delayed;
 506        /* Match table for each CPT */
 507        struct lnet_match_table **ptl_mtables;
 508        /* spread rotor of incoming "PUT" */
 509        unsigned int              ptl_rotor;
 510        /* # active entries for this portal */
 511        int                       ptl_mt_nmaps;
 512        /* array of active entries' cpu-partition-id */
 513        int                       ptl_mt_maps[0];
 514} lnet_portal_t;
 515
/* handle-cookie hash sizing for resource containers */
#define LNET_LH_HASH_BITS       12
#define LNET_LH_HASH_SIZE       (1ULL << LNET_LH_HASH_BITS)
#define LNET_LH_HASH_MASK       (LNET_LH_HASH_SIZE - 1)

/* resource container (ME, MD, EQ) */
struct lnet_res_container {
        unsigned int             rec_type;      /* container type */
        __u64                    rec_lh_cookie; /* cookie generator */
        struct list_head         rec_active;    /* active resource list */
        struct list_head        *rec_lh_hash;   /* handle hash */
};
 527
/* message container */
struct lnet_msg_container {
        int                       msc_init;     /* initialized or not */
        /* max # threads finalizing */
        int                       msc_nfinalizers;
        /* msgs waiting to complete finalizing */
        struct list_head          msc_finalizing;
        struct list_head          msc_active;   /* active message list */
        /* threads doing finalization */
        void                    **msc_finalizers;
};
 539
/* Router Checker states */
#define LNET_RC_STATE_SHUTDOWN          0       /* not started */
#define LNET_RC_STATE_RUNNING           1       /* started up OK */
#define LNET_RC_STATE_STOPPING          2       /* telling thread to stop */

/*
 * Global LNET state: CPT layout, locks, resource/message containers,
 * NIs, peers, routing tables, and router-checker bookkeeping.  There is
 * a single instance of this (the_lnet).
 */
typedef struct {
        /* CPU partition table of LNet */
        struct cfs_cpt_table             *ln_cpt_table;
        /* number of CPTs in ln_cpt_table */
        unsigned int                      ln_cpt_number;
        unsigned int                      ln_cpt_bits;

        /* protect LNet resources (ME/MD/EQ) */
        struct cfs_percpt_lock           *ln_res_lock;
        /* # portals */
        int                               ln_nportals;
        /* the vector of portals */
        lnet_portal_t                   **ln_portals;
        /* percpt ME containers */
        struct lnet_res_container       **ln_me_containers;
        /* percpt MD container */
        struct lnet_res_container       **ln_md_containers;

        /* Event Queue container */
        struct lnet_res_container         ln_eq_container;
        wait_queue_head_t                 ln_eq_waitq;
        spinlock_t                        ln_eq_wait_lock;
        unsigned int                      ln_remote_nets_hbits;

        /* protect NI, peer table, credits, routers, rtrbuf... */
        struct cfs_percpt_lock           *ln_net_lock;
        /* percpt message containers for active/finalizing/freed message */
        struct lnet_msg_container       **ln_msg_containers;
        lnet_counters_t                 **ln_counters;
        struct lnet_peer_table          **ln_peer_tables;
        /* failure simulation */
        struct list_head                  ln_test_peers;
        struct list_head                  ln_drop_rules;
        struct list_head                  ln_delay_rules;

        struct list_head                  ln_nis;       /* LND instances */
        /* NIs bond on specific CPT(s) */
        struct list_head                  ln_nis_cpt;
        /* dying LND instances */
        struct list_head                  ln_nis_zombie;
        lnet_ni_t                        *ln_loni;      /* the loopback NI */

        /* remote networks with routes to them */
        struct list_head                 *ln_remote_nets_hash;
        /* validity stamp */
        __u64                             ln_remote_nets_version;
        /* list of all known routers */
        struct list_head                  ln_routers;
        /* validity stamp */
        __u64                             ln_routers_version;
        /* percpt router buffer pools */
        lnet_rtrbufpool_t               **ln_rtrpools;

        lnet_handle_md_t                  ln_ping_target_md;
        lnet_handle_eq_t                  ln_ping_target_eq;
        lnet_ping_info_t                 *ln_ping_info;

        /* router checker startup/shutdown state */
        int                               ln_rc_state;
        /* router checker's event queue */
        lnet_handle_eq_t                  ln_rc_eqh;
        /* rcd still pending on net */
        struct list_head                  ln_rcd_deathrow;
        /* rcd ready for free */
        struct list_head                  ln_rcd_zombie;
        /* serialise startup/shutdown */
        struct semaphore                  ln_rc_signal;

        struct mutex                      ln_api_mutex;
        struct mutex                      ln_lnd_mutex;
        struct mutex                      ln_delay_mutex;
        /* Have I called LNetNIInit myself? */
        int                               ln_niinit_self;
        /* LNetNIInit/LNetNIFini counter */
        int                               ln_refcount;
        /* shutdown in progress */
        int                               ln_shutdown;

        int                               ln_routing;   /* am I a router? */
        lnet_pid_t                        ln_pid;       /* requested pid */
        /* uniquely identifies this ni in this epoch */
        __u64                             ln_interface_cookie;
        /* registered LNDs */
        struct list_head                  ln_lnds;

        /* test protocol compatibility flags */
        int                               ln_testprotocompat;

        /*
         * 0 - load the NIs from the mod params
         * 1 - do not load the NIs from the mod params
         * Reverse logic to ensure that other calls to LNetNIInit
         * need no change
         */
        bool                              ln_nis_from_mod_params;

        /*
         * waitq for router checker.  As long as there are no routes in
         * the list, the router checker will sleep on this queue.  when
         * routes are added the thread will wake up
         */
        wait_queue_head_t                 ln_rc_waitq;

} lnet_t;
 649
 650#endif
 651