linux/drivers/staging/lustre/include/linux/lnet/types.h
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef __LNET_TYPES_H__
#define __LNET_TYPES_H__

/** \addtogroup lnet
 * @{ */

#include <linux/libcfs/libcfs.h>

/** \addtogroup lnet_addr
 * @{ */

/** Portal reserved for LNet's own use.
 * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
 */
#define LNET_RESERVED_PORTAL      0

/**
 * Address of an end-point in an LNet network.
 *
 * A node can have multiple end-points and hence multiple addresses.
 * An LNet network can be a simple network (e.g. tcp0) or a network of
 * LNet networks connected by LNet routers. Therefore an end-point address
 * has two parts: network ID, and address within a network.
 *
 * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
 */
typedef __u64 lnet_nid_t;
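
/*
 * A minimal sketch of splitting and rebuilding a NID. It assumes the
 * LNET_NIDNET(), LNET_NIDADDR() and LNET_MKNID() helpers declared elsewhere
 * in the LNet headers, which pack the network ID into the upper 32 bits of
 * the NID and the address within that network into the lower 32 bits:
 *
 *        lnet_nid_t nid = some_endpoint_nid;     // an end-point address
 *        __u32 net  = LNET_NIDNET(nid);          // network ID part
 *        __u32 addr = LNET_NIDADDR(nid);         // address within the network
 *
 *        // Recombining the two parts yields the original NID.
 *        LASSERT(LNET_MKNID(net, addr) == nid);
 */
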
/**
 * ID of a process in a node. Shortened as PID to distinguish from
 * lnet_process_id_t, the global process ID.
 */
typedef __u32 lnet_pid_t;

/** wildcard NID that matches any end-point address */
#define LNET_NID_ANY      ((lnet_nid_t) -1)
/** wildcard PID that matches any lnet_pid_t */
#define LNET_PID_ANY      ((lnet_pid_t) -1)

#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */

#define LNET_TIME_FOREVER    (-1)

/**
 * Objects maintained by LNet are accessed through handles. Handle types
 * have names of the form lnet_handle_xx_t, where xx is one of the two-letter
 * object type codes ('eq' for event queue, 'md' for memory descriptor, and
 * 'me' for match entry).
 * Each type of object is given a unique handle type to enhance type checking.
 * The type lnet_handle_any_t can be used when a generic handle is needed.
 * Every handle value can be converted into a value of type lnet_handle_any_t
 * without loss of information.
 */
typedef struct {
        __u64    cookie;
} lnet_handle_any_t;

typedef lnet_handle_any_t lnet_handle_eq_t;
typedef lnet_handle_any_t lnet_handle_md_t;
typedef lnet_handle_any_t lnet_handle_me_t;

#define LNET_WIRE_HANDLE_COOKIE_NONE   (-1)

/**
 * Invalidate handle \a h.
 */
static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
{
        h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
}

/**
 * Compare handles \a h1 and \a h2.
 *
 * \return 1 if the handles are equal, 0 otherwise.
 */
static inline int LNetHandleIsEqual(lnet_handle_any_t h1, lnet_handle_any_t h2)
{
        return (h1.cookie == h2.cookie);
}

/**
 * Check whether handle \a h is invalid.
 *
 * \return 1 if the handle is invalid, 0 if it is valid.
 */
static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
{
        return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
}
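
/*
 * A short, self-contained sketch of how the helpers above are meant to be
 * used: a handle is nullified with LNetInvalidateHandle() before it has been
 * bound to an object, and LNetHandleIsInvalid() then reports that state:
 *
 *        lnet_handle_md_t mdh;
 *
 *        LNetInvalidateHandle(&mdh);      // mark as "no MD attached"
 *        LASSERT(LNetHandleIsInvalid(mdh));
 *
 *        // Later, once the handle has been filled in (e.g. by LNetMDAttach()),
 *        // LNetHandleIsInvalid(mdh) returns 0 and the handle can be used.
 */
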
/**
 * Global process ID.
 */
typedef struct {
        /** node id */
        lnet_nid_t nid;
        /** process id */
        lnet_pid_t pid;
} lnet_process_id_t;
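
/*
 * A minimal sketch of building a wildcard peer identifier from the types
 * above. An lnet_process_id_t filled with LNET_NID_ANY/LNET_PID_ANY matches
 * any peer, which is the usual way to accept requests from arbitrary senders
 * (for example when passed as the match identity of a match entry):
 *
 *        lnet_process_id_t any_peer;
 *
 *        any_peer.nid = LNET_NID_ANY;     // match any end-point address
 *        any_peer.pid = LNET_PID_ANY;     // match any process on that node
 */
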
/** @} lnet_addr */

/** \addtogroup lnet_me
 * @{ */

/**
 * Specifies whether the match entry or memory descriptor should be unlinked
 * automatically (LNET_UNLINK) or not (LNET_RETAIN).
 */
typedef enum {
        LNET_RETAIN = 0,
        LNET_UNLINK
} lnet_unlink_t;

/**
 * Values of the type lnet_ins_pos_t are used to control where a new match
 * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
 * entry before the current entry or before the head of the list. The value
 * LNET_INS_AFTER is used to insert the new entry after the current entry
 * or after the last item in the list.
 */
typedef enum {
        /** insert ME before current position or head of the list */
        LNET_INS_BEFORE,
        /** insert ME after current position or tail of the list */
        LNET_INS_AFTER,
        /** attach ME at tail of local CPU partition ME list */
        LNET_INS_LOCAL
} lnet_ins_pos_t;
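
/*
 * A sketch of how lnet_unlink_t and lnet_ins_pos_t are typically combined,
 * assuming the LNetMEAttach() prototype declared in the LNet API header
 * (lnet/api.h); "portal" and "match_bits" stand for values chosen by the
 * caller. The new match entry is appended at the tail of the portal's list
 * and is unlinked automatically once its MD is consumed:
 *
 *        lnet_process_id_t any_peer = { LNET_NID_ANY, LNET_PID_ANY };
 *        lnet_handle_me_t  meh;
 *        int               rc;
 *
 *        rc = LNetMEAttach(portal, any_peer, match_bits,
 *                          0,              // ignore bits: match exactly
 *                          LNET_UNLINK, LNET_INS_AFTER, &meh);
 */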

/** @} lnet_me */

/** \addtogroup lnet_md
 * @{ */

/**
 * Defines the visible parts of a memory descriptor. Values of this type
 * are used to initialize memory descriptors.
 */
typedef struct {
        /**
         * Specify the memory region associated with the memory descriptor.
         * If the options field has:
         * - LNET_MD_KIOV bit set: The start field points to the starting
         * address of an array of lnet_kiov_t and the length field specifies
         * the number of entries in the array. The length can't be bigger
         * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
         * fragments that are not necessarily mapped in virtual memory.
         * - LNET_MD_IOVEC bit set: The start field points to the starting
         * address of an array of struct iovec and the length field specifies
         * the number of entries in the array. The length can't be bigger
         * than LNET_MAX_IOV. The struct iovec is used to describe fragments
         * that have virtual addresses.
         * - Otherwise: The memory region is contiguous. The start field
         * specifies the starting address for the memory region and the
         * length field specifies its length.
         *
         * When the memory region is fragmented, all fragments but the first
         * one must start on a page boundary, and all but the last must end
         * on a page boundary.
         */
        void        *start;
        unsigned int     length;
        /**
         * Specifies the maximum number of operations that can be performed
         * on the memory descriptor. An operation is any action that could
         * possibly generate an event. In the usual case, the threshold value
         * is decremented for each operation on the MD. When the threshold
         * drops to zero, the MD becomes inactive and does not respond to
         * operations. A threshold value of LNET_MD_THRESH_INF indicates that
         * there is no bound on the number of operations that may be applied
         * to a MD.
         */
        int           threshold;
        /**
         * Specifies the largest incoming request that the memory descriptor
         * should respond to. When the unused portion of a MD (length -
         * local offset) falls below this value, the MD becomes inactive and
         * does not respond to further operations. This value is only used
         * if the LNET_MD_MAX_SIZE option is set.
         */
        int           max_size;
        /**
         * Specifies the behavior of the memory descriptor. A bitwise OR
         * of the following values can be used:
         * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
         * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
         * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
         *   region is provided by the incoming request. By default, the
         *   offset is maintained locally. When maintained locally, the
         *   offset is incremented by the length of the request so that
         *   the next operation (PUT or GET) will access the next part of
         *   the memory region. Note that only one offset variable exists
         *   per memory descriptor. If both PUT and GET operations are
         *   performed on a memory descriptor, the offset is updated each time.
         * - LNET_MD_TRUNCATE: The length provided in the incoming request can
         *   be reduced to match the memory available in the region (determined
         *   by subtracting the offset from the length of the memory region).
         *   By default, if the length in the incoming operation is greater
         *   than the amount of memory available, the operation is rejected.
         * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
         *   incoming PUT operations, even if requested. By default,
         *   acknowledgments are sent for PUT operations that request an
         *   acknowledgment. Acknowledgments are never sent for GET operations.
         *   The data sent in the REPLY serves as an implicit acknowledgment.
         * - LNET_MD_KIOV: The start and length fields specify an array of
         *   lnet_kiov_t.
         * - LNET_MD_IOVEC: The start and length fields specify an array of
         *   struct iovec.
         * - LNET_MD_MAX_SIZE: The max_size field is valid.
         *
         * Note:
         * - LNET_MD_KIOV and LNET_MD_IOVEC both provide scatter/gather
         *   capability for memory descriptors, but they cannot both be set.
         * - When LNET_MD_MAX_SIZE is set, the total length of the memory
         *   region (i.e. sum of all fragment lengths) must not be less than
         *   \a max_size.
         */
        unsigned int     options;
        /**
         * A user-specified value that is associated with the memory
         * descriptor. The value does not need to be a pointer, but must fit
         * in the space used by a pointer. This value is recorded in events
         * associated with operations on this MD.
         */
        void        *user_ptr;
        /**
         * A handle for the event queue used to log the operations performed on
         * the memory region. If this argument is a NULL handle (i.e. nullified
         * by LNetInvalidateHandle()), operations performed on this memory
         * descriptor are not logged.
         */
        lnet_handle_eq_t eq_handle;
} lnet_md_t;

/* Max Transfer Unit (minimum supported everywhere).
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local. */
#define LNET_MTU_BITS   20
#define LNET_MTU        (1 << LNET_MTU_BITS)

/** limit on the number of fragments in discontiguous MDs */
#define LNET_MAX_IOV    256

/* Max payload size */
# define LNET_MAX_PAYLOAD       CONFIG_LNET_MAX_PAYLOAD
# if (LNET_MAX_PAYLOAD < LNET_MTU)
#  error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb"
# else
#  if (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV))
/*  PAGE_SIZE is a constant: check with cpp! */
#   error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb"
#  endif
# endif
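
/*
 * A worked example of the bounds enforced above, assuming the common case of
 * 4 KiB pages: LNET_MTU is 1 << 20 = 1 MiB, and PAGE_SIZE * LNET_MAX_IOV is
 * 4096 * 256 = 1 MiB as well, so on such systems LNET_MAX_PAYLOAD is pinned
 * to exactly 1 MiB. Larger page sizes only relax the upper bound.
 */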

/**
 * Options for the MD structure. See lnet_md_t::options.
 */
#define LNET_MD_OP_PUT          (1 << 0)
/** See lnet_md_t::options. */
#define LNET_MD_OP_GET          (1 << 1)
/** See lnet_md_t::options. */
#define LNET_MD_MANAGE_REMOTE   (1 << 2)
/* unused                       (1 << 3) */
/** See lnet_md_t::options. */
#define LNET_MD_TRUNCATE        (1 << 4)
/** See lnet_md_t::options. */
#define LNET_MD_ACK_DISABLE     (1 << 5)
/** See lnet_md_t::options. */
#define LNET_MD_IOVEC           (1 << 6)
/** See lnet_md_t::options. */
#define LNET_MD_MAX_SIZE        (1 << 7)
/** See lnet_md_t::options. */
#define LNET_MD_KIOV            (1 << 8)

/* For compatibility with Cray Portals */
#define LNET_MD_PHYS            0

/** Infinite threshold on MD operations. See lnet_md_t::threshold */
#define LNET_MD_THRESH_INF      (-1)
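
/*
 * A minimal sketch of initializing an lnet_md_t for a single contiguous
 * buffer that accepts incoming PUTs and reports them to an event queue.
 * "buf", "buf_len" and "eqh" are placeholders for a caller-provided buffer
 * and a previously allocated EQ handle; attaching the descriptor (e.g. with
 * LNetMDAttach() from the LNet API) is not shown here:
 *
 *        lnet_md_t md;
 *
 *        md.start     = buf;                     // contiguous region
 *        md.length    = buf_len;                 // its length in bytes
 *        md.threshold = LNET_MD_THRESH_INF;      // never auto-deactivate
 *        md.max_size  = 0;                       // LNET_MD_MAX_SIZE not set
 *        md.options   = LNET_MD_OP_PUT;          // only PUTs are accepted
 *        md.user_ptr  = NULL;                    // nothing to record per-MD
 *        md.eq_handle = eqh;                     // log operations to this EQ
 */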

/* NB lustre portals uses struct iovec internally! */
typedef struct iovec lnet_md_iovec_t;

/**
 * A page-based fragment of a MD.
 */
typedef struct {
        /** Pointer to the page where the fragment resides */
        struct page      *kiov_page;
        /** Length in bytes of the fragment */
        unsigned int     kiov_len;
        /**
         * Starting offset of the fragment within the page. Note that the
         * end of the fragment must not pass the end of the page; i.e.,
         * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
         */
        unsigned int     kiov_offset;
} lnet_kiov_t;
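
/*
 * A short sketch of describing a two-page buffer with lnet_kiov_t fragments,
 * for use with an MD that has LNET_MD_KIOV set (md.start pointing at the
 * array, md.length set to the number of entries). "page0" and "page1" are
 * placeholders for pages obtained by the caller. Only the first fragment may
 * start mid-page and only the last may end early:
 *
 *        lnet_kiov_t kiov[2];
 *
 *        kiov[0].kiov_page   = page0;
 *        kiov[0].kiov_offset = 0;
 *        kiov[0].kiov_len    = PAGE_CACHE_SIZE;  // must end on page boundary
 *
 *        kiov[1].kiov_page   = page1;
 *        kiov[1].kiov_offset = 0;                // must start on page boundary
 *        kiov[1].kiov_len    = 1024;             // last fragment may end early
 */
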
/** @} lnet_md */

/** \addtogroup lnet_eq
 * @{ */

/**
 * Six types of events can be logged in an event queue.
 */
typedef enum {
        /** An incoming GET operation has completed on the MD. */
        LNET_EVENT_GET          = 1,
        /**
         * An incoming PUT operation has completed on the MD. The
         * underlying layers will not alter the memory (on behalf of this
         * operation) once this event has been logged.
         */
        LNET_EVENT_PUT,
        /**
         * A REPLY operation has completed. This event is logged after the
         * data (if any) from the REPLY has been written into the MD.
         */
        LNET_EVENT_REPLY,
        /** An acknowledgment has been received. */
        LNET_EVENT_ACK,
        /**
         * An outgoing send (PUT or GET) operation has completed. This event
         * is logged after the entire buffer has been sent and it is safe for
         * the caller to reuse the buffer.
         *
         * Note:
         * - LNET_EVENT_SEND doesn't guarantee message delivery. It can
         *   happen even before the message has been put on the wire.
         * - It's unsafe to assume that in an outgoing GET operation
         *   the LNET_EVENT_SEND event would happen before the
         *   LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
         *   LNET_EVENT_ACK events in an outgoing PUT operation.
         */
        LNET_EVENT_SEND,
        /**
         * A MD has been unlinked. Note that LNetMDUnlink() does not
         * necessarily trigger an LNET_EVENT_UNLINK event.
         * \see LNetMDUnlink
         */
        LNET_EVENT_UNLINK,
} lnet_event_kind_t;

#define LNET_SEQ_BASETYPE       long
typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
#define LNET_SEQ_GT(a, b)       (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
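
/*
 * LNET_SEQ_GT() compares sequence numbers via signed subtraction, so the
 * ordering remains correct even after the unsigned counter wraps around
 * (as long as the two values are less than half the range apart). A small
 * illustration of the wraparound case:
 *
 *        lnet_seq_t prev = (lnet_seq_t)-1;       // just before wraparound
 *        lnet_seq_t next = prev + 1;             // wraps to 0
 *
 *        // "next" is still considered later than "prev".
 *        LASSERT(LNET_SEQ_GT(next, prev));
 */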

/* XXX
 * cygwin needs the pragma line; not clear if it's needed in other places.
 * checking!!!
 */
#ifdef __CYGWIN__
#pragma pack(push, 4)
#endif

/**
 * Information about an event on a MD.
 */
typedef struct {
        /** The identifier (nid, pid) of the target. */
        lnet_process_id_t   target;
        /** The identifier (nid, pid) of the initiator. */
        lnet_process_id_t   initiator;
        /**
         * The NID of the immediate sender. If the request has been forwarded
         * by routers, this is the NID of the last hop; otherwise it's the
         * same as the initiator.
         */
        lnet_nid_t        sender;
        /** Indicates the type of the event. */
        lnet_event_kind_t   type;
        /** The portal table index specified in the request */
        unsigned int    pt_index;
        /** A copy of the match bits specified in the request. */
        __u64          match_bits;
        /** The length (in bytes) specified in the request. */
        unsigned int    rlength;
        /**
         * The length (in bytes) of the data that was manipulated by the
         * operation. For truncated operations, the manipulated length will be
         * the number of bytes specified by the MD (possibly with an offset,
         * see lnet_md_t). For all other operations, the manipulated length
         * will be the length of the requested operation, i.e. rlength.
         */
        unsigned int    mlength;
        /**
         * The handle to the MD associated with the event. The handle may be
         * invalid if the MD has been unlinked.
         */
        lnet_handle_md_t    md_handle;
        /**
         * A snapshot of the state of the MD immediately after the event has
         * been processed. In particular, the threshold field in md will
         * reflect the value of the threshold after the operation occurred.
         */
        lnet_md_t          md;
        /**
         * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
         * \see LNetPut
         */
        __u64          hdr_data;
        /**
         * Indicates the completion status of the operation. It's 0 for
         * successful operations, otherwise it's an error code.
         */
        int              status;
        /**
         * Indicates whether the MD has been unlinked. Note that:
         * - An event with unlinked set is the last event on the MD.
         * - This field is also set for an explicit LNET_EVENT_UNLINK event.
         * \see LNetMDUnlink
         */
        int              unlinked;
        /**
         * The displacement (in bytes) into the memory region that the
         * operation used. The offset can be determined by the operation for
         * a remotely managed MD or by the local MD.
         * \see lnet_md_t::options
         */
        unsigned int    offset;
        /**
         * The sequence number for this event. Sequence numbers are unique
         * to each event.
         */
        volatile lnet_seq_t sequence;
} lnet_event_t;
#ifdef __CYGWIN__
#pragma pack(pop)
#endif

/**
 * Event queue handler function type.
 *
 * The EQ handler runs for each event that is deposited into the EQ. The
 * handler is supplied with a pointer to the event that triggered the
 * handler invocation.
 *
 * The handler must not block, must be reentrant, and must not call any LNet
 * API functions. It should return as quickly as possible.
 */
typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
#define LNET_EQ_HANDLER_NONE NULL
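
/*
 * A minimal sketch of an EQ handler obeying the rules above: it only inspects
 * the event and hands the result to the caller's own completion machinery
 * (here a hypothetical "struct my_request" recovered from md.user_ptr and a
 * hypothetical my_request_done() helper), without blocking or calling back
 * into LNet:
 *
 *        static void my_eq_handler(lnet_event_t *ev)
 *        {
 *                struct my_request *req = ev->md.user_ptr;
 *
 *                if (ev->status != 0)            // operation failed
 *                        req->error = ev->status;
 *
 *                if (ev->unlinked)               // last event for this MD
 *                        my_request_done(req);
 *        }
 */
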
/** @} lnet_eq */

/** \addtogroup lnet_data
 * @{ */

/**
 * Specify whether an acknowledgment should be sent by the target when the PUT
 * operation completes (i.e., when the data has been written to a MD of the
 * target process).
 *
 * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
 * acknowledgments can be disabled for a MD.
 */
typedef enum {
        /** Request an acknowledgment */
        LNET_ACK_REQ,
        /** Request that no acknowledgment should be generated. */
        LNET_NOACK_REQ
} lnet_ack_req_t;
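
/*
 * A sketch of how lnet_ack_req_t is passed to a PUT, assuming the LNetPut()
 * prototype declared in the LNet API header (lnet/api.h). "mdh", "peer",
 * "portal" and "match_bits" are placeholders supplied by the caller; with
 * LNET_NOACK_REQ instead, the target never generates an ACK for this PUT:
 *
 *        int rc;
 *
 *        rc = LNetPut(LNET_NID_ANY,              // let LNet pick the source NID
 *                     mdh,                       // MD describing the data
 *                     LNET_ACK_REQ,              // ask for an LNET_EVENT_ACK
 *                     peer, portal, match_bits,
 *                     0,                         // remote offset
 *                     0);                        // hdr_data (out-of-band 64 bits)
 */
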
/** @} lnet_data */

/** @} lnet */
#endif /* __LNET_TYPES_H__ */