linux/drivers/net/ethernet/google/gve/gve.h
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include "gve_desc.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	5
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

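/* Example (illustrative sketch, not part of the driver API): the low,
 * sub-page bits of a data slot address can carry per-buffer state (e.g.
 * which half of the page is in use), so the page-aligned address is
 * recovered by masking. A hypothetical helper:
 *
 *	static inline u64 gve_example_slot_page_addr(u64 raw_addr)
 *	{
 *		return raw_addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
 *	}
 */
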
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u8 page_offset; /* flipped to second half? */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* An RX ring that contains a power-of-two sized desc and data ring. */
struct gve_rx_ring {
	struct gve_priv *gve;
	struct gve_rx_desc_queue desc;
	struct gve_rx_data_queue data;
	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u32 db_threshold; /* threshold for posting new buffs and descs */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
};

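/* Example (illustrative): on 32-bit architectures the free-running u64
 * counters above must be read under the statss seqcount, along these lines:
 *
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&rx->statss);
 *		packets = rx->rpackets;
 *		bytes = rx->rbytes;
 *	} while (u64_stats_fetch_retry(&rx->statss, start));
 */
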
/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

struct gve_tx_dma_buf {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct gve_tx_dma_buf buf;
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

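/* Example (hypothetical sketch, not the driver's real helper): before
 * copying a packet into the FIFO, a transmit path would check for room
 * roughly like this:
 *
 *	static inline bool gve_example_fifo_can_fit(struct gve_tx_fifo *fifo,
 *						    u32 bytes)
 *	{
 *		return atomic_read(&fifo->available) >= bytes;
 *	}
 */
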
/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	struct gve_tx_fifo tx_fifo;
	u32 req; /* driver tracked head pointer */
	u32 done; /* driver tracked tail pointer */

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union gve_tx_desc *desc ____cacheline_aligned;
	struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

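/* Example (illustrative sketch): because req and done are free-running and
 * the ring size is a power of two, the number of free descriptors falls out
 * of simple unsigned arithmetic:
 *
 *	static inline u32 gve_example_tx_avail(struct gve_tx_ring *tx)
 *	{
 *		return tx->mask + 1 - (tx->req - tx->done);
 *	}
 */
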
/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */
	u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

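/* Example (illustrative, following the service-task pattern above): a caller
 * requests a reset by setting the flag and kicking the service workqueue:
 *
 *	gve_set_do_reset(priv);
 *	queue_work(priv->gve_wq, &priv->service_task);
 *
 * The service task then tests and clears the flag before acting on it.
 */
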
/* Returns the address of the ntfy_block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}

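/* Example (illustrative): interrupts are masked or acked by writing a
 * big-endian command value to the block's doorbell; assuming an irq command
 * constant such as GVE_IRQ_MASK defined elsewhere in the driver:
 *
 *	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
 */
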
/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

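/* Example: with num_ntfy_blks == 8, TX queues 0-3 map to notify blocks 0-3
 * and RX queues 0-3 map to blocks 4-7, which is why num_ntfy_blks must be
 * even and is split evenly between TX and RX.
 */
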
/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == priv->qpl_cfg.qpl_map_size)
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

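/* Example (illustrative): QPL ids are allocated TX-first, so with 4 TX and
 * 4 RX queues gve_assign_tx_qpl() hands out ids 0-3 and gve_assign_rx_qpl()
 * ids 4-7. A caller returns an id to the pool with gve_unassign_qpl():
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;
 *	...
 *	gve_unassign_qpl(priv, qpl->id);
 */
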
/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
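/* Example (illustrative): an rx queue setup path would allocate and map one
 * page per data slot, roughly:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *	int err;
 *
 *	err = gve_alloc_page(priv, &priv->pdev->dev, &page, &dma,
 *			     DMA_FROM_DEVICE);
 *	if (err)
 *		return err;
 *	...
 *	gve_free_page(&priv->pdev->dev, page, dma, DMA_FROM_DEVICE);
 */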
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */