linux/drivers/net/ethernet/google/gve/gve.h
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE    0x1ae0
#endif

#define PCI_DEV_ID_GVNIC        0x0042

#define GVE_REGISTER_BAR        0
#define GVE_DOORBELL_BAR        2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC        4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD   20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES  1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
        struct gve_rx_desc *desc_ring; /* the descriptor ring */
        dma_addr_t bus; /* the bus for the desc_ring */
        u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
        struct page *page;
        void *page_address;
        u32 page_offset; /* offset to write to in page */
        int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
        u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
        u32 id; /* unique id */
        u32 num_entries;
        struct page **pages; /* list of num_entries pages */
        dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
        union gve_rx_data_slot *data_ring; /* read by NIC */
        dma_addr_t data_bus; /* dma mapping of the slots */
        struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
        struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
        u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
        struct gve_rx_desc_dqo *desc_ring;
        dma_addr_t bus;
        u32 head; /* Pointer to start cleaning buffers at. */
        u32 tail; /* Last posted buffer index + 1 */
        u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
        struct gve_rx_compl_desc_dqo *desc_ring;
        dma_addr_t bus;

        /* Number of slots which did not have a buffer posted yet. We should not
         * post more buffers than the queue size to avoid HW overrunning the
         * queue.
         */
        int num_free_slots;

        /* HW uses a "generation bit" to notify SW of new descriptors. When a
         * descriptor's generation bit is different from the current generation,
         * that descriptor is ready to be consumed by SW.
         */
        u8 cur_gen_bit;

        /* Pointer into desc_ring where the next completion descriptor will be
         * received.
         */
        u32 head;
        u32 mask; /* Mask for indices to the size of the ring */
};
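
/* Illustrative sketch (not part of the driver API): how the generation-bit
 * scheme described above might be consumed. The `generation` field name on
 * struct gve_rx_compl_desc_dqo is assumed from gve_desc_dqo.h; real
 * completion handling lives in the DQO RX path and also issues a read
 * barrier before using the rest of the descriptor.
 */
static inline bool
gve_example_compl_desc_ready(const struct gve_rx_compl_queue_dqo *complq)
{
        const struct gve_rx_compl_desc_dqo *desc =
                &complq->desc_ring[complq->head];

        /* A descriptor is ready once HW has written it with the opposite
         * generation bit; cur_gen_bit flips each time head wraps, so stale
         * descriptors from the previous pass are never re-consumed.
         */
        return desc->generation != complq->cur_gen_bit;
}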

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
        /* The page posted to HW. */
        struct gve_rx_slot_page_info page_info;

        /* The DMA address corresponding to `page_info`. */
        dma_addr_t addr;

        /* Last offset into the page when it only had a single reference, at
         * which point every other offset is free to be reused.
         */
        u32 last_single_ref_offset;

        /* Linked list index to next element in the list, or -1 if none */
        s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
        s16 head;
        s16 tail;
};
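
/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * popping the head of a gve_index_list that threads through an array of
 * gve_rx_buf_state_dqo via the s16 `next` indices, with -1 as the empty
 * sentinel. The DQO RX path keeps equivalent list-management logic.
 */
static inline s16 gve_example_index_list_pop(struct gve_index_list *list,
                                             struct gve_rx_buf_state_dqo *states)
{
        s16 idx = list->head;

        if (idx == -1) /* list is empty */
                return -1;

        list->head = states[idx].next;
        if (list->head == -1) /* popped the last element */
                list->tail = -1;
        states[idx].next = -1;
        return idx;
}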

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
        /* head and tail of skb chain for the current packet or NULL if none */
        struct sk_buff *skb_head;
        struct sk_buff *skb_tail;
        u16 total_expected_size;
        u8 expected_frag_cnt;
        u8 curr_frag_cnt;
        u8 reuse_frags;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
        struct gve_priv *gve;
        union {
                /* GQI fields */
                struct {
                        struct gve_rx_desc_queue desc;
                        struct gve_rx_data_queue data;

                        /* threshold for posting new buffs and descs */
                        u32 db_threshold;
                        u16 packet_buffer_size;
                };

                /* DQO fields. */
                struct {
                        struct gve_rx_buf_queue_dqo bufq;
                        struct gve_rx_compl_queue_dqo complq;

                        struct gve_rx_buf_state_dqo *buf_states;
                        u16 num_buf_states;

                        /* Linked list of gve_rx_buf_state_dqo. Index into
                         * buf_states, or -1 if empty.
                         */
                        s16 free_buf_states;

                        /* Linked list of gve_rx_buf_state_dqo. Indexes into
                         * buf_states, or -1 if empty.
                         *
                         * This list contains buf_states which are pointing to
                         * valid buffers.
                         *
                         * We use a FIFO here in order to increase the
                         * probability that buffers can be reused by increasing
                         * the time between usages.
                         */
                        struct gve_index_list recycled_buf_states;

                        /* Linked list of gve_rx_buf_state_dqo. Indexes into
                         * buf_states, or -1 if empty.
                         *
                         * This list contains buf_states which have buffers
                         * which cannot be reused yet.
                         */
                        struct gve_index_list used_buf_states;
                } dqo;
        };

        u64 rbytes; /* free-running bytes received */
        u64 rpackets; /* free-running packets received */
        u32 cnt; /* free-running total number of completed packets */
        u32 fill_cnt; /* free-running total number of descs and buffs posted */
        u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
        u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
        u64 rx_copied_pkt; /* free-running total number of copied packets */
        u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
        u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
        u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
        u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
        u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
        u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
        u32 q_num; /* queue index */
        u32 ntfy_id; /* notification block index */
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        dma_addr_t q_resources_bus; /* dma address for the queue resources */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */

        struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};
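
/* Illustrative sketch (hypothetical helpers): cnt and fill_cnt are
 * free-running counters, so a ring slot is obtained by masking with `mask`
 * (ring sizes are powers of two), and the difference between the two
 * counters is the number of buffers currently posted and not yet consumed.
 */
static inline u32 gve_example_rx_slot(const struct gve_rx_ring *rx)
{
        return rx->cnt & rx->mask; /* slot of the next descriptor to complete */
}

static inline u32 gve_example_rx_bufs_outstanding(const struct gve_rx_ring *rx)
{
        return rx->fill_cnt - rx->cnt; /* u32 arithmetic handles wrap-around */
}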

/* A TX desc ring entry */
union gve_tx_desc {
        struct gve_tx_pkt_desc pkt; /* first desc for a packet */
        struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
        u32 iov_offset; /* offset into this segment */
        u32 iov_len; /* length */
        u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
        struct sk_buff *skb; /* skb for this pkt */
        union {
                struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
                struct {
                        DEFINE_DMA_UNMAP_ADDR(dma);
                        DEFINE_DMA_UNMAP_LEN(len);
                };
        };
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
        void *base; /* address of base of FIFO */
        u32 size; /* total size */
        atomic_t available; /* how much space is still available */
        u32 head; /* offset to write at */
        struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
        struct gve_tx_pkt_desc_dqo pkt;
        struct gve_tx_tso_context_desc_dqo tso_ctx;
        struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
        /* Packet is in free list, available to be allocated.
         * This should always be zero since state is not explicitly initialized.
         */
        GVE_PACKET_STATE_UNALLOCATED,
        /* Packet is expecting a regular data completion or miss completion */
        GVE_PACKET_STATE_PENDING_DATA_COMPL,
        /* Packet has received a miss completion and is expecting a
         * re-injection completion.
         */
        GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
        /* No valid completion received within the specified timeout. */
        GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
        struct sk_buff *skb; /* skb for this packet */

        /* 0th element corresponds to the linear portion of `skb`, should be
         * unmapped with `dma_unmap_single`.
         *
         * All others correspond to `skb`'s frags and should be unmapped with
         * `dma_unmap_page`.
         */
        DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
        DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
        u16 num_bufs;

        /* Linked list index to next element in the list, or -1 if none */
        s16 next;

        /* Linked list index to prev element in the list, or -1 if none.
         * Used for tracking either outstanding miss completions or prematurely
         * freed packets.
         */
        s16 prev;

        /* Identifies the current state of the packet as defined in
         * `enum gve_packet_state`.
         */
        u8 state;

        /* If packet is an outstanding miss completion, then the packet is
         * freed if the corresponding re-injection completion is not received
         * before kernel jiffies exceeds timeout_jiffies.
         */
        unsigned long timeout_jiffies;
};
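
/* Illustrative sketch (hypothetical helper): deciding whether a packet that
 * is waiting on a re-injection completion has outlived timeout_jiffies, per
 * the comments above. The real handling lives in the DQO TX completion path.
 */
static inline bool
gve_example_reinject_timed_out(const struct gve_tx_pending_packet_dqo *pkt)
{
        return pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
               time_after(jiffies, pkt->timeout_jiffies);
}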

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
        /* Cacheline 0 -- Accessed & dirtied during transmit */
        union {
                /* GQI fields */
                struct {
                        struct gve_tx_fifo tx_fifo;
                        u32 req; /* driver tracked head pointer */
                        u32 done; /* driver tracked tail pointer */
                };

                /* DQO fields. */
                struct {
                        /* Linked list of gve_tx_pending_packet_dqo. Index into
                         * pending_packets, or -1 if empty.
                         *
                         * This is a consumer list owned by the TX path. When it
                         * runs out, the producer list is stolen from the
                         * completion handling path
                         * (dqo_compl.free_pending_packets).
                         */
                        s16 free_pending_packets;

                        /* Cached value of `dqo_compl.hw_tx_head` */
                        u32 head;
                        u32 tail; /* Last posted buffer index + 1 */

                        /* Index of the last descriptor with "report event" bit
                         * set.
                         */
                        u32 last_re_idx;
                } dqo_tx;
        };

        /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
        union {
                /* GQI fields */
                struct {
                        /* Spinlock for when cleanup in progress */
                        spinlock_t clean_lock;
                };

                /* DQO fields. */
                struct {
                        u32 head; /* Last read on compl_desc */

                        /* Tracks the current gen bit of compl_q */
                        u8 cur_gen_bit;

                        /* Linked list of gve_tx_pending_packet_dqo. Index into
                         * pending_packets, or -1 if empty.
                         *
                         * This is the producer list, owned by the completion
                         * handling path. When the consumer list
                         * (dqo_tx.free_pending_packets) runs out, this list
                         * will be stolen.
                         */
                        atomic_t free_pending_packets;

                        /* Last TX ring index fetched by HW */
                        atomic_t hw_tx_head;

                        /* List to track pending packets which received a miss
                         * completion but not a corresponding reinjection.
                         */
                        struct gve_index_list miss_completions;

                        /* List to track pending packets that were completed
                         * before receiving a valid completion because they
                         * reached a specified timeout.
                         */
                        struct gve_index_list timed_out_completions;
                } dqo_compl;
        } ____cacheline_aligned;
        u64 pkt_done; /* free-running - total packets completed */
        u64 bytes_done; /* free-running - total bytes completed */
        u64 dropped_pkt; /* free-running - total packets dropped */
        u64 dma_mapping_error; /* count of dma mapping errors */

        /* Cacheline 2 -- Read-mostly fields */
        union {
                /* GQI fields */
                struct {
                        union gve_tx_desc *desc;

                        /* Maps 1:1 to a desc */
                        struct gve_tx_buffer_state *info;
                };

                /* DQO fields. */
                struct {
                        union gve_tx_desc_dqo *tx_ring;
                        struct gve_tx_compl_desc *compl_ring;

                        struct gve_tx_pending_packet_dqo *pending_packets;
                        s16 num_pending_packets;

                        u32 complq_mask; /* complq size is complq_mask + 1 */
                } dqo;
        } ____cacheline_aligned;
        struct netdev_queue *netdev_txq;
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        struct device *dev;
        u32 mask; /* masks req and done down to queue size */
        u8 raw_addressing; /* use raw_addressing? */

        /* Slow-path fields */
        u32 q_num ____cacheline_aligned; /* queue idx */
        u32 stop_queue; /* count of queue stops */
        u32 wake_queue; /* count of queue wakes */
        u32 queue_timeout; /* count of queue timeouts */
        u32 ntfy_id; /* notification block index */
        u32 last_kick_msec; /* Last time the queue was kicked */
        dma_addr_t bus; /* dma address of the descr ring */
        dma_addr_t q_resources_bus; /* dma address of the queue resources */
        dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
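
/* Illustrative sketch (hypothetical helper): statss exists so the 64-bit
 * counters above can be read consistently on 32-bit architectures. Readers
 * use the standard u64_stats_fetch_begin()/u64_stats_fetch_retry() pattern,
 * as ethtool-style stats readers do.
 */
static inline u64 gve_example_read_pkt_done(const struct gve_tx_ring *tx)
{
        unsigned int start;
        u64 pkt_done;

        do {
                start = u64_stats_fetch_begin(&tx->statss);
                pkt_done = tx->pkt_done;
        } while (u64_stats_fetch_retry(&tx->statss, start));

        return pkt_done;
}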

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
        __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
        char name[IFNAMSIZ + 16]; /* name registered with the kernel */
        struct napi_struct napi; /* kernel napi struct for this block */
        struct gve_priv *priv;
        struct gve_tx_ring *tx; /* tx rings on this block */
        struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
        u16 max_queues;
        u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
        u32 qpl_map_size; /* map memory size */
        unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
        u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
        u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_ptype {
        u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
        u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
        struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
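
/* Illustrative sketch (hypothetical helper): PTYPEs are 10 bits wide, so a
 * packet-type value reported by the device can index directly into the
 * GVE_NUM_PTYPES-entry lookup table after masking.
 */
static inline struct gve_ptype
gve_example_lookup_ptype(const struct gve_ptype_lut *lut, u16 ptype)
{
        return lut->ptypes[ptype & (GVE_NUM_PTYPES - 1)];
}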

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
        GVE_QUEUE_FORMAT_UNSPECIFIED    = 0x0,
        GVE_GQI_RDA_FORMAT              = 0x1,
        GVE_GQI_QPL_FORMAT              = 0x2,
        GVE_DQO_RDA_FORMAT              = 0x3,
};

struct gve_priv {
        struct net_device *dev;
        struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
        struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
        struct gve_queue_page_list *qpls; /* array of num qpls */
        struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
        dma_addr_t ntfy_block_bus;
        struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
        char mgmt_msix_name[IFNAMSIZ + 16];
        u32 mgmt_msix_idx;
        __be32 *counter_array; /* array of num_event_counters */
        dma_addr_t counter_array_bus;

        u16 num_event_counters;
        u16 tx_desc_cnt; /* num desc per ring */
        u16 rx_desc_cnt; /* num desc per ring */
        u16 tx_pages_per_qpl; /* tx buffer length */
        u16 rx_data_slot_cnt; /* rx buffer length */
        u64 max_registered_pages;
        u64 num_registered_pages; /* num pages registered with NIC */
        u32 rx_copybreak; /* copy packets smaller than this */
        u16 default_num_queues; /* default num queues to set up */

        struct gve_queue_config tx_cfg;
        struct gve_queue_config rx_cfg;
        struct gve_qpl_config qpl_cfg; /* map used QPL ids */
        u32 num_ntfy_blks; /* split between TX and RX so must be even */

        struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
        __be32 __iomem *db_bar2; /* "array" of doorbells */
        u32 msg_enable; /* level for netif* netdev print macros */
        struct pci_dev *pdev;

        /* metrics */
        u32 tx_timeo_cnt;

        /* Admin queue - see gve_adminq.h */
        union gve_adminq_command *adminq;
        dma_addr_t adminq_bus_addr;
        u32 adminq_mask; /* masks prod_cnt to adminq size */
        u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
        u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
        u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
        /* free-running count of per AQ cmd executed */
        u32 adminq_describe_device_cnt;
        u32 adminq_cfg_device_resources_cnt;
        u32 adminq_register_page_list_cnt;
        u32 adminq_unregister_page_list_cnt;
        u32 adminq_create_tx_queue_cnt;
        u32 adminq_create_rx_queue_cnt;
        u32 adminq_destroy_tx_queue_cnt;
        u32 adminq_destroy_rx_queue_cnt;
        u32 adminq_dcfg_device_resources_cnt;
        u32 adminq_set_driver_parameter_cnt;
        u32 adminq_report_stats_cnt;
        u32 adminq_report_link_speed_cnt;
        u32 adminq_get_ptype_map_cnt;

        /* Global stats */
        u32 interface_up_cnt; /* count of times interface turned up since last reset */
        u32 interface_down_cnt; /* count of times interface turned down since last reset */
        u32 reset_cnt; /* count of resets */
        u32 page_alloc_fail; /* count of page alloc fails */
        u32 dma_mapping_error; /* count of dma mapping errors */
        u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
        struct workqueue_struct *gve_wq;
        struct work_struct service_task;
        struct work_struct stats_report_task;
        unsigned long service_task_flags;
        unsigned long state_flags;

        struct gve_stats_report *stats_report;
        u64 stats_report_len;
        dma_addr_t stats_report_bus; /* dma address for the stats report */
        unsigned long ethtool_flags;

        unsigned long stats_report_timer_period;
        struct timer_list stats_report_timer;

        /* Gvnic device link speed from hypervisor. */
        u64 link_speed;

        struct gve_options_dqo_rda options_dqo_rda;
        struct gve_ptype_lut *ptype_lut_dqo;

        /* Must be a power of two. */
        int data_buffer_size_dqo;

        enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
        GVE_PRIV_FLAGS_DO_RESET                 = 1,
        GVE_PRIV_FLAGS_RESET_IN_PROGRESS        = 2,
        GVE_PRIV_FLAGS_PROBE_IN_PROGRESS        = 3,
        GVE_PRIV_FLAGS_DO_REPORT_STATS          = 4,
};

enum gve_state_flags_bit {
        GVE_PRIV_FLAGS_ADMIN_QUEUE_OK           = 1,
        GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK      = 2,
        GVE_PRIV_FLAGS_DEVICE_RINGS_OK          = 3,
        GVE_PRIV_FLAGS_NAPI_ENABLED             = 4,
};

enum gve_ethtool_flags_bit {
        GVE_PRIV_FLAGS_REPORT_STATS             = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
                        &priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
                        &priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
                        &priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
        set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
        return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
        clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
                                               struct gve_notify_block *block)
{
        return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}
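
/* Illustrative sketch (hypothetical helper): the doorbell BAR is mapped as
 * big-endian 32-bit registers, so a value is written to a block's IRQ
 * doorbell with iowrite32be(). `val` is a placeholder here; the real IRQ
 * ack/mask bit definitions live elsewhere in the driver.
 */
static inline void gve_example_write_irq_doorbell(struct gve_priv *priv,
                                                  struct gve_notify_block *block,
                                                  u32 val)
{
        iowrite32be(val, gve_irq_doorbell(priv, block));
}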

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
        return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
        return (priv->num_ntfy_blks / 2) + queue_idx;
}
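
/* For example, with num_ntfy_blks == 8 the first half of the blocks serves
 * TX and the second half serves RX: TX queues 0-3 map to blocks 0-3 via
 * gve_tx_idx_to_ntfy(), and RX queues 0-3 map to blocks 4-7 via
 * gve_rx_idx_to_ntfy(). This split is why num_ntfy_blks must be even.
 */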

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
        if (priv->queue_format != GVE_GQI_QPL_FORMAT)
                return 0;

        return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
        if (priv->queue_format != GVE_GQI_QPL_FORMAT)
                return 0;

        return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
        int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
                                     priv->qpl_cfg.qpl_map_size);

        /* we are out of tx qpls */
        if (id >= gve_num_tx_qpls(priv))
                return NULL;

        set_bit(id, priv->qpl_cfg.qpl_id_map);
        return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
        int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
                                    priv->qpl_cfg.qpl_map_size,
                                    gve_num_tx_qpls(priv));

        /* we are out of rx qpls */
        if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
                return NULL;

        set_bit(id, priv->qpl_cfg.qpl_id_map);
        return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
        clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
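
/* Illustrative sketch (hypothetical helper): claiming a TX QPL from the
 * bitmap and releasing it again. gve_assign_tx_qpl() returns NULL once all
 * TX QPL ids are in use; a real caller would keep the QPL for the lifetime
 * of its queue rather than releasing it immediately.
 */
static inline int gve_example_claim_tx_qpl(struct gve_priv *priv)
{
        struct gve_queue_page_list *qpl = gve_assign_tx_qpl(priv);

        if (!qpl)
                return -ENOMEM;

        /* ... queue setup would record qpl and qpl->id here ... */

        gve_unassign_qpl(priv, qpl->id);
        return 0;
}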

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                                                      int id)
{
        if (id < gve_num_tx_qpls(priv))
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
        return priv->queue_format == GVE_GQI_RDA_FORMAT ||
                priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
                   struct page **page, dma_addr_t *dma,
                   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
                   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
                              struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
                      struct gve_queue_config new_rx_config,
                      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */
 863