1
2
3
4
5
6
7#ifndef _GVE_H_
8#define _GVE_H_
9
10#include <linux/dma-mapping.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/u64_stats_sync.h>
14#include "gve_desc.h"
15
#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

/* PCI BARs: BAR0 holds the device registers, BAR2 the doorbells */
#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Max number of FIFO fragments tracked per TX packet (see gve_tx_buffer_state) */
#define GVE_TX_MAX_IOVEC 4

/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 5
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

/* Masks a data-slot address down to its page-aligned base
 * (NOTE(review): the sub-page bits appear to carry per-slot state — confirm)
 */
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
42
43
/* Each slot in the descriptor ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};
49
50
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address; /* kernel mapping of page */
	u8 page_offset; /* flipped to second half? */
	u8 can_flip; /* non-zero when the buffer halves may be swapped */
};
57
58
59
60
/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};
67
68
/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};
76
77struct gve_priv;
78
79
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	struct gve_rx_desc_queue desc;
	struct gve_rx_data_queue data;
	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u32 db_threshold; /* threshold for posting new buffs and descs */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
};
101
102
/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
107
108
/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};
114
/* DMA unmap bookkeeping for a raw-addressing TX buffer */
struct gve_tx_dma_buf {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};
119
120
121
122
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct gve_tx_dma_buf buf; /* raw addressing: dma unmap info */
	};
};
130
131
/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
139
140
/* A TX ring that owns a queue of descriptors */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	struct gve_tx_fifo tx_fifo;
	u32 req; /* driver tracked head pointer */
	u32 done; /* driver tracked tail pointer */

	/* Cacheline 1 -- Accessed & dirtied during cleanup */
	__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union gve_tx_desc *desc ____cacheline_aligned;
	struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
172
173
174
175
/* Per-MSIX interrupt state: one NAPI context shared by the rings using it */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;
184
185
/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};
190
191
/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
196
/* Per-device driver state. */
struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus; /* dma address of the notify blocks */
	struct msix_entry *msix_vectors; /* one per notify block + management */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num descs per tx ring */
	u16 rx_desc_cnt; /* num descs per rx ring */
	u16 tx_pages_per_qpl; /* pages per tx queue page list */
	u16 rx_data_slot_cnt; /* rx buffer slots per ring */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */
	u8 raw_addressing; /* non-zero if device supports raw addressing */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map of in-use QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX; see *_idx_to_ntfy() */

	struct gve_registers __iomem *reg_bar0; /* mapped register BAR */
	__be32 __iomem *db_bar2; /* mapped doorbell BAR ("array" of doorbells) */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up */
	u32 interface_down_cnt; /* count of times interface turned down */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of stats-report triggers */
	struct workqueue_struct *gve_wq; /* workqueue for the driver's tasks */
	struct work_struct service_task; /* acts on service_task_flags */
	struct work_struct stats_report_task;
	unsigned long service_task_flags; /* gve_service_task_flags_bit bits */
	unsigned long state_flags; /* gve_state_flags_bit bits */

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags; /* gve_ethtool_flags_bit bits */

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Link speed reported by the device */
	u64 link_speed;
};
279
/* Bit indices into priv->service_task_flags (used with test/set/clear_bit) */
enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};
286
/* Bit indices into priv->state_flags (used with test/set/clear_bit) */
enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};
293
/* Bit indices into priv->ethtool_flags */
enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};
297
298static inline bool gve_get_do_reset(struct gve_priv *priv)
299{
300 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
301}
302
303static inline void gve_set_do_reset(struct gve_priv *priv)
304{
305 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
306}
307
308static inline void gve_clear_do_reset(struct gve_priv *priv)
309{
310 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
311}
312
313static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
314{
315 return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
316 &priv->service_task_flags);
317}
318
319static inline void gve_set_reset_in_progress(struct gve_priv *priv)
320{
321 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
322}
323
324static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
325{
326 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
327}
328
329static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
330{
331 return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
332 &priv->service_task_flags);
333}
334
335static inline void gve_set_probe_in_progress(struct gve_priv *priv)
336{
337 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
338}
339
340static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
341{
342 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
343}
344
345static inline bool gve_get_do_report_stats(struct gve_priv *priv)
346{
347 return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
348 &priv->service_task_flags);
349}
350
351static inline void gve_set_do_report_stats(struct gve_priv *priv)
352{
353 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
354}
355
356static inline void gve_clear_do_report_stats(struct gve_priv *priv)
357{
358 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
359}
360
361static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
362{
363 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
364}
365
366static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
367{
368 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
369}
370
371static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
372{
373 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
374}
375
376static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
377{
378 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
379}
380
381static inline void gve_set_device_resources_ok(struct gve_priv *priv)
382{
383 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
384}
385
386static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
387{
388 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
389}
390
391static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
392{
393 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
394}
395
396static inline void gve_set_device_rings_ok(struct gve_priv *priv)
397{
398 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
399}
400
401static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
402{
403 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
404}
405
406static inline bool gve_get_napi_enabled(struct gve_priv *priv)
407{
408 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
409}
410
411static inline void gve_set_napi_enabled(struct gve_priv *priv)
412{
413 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
414}
415
416static inline void gve_clear_napi_enabled(struct gve_priv *priv)
417{
418 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
419}
420
421static inline bool gve_get_report_stats(struct gve_priv *priv)
422{
423 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
424}
425
426static inline void gve_clear_report_stats(struct gve_priv *priv)
427{
428 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
429}
430
431
432
433static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
434 struct gve_notify_block *block)
435{
436 return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
437}
438
439
440
/* Returns the index into ntfy_blocks of the given tx ring's block:
 * tx queues use the first half of the blocks, rx queues the second
 * (see gve_rx_idx_to_ntfy()).
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}
445
446
447
448static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
449{
450 return (priv->num_ntfy_blks / 2) + queue_idx;
451}
452
453
454
455static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
456{
457 return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
458}
459
460
461
462static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
463{
464 return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
465}
466
467
468
469static inline
470struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
471{
472 int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
473 priv->qpl_cfg.qpl_map_size);
474
475
476 if (id >= gve_num_tx_qpls(priv))
477 return NULL;
478
479 set_bit(id, priv->qpl_cfg.qpl_id_map);
480 return &priv->qpls[id];
481}
482
483
484
485static inline
486struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
487{
488 int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
489 priv->qpl_cfg.qpl_map_size,
490 gve_num_tx_qpls(priv));
491
492
493 if (id == priv->qpl_cfg.qpl_map_size)
494 return NULL;
495
496 set_bit(id, priv->qpl_cfg.qpl_id_map);
497 return &priv->qpls[id];
498}
499
500
501
/* Unassigns the qpl with the given id, making it available again. */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
506
507
508
509static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
510 int id)
511{
512 if (id < gve_num_tx_qpls(priv))
513 return DMA_TO_DEVICE;
514 else
515 return DMA_FROM_DEVICE;
516}
517
518
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
550#endif
551