/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */
7#ifndef _GVE_H_
8#define _GVE_H_
9
10#include <linux/dma-mapping.h>
11#include <linux/netdevice.h>
12#include <linux/pci.h>
13#include <linux/u64_stats_sync.h>
14
15#include "gve_desc.h"
16#include "gve_desc_dqo.h"
17
#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

/* PCI BARs: registers in BAR0, doorbells in BAR2. */
#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Max number of FIFO fragments a single TX packet may occupy (see
 * gve_tx_buffer_state.iov).
 */
#define GVE_TX_MAX_IOVEC 4

/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, in msec (20000ms = 20s). */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

/* Mask that keeps only the page-aligned portion of a data-slot address. */
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits, so the lookup table has 2^10 entries. */
#define GVE_NUM_PTYPES 1024

/* Fixed RX buffer size used by the DQO queue format. */
#define GVE_RX_BUFFER_SIZE_DQO 2048
49
50
/* Descriptor (completion) ring for a GQI RX queue. */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for the desc ring */
};
56
57
/* The page info for a single slot in the RX data queue. */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver owns this page */
	u8 can_flip; /* whether the buffer can be flipped to the other half of the page */
};
65
66
67
68
/* A list of pages registered with the device during setup and used by a queue
 * as buffers.
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};
75
76
/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring. */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};
84
85struct gve_priv;
86
87
88
89
/* RX buffer queue for posting buffers to HW (DQO format).
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
97
98
/* RX completion queue to receive packets from HW (DQO format). */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. This should
	 * generally be zero, but may become nonzero when buffer posting falls
	 * behind completion processing (e.g. on allocation failure).
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
121
122
/* Stores state for tracking buffers posted to HW. */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none. */
	s16 next;
};
138
139
/* Intrusive linked list: `head` and `tail` are indices into an array of
 * elements that carry their own `next` links, or -1 if the list is empty.
 */
struct gve_index_list {
	s16 head;
	s16 tail;
};
144
145
146
147
/* Datapath state used to represent an RX packet that spans multiple buffer
 * fragments.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet, or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size; /* expected total byte size of the packet */
	u8 expected_frag_cnt; /* number of fragments the packet should have */
	u8 curr_frag_cnt; /* number of fragments processed so far */
	u8 reuse_frags; /* whether buffers for this packet can be recycled */
};
157
158
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped on desc error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	/* Info for packet currently being processed in this ring. */
	struct gve_rx_ctx ctx;
};
228
229
/* A TX desc ring entry. */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
234
235
/* Tracks the memory in the fifo occupied by a segment of a packet. */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};
241
242
243
244
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc, not a seg_desc.
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};
255
256
/* A TX buffer FIFO - each GQI QPL queue has one. */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
264
265
/* TX descriptor for the DQO format. */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};
271
enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion. */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet received a miss completion and is now waiting for the
	 * corresponding re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
286
/* Per-packet TX state while the packet is awaiting completion (DQO format). */
struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none. */
	s16 next;

	/* Linked list index to the prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
320
321
/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during completion cleanup */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read position in compl_ring */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
439
440
441
442
/* Tracks the state of one notification block: the interrupt doorbell, the
 * NAPI context, and the rings serviced by that interrupt.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st field */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;
451
452
/* Tracks allowed and current queue counts for one direction (TX or RX). */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};
457
458
/* Tracks the available and used QPL IDs. */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
463
/* Ring-size options used by the DQO RDA queue format. */
struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
468
/* Parsed packet type: L3 and L4 protocol identifiers for one hardware
 * ptype entry.
 */
struct gve_ptype {
	u8 l3_type; /* L3 protocol id — values defined by the device ABI */
	u8 l4_type; /* L4 protocol id — values defined by the device ABI */
};
473
/* Lookup table mapping each 10-bit hardware packet type to its parsed form. */
struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
477
478
479
480
481
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the queue format has not been negotiated with the device.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
};
488
/* Per-device driver state for one gve netdev. */
struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* msix vectors for the notify blocks + mgmt */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* pages per tx qpl */
	u16 rx_data_slot_cnt; /* rx data slots per ring */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each AQ cmd executed, by type */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up */
	u32 interface_down_cnt; /* count of times interface turned down */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats reports */
	struct workqueue_struct *gve_wq; /* workqueue for gve tasks */
	struct work_struct service_task; /* manages reset and stats reporting */
	struct work_struct stats_report_task; /* task for stats reporting */
	unsigned long service_task_flags; /* bitmap of gve_service_task_flags_bit */
	unsigned long state_flags; /* bitmap of gve_state_flags_bit */

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags; /* bitmap of gve_ethtool_flags_bit */

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed as reported by the device. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Size of buffers posted for RX in the DQO format. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};
579
/* Bit positions in priv->service_task_flags; define the work the service task
 * should perform on behalf of the driver.
 */
enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};
586
/* Bit positions in priv->state_flags; track which device resources are up. */
enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};
593
/* Bit positions in priv->ethtool_flags; ethtool-controlled private flags. */
enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};
597
/* Atomic test/set/clear helpers for the service-task flag bits in
 * priv->service_task_flags (see enum gve_service_task_flags_bit).
 */
static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}
660
/* Atomic test/set/clear helpers for the device-state bits in
 * priv->state_flags (see enum gve_state_flags_bit) and for the ethtool
 * REPORT_STATS bit in priv->ethtool_flags.
 */
static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}
730
731
732
/* Returns the address of the notify block's irq doorbell in BAR2. */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}
738
739
740
/* Returns the index into ntfy_blocks of the given tx ring's block.
 * TX queues map directly onto the first half of the notify blocks.
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}
745
746
747
/* Returns the index into ntfy_blocks of the given rx ring's block.
 * RX queues occupy the second half of the notify blocks.
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
752
753
754
755static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
756{
757 if (priv->queue_format != GVE_GQI_QPL_FORMAT)
758 return 0;
759
760 return priv->tx_cfg.num_queues;
761}
762
763
764
765static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
766{
767 if (priv->queue_format != GVE_GQI_QPL_FORMAT)
768 return 0;
769
770 return priv->rx_cfg.num_queues;
771}
772
773
774
/* Returns a pointer to the next available tx qpl in the list of qpls,
 * marking it used in the qpl_id_map bitmap, or NULL if none are free.
 * TX qpls occupy ids [0, gve_num_tx_qpls).
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}
788
789
790
/* Returns a pointer to the next available rx qpl in the list of qpls,
 * marking it used in the qpl_id_map bitmap, or NULL if none are free.
 * RX qpls occupy ids [gve_num_tx_qpls, gve_num_tx_qpls + gve_num_rx_qpls).
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}
805
806
807
/* Unassigns the qpl with the given id, returning it to the free pool. */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
812
813
814
815static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
816 int id)
817{
818 if (id < gve_num_tx_qpls(priv))
819 return DMA_TO_DEVICE;
820 else
821 return DMA_FROM_DEVICE;
822}
823
824static inline bool gve_is_gqi(struct gve_priv *priv)
825{
826 return priv->queue_format == GVE_GQI_RDA_FORMAT ||
827 priv->queue_format == GVE_GQI_QPL_FORMAT;
828}
829
830
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* reset handling */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* stats report handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
862#endif
863