/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c)  2018 Intel Corporation */

#ifndef _IGC_H_
#define _IGC_H_

#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>

#include "igc_hw.h"

void igc_set_ethtool_ops(struct net_device *);

struct igc_adapter;
struct igc_ring;

void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
void igc_free_rx_resources(struct igc_ring *ring);
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
int igc_add_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igc_del_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);

extern char igc_driver_name[];
extern char igc_driver_version[];

#define IGC_REGS_LEN			740
#define IGC_RETA_SIZE			128

/* Flags definitions */
#define IGC_FLAG_HAS_MSI		BIT(0)
#define IGC_FLAG_QUEUE_PAIRS		BIT(3)
#define IGC_FLAG_DMAC			BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE	BIT(9)
#define IGC_FLAG_MEDIA_RESET		BIT(10)
#define IGC_FLAG_MAS_ENABLE		BIT(12)
#define IGC_FLAG_HAS_MSIX		BIT(13)
#define IGC_FLAG_VLAN_PROMISC		BIT(15)
#define IGC_FLAG_RX_LEGACY		BIT(16)

#define IGC_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)

#define IGC_MRQC_ENABLE_RSS_MQ		0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
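
/* Illustrative note: the IGC_FLAG_RSS_FIELD_* adapter flags above mirror
 * the IGC_MRQC_RSS_FIELD_* hardware bits; when a flag is set, the driver
 * ORs the matching bit into the MRQC register so that UDP ports are
 * included in the RSS hash input.
 */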

/* Interrupt defines */
#define IGC_START_ITR		648 /* ~6000 ints/sec */
#define IGC_4K_ITR		980
#define IGC_20K_ITR		196
#define IGC_70K_ITR		56

#define IGC_DEFAULT_ITR		3 /* dynamic */
#define IGC_MAX_ITR_USECS	10000
#define IGC_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_MSIX_ENTRIES	10
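
/* Rough arithmetic (an inference from the names, not stated in this
 * header): with the ITR register counting in ~0.25 us units,
 * IGC_4K_ITR (980) is about a 245 us interval, i.e. ~4000 ints/sec;
 * IGC_20K_ITR (196) ~49 us, ~20000 ints/sec; IGC_70K_ITR (56) ~14 us,
 * ~70000 ints/sec; and IGC_START_ITR (648) ~162 us, ~6000 ints/sec.
 */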

/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD		256
#define IGC_DEFAULT_TX_WORK	128
#define IGC_MIN_TXD		80
#define IGC_MAX_TXD		4096

#define IGC_DEFAULT_RXD		256
#define IGC_MIN_RXD		80
#define IGC_MAX_RXD		4096

/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES		4
#define IGC_MAX_TX_QUEUES		4

#define MAX_Q_VECTORS			8
#define MAX_STD_JUMBO_FRAME_SIZE	9216

/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256		256
#define IGC_RXBUFFER_2048		2048
#define IGC_RXBUFFER_3072		3072

#define AUTO_ALL_MODES			0
#define IGC_RX_HDR_LEN			IGC_RXBUFFER_256

/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH			8
#define IGC_RX_HTHRESH			8
#define IGC_TX_PTHRESH			8
#define IGC_TX_HTHRESH			1
#define IGC_RX_WTHRESH			4
#define IGC_TX_WTHRESH			16

#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define IGC_TS_HDR_LEN			16

#define IGC_SKB_PAD			(NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB	(IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGC_RX_BUFFER_WRITE	16 /* Must be power of 2 */

/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
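
/* Usage sketch (illustrative, not part of this header): the Rx clean
 * path tests write-back status bits through this helper, e.g. the
 * descriptor-done bit (IGC_RXD_STAT_DD, defined in igc_defines.h):
 *
 *	union igc_adv_rx_desc *rx_desc = IGC_RX_DESC(rx_ring, ntc);
 *
 *	if (!igc_test_staterr(rx_desc, IGC_RXD_STAT_DD))
 *		break;	(descriptor not yet written back by hardware)
 *
 * Note the return value is the masked bits still in little-endian form,
 * so callers treat it as a boolean rather than as a numeric value.
 */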

enum igc_state_t {
	__IGC_TESTING,
	__IGC_RESETTING,
	__IGC_DOWN,
	__IGC_PTP_TX_IN_PROGRESS,
};

enum igc_tx_flags {
	/* cmd_type flags */
	IGC_TX_FLAGS_VLAN	= 0x01,
	IGC_TX_FLAGS_TSO	= 0x02,
	IGC_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGC_TX_FLAGS_IPV4	= 0x10,
	IGC_TX_FLAGS_CSUM	= 0x20,
};

enum igc_boards {
	board_base,
};

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGC_MAX_TXD_PWR		15
#define IGC_MAX_DATA_PER_TXD	BIT(IGC_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
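
/* Worked example (illustrative): IGC_MAX_DATA_PER_TXD is BIT(15) = 32 KB,
 * so a 60 KB fragment costs TXD_USE_COUNT(61440) =
 * DIV_ROUND_UP(61440, 32768) = 2 data descriptors.  The "+ 4" in
 * DESC_NEEDED is headroom on top of one descriptor per page fragment:
 * roughly one for the linear skb head, one for a context descriptor,
 * and a two-descriptor gap so next_to_use never touches next_to_clean.
 */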

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igc_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igc_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igc_rx_packet_stats {
	u64 ipv4_packets;	/* IPv4 headers processed */
	u64 ipv4e_packets;	/* IPv4 headers with extensions processed */
	u64 ipv6_packets;	/* IPv6 headers processed */
	u64 ipv6e_packets;	/* IPv6 headers with extensions processed */
	u64 tcp_packets;	/* TCP headers processed */
	u64 udp_packets;	/* UDP headers processed */
	u64 sctp_packets;	/* SCTP headers processed */
	u64 nfs_packets;	/* NFS headers processed */
	u64 other_packets;
};

struct igc_ring_container {
	struct igc_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igc_ring {
	struct igc_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igc_tx_buffer *tx_buffer_info;
		struct igc_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point are written frequently */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igc_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct igc_rx_queue_stats rx_stats;
			struct igc_rx_packet_stats pkt_stats;
			struct u64_stats_sync rx_syncp;
			struct sk_buff *skb;
		};
	};
} ____cacheline_internodealigned_in_smp;
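
/* Usage sketch (illustrative): readers of the per-ring counters pair
 * each access with the matching u64_stats_sync, so 64-bit counters are
 * safe from torn reads on 32-bit systems:
 *
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 *		packets = ring->rx_stats.packets;
 *		bytes = ring->rx_stats.bytes;
 *	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 */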

struct igc_q_vector {
	struct igc_adapter *adapter;	/* backlink */
	void __iomem *itr_register;
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;
	u8 set_itr;

	struct igc_ring_container rx, tx;

	struct napi_struct napi;

	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];
	struct net_device poll_dev;

	/* for dynamic allocation of rings associated with this q_vector */
	struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};

#define MAX_ETYPE_FILTER		(4 - 1)

enum igc_filter_match_flags {
	IGC_FILTER_FLAG_ETHER_TYPE	= 0x1,
	IGC_FILTER_FLAG_VLAN_TCI	= 0x2,
	IGC_FILTER_FLAG_SRC_MAC_ADDR	= 0x4,
	IGC_FILTER_FLAG_DST_MAC_ADDR	= 0x8,
};

/* RX network flow classification data structure */
struct igc_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};

struct igc_nfc_filter {
	struct hlist_node nfc_node;
	struct igc_nfc_input filter;
	unsigned long cookie;
	u16 etype_reg_index;
	u16 sw_idx;
	u16 action;
};

struct igc_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state;
};

#define IGC_MAC_STATE_DEFAULT		0x1
#define IGC_MAC_STATE_IN_USE		0x2
#define IGC_MAC_STATE_SRC_ADDR		0x4
#define IGC_MAC_STATE_QUEUE_STEERING	0x8

#define IGC_MAX_RXNFC_FILTERS		16

/* Board specific private data structure */
struct igc_adapter {
	struct net_device *netdev;

	unsigned long state;
	unsigned int flags;
	unsigned int num_q_vectors;

	struct msix_entry *msix_entries;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];

	/* RX */
	int num_rx_queues;
	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;

	u16 link_speed;
	u16 link_duplex;

	u8 port_num;

	u8 __iomem *io_addr;
	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;
	bool fc_autoneg;

	u8 tx_timeout_factor;

	int msg_enable;
	u32 max_frame_size;
	u32 min_frame_size;

	/* OS defined structs */
	struct pci_dev *pdev;
	/* lock protecting stats64 updates */
	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in igc_hw.h */
	struct igc_hw hw;
	struct igc_hw_stats stats;

	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	u16 tx_ring_count;
	u16 rx_ring_count;

	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	u32 *shadow_vfta;

	u32 rss_queues;
	u32 rss_indir_tbl_init;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;

	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];

	struct igc_mac_addr *mac_table;

	u8 rss_indir_tbl[IGC_RETA_SIZE];

	unsigned long link_check_timeout;
	struct igc_info ei;
};
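
/* Usage sketch (illustrative): the adapter struct is the net_device's
 * private area, so the two are interconvertible:
 *
 *	struct igc_adapter *adapter = netdev_priv(netdev);
 *	struct net_device *netdev = adapter->netdev;
 */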

/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
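
/* Worked example (illustrative): on a 256-entry ring with
 * next_to_clean = 5 and next_to_use = 10, ntc <= ntu, so this returns
 * 256 + 5 - 10 - 1 = 250 free slots.  The "- 1" keeps one slot
 * permanently unused so that next_to_use == next_to_clean can only
 * mean "empty", never "full".
 */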

static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline s32 igc_reset_phy(struct igc_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}
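
/* Illustrative note: these wrappers (like igc_read_phy_reg() further
 * below) guard against an unset PHY ops hook, so callers can invoke
 * them unconditionally; a missing hook simply reads back as success (0).
 */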

static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
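
/* Usage sketch (illustrative): the Tx path reaches the backing netdev
 * queue through txring_txq(), e.g. to stop the queue when descriptors
 * run low:
 *
 *	if (igc_desc_unused(tx_ring) < DESC_NEEDED)
 *		netif_tx_stop_queue(txring_txq(tx_ring));
 */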

enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}

static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}
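
/* Illustrative sizing note: on 4 KB-page systems, a "large buffer" ring
 * takes 3 KB buffers from order-1 (8 KB) pages, two buffers per page,
 * while default rings take 2 KB buffers from order-0 pages.  On systems
 * with pages of 8 KB or more, the #if blocks above compile away and
 * everything fits in order-0 pages.
 */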

static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

void igc_reinit_locked(struct igc_adapter *);
int igc_add_filter(struct igc_adapter *adapter,
		   struct igc_nfc_filter *input);
int igc_erase_filter(struct igc_adapter *adapter,
		     struct igc_nfc_filter *input);

#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_TXD_DCMD	(IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)

#define IGC_RX_DESC(R, i)	\
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i)	\
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i)	\
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
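
/* Usage sketch (illustrative): the descriptor ring is one DMA-coherent
 * block at ring->desc, so these macros are plain array indexing:
 *
 *	union igc_adv_rx_desc *rx_desc = IGC_RX_DESC(rx_ring, i);
 *	union igc_adv_tx_desc *tx_desc = IGC_TX_DESC(tx_ring, i);
 */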

#endif /* _IGC_H_ */