1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#ifndef _IXGBE_H_
29#define _IXGBE_H_
30
31#include <linux/bitops.h>
32#include <linux/types.h>
33#include <linux/pci.h>
34#include <linux/netdevice.h>
35#include <linux/cpumask.h>
36#include <linux/aer.h>
37#include <linux/if_vlan.h>
38#include <linux/jiffies.h>
39
40#include <linux/clocksource.h>
41#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h>
43
44#include "ixgbe_type.h"
45#include "ixgbe_common.h"
46#include "ixgbe_dcb.h"
47#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
48#define IXGBE_FCOE
49#include "ixgbe_fcoe.h"
50#endif
51#ifdef CONFIG_IXGBE_DCA
52#include <linux/dca.h>
53#endif
54
55#include <net/busy_poll.h>
56
57#ifdef CONFIG_NET_RX_BUSY_POLL
58#define LL_EXTENDED_STATS
59#endif
60
61#undef pr_fmt
62#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
63
64
65#define IXGBE_DEFAULT_TXD 512
66#define IXGBE_DEFAULT_TX_WORK 256
67#define IXGBE_MAX_TXD 4096
68#define IXGBE_MIN_TXD 64
69
70#define IXGBE_DEFAULT_RXD 512
71#define IXGBE_MAX_RXD 4096
72#define IXGBE_MIN_RXD 64
73
74
75#define IXGBE_MIN_FCRTL 0x40
76#define IXGBE_MAX_FCRTL 0x7FF80
77#define IXGBE_MIN_FCRTH 0x600
78#define IXGBE_MAX_FCRTH 0x7FFF0
79#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
80#define IXGBE_MIN_FCPAUSE 0
81#define IXGBE_MAX_FCPAUSE 0xFFFF
82
83
84#define IXGBE_RXBUFFER_256 256
85#define IXGBE_RXBUFFER_2K 2048
86#define IXGBE_RXBUFFER_3K 3072
87#define IXGBE_RXBUFFER_4K 4096
88#define IXGBE_MAX_RXBUFFER 16384
89
90
91
92
93
94
95
96
97
98#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
99
100
101#define IXGBE_RX_BUFFER_WRITE 16
102
/* Flags carried in ixgbe_tx_buffer.tx_flags describing per-skb TX work. */
enum ixgbe_tx_flags {
 /* cmd_type flags */
 IXGBE_TX_FLAGS_HW_VLAN = 0x01,
 IXGBE_TX_FLAGS_TSO = 0x02,
 IXGBE_TX_FLAGS_TSTAMP = 0x04,

 /* olinfo flags */
 IXGBE_TX_FLAGS_CC = 0x08,
 IXGBE_TX_FLAGS_IPV4 = 0x10,
 IXGBE_TX_FLAGS_CSUM = 0x20,

 /* software defined flags */
 IXGBE_TX_FLAGS_SW_VLAN = 0x40,
 IXGBE_TX_FLAGS_FCOE = 0x80,
};
118
119
120#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
121#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
122#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
123#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
124
125#define IXGBE_MAX_VF_MC_ENTRIES 30
126#define IXGBE_MAX_VF_FUNCTIONS 64
127#define IXGBE_MAX_VFTA_ENTRIES 128
128#define MAX_EMULATION_MAC_ADDRS 16
129#define IXGBE_MAX_PF_MACVLANS 15
130#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
131#define IXGBE_82599_VF_DEVICE_ID 0x10ED
132#define IXGBE_X540_VF_DEVICE_ID 0x1515
133
/* Per-VF state tracked by the PF when SR-IOV is in use. */
struct vf_data_storage {
 unsigned char vf_mac_addresses[ETH_ALEN];
 u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; /* VF multicast hash list */
 u16 num_vf_mc_hashes;
 u16 default_vf_vlan_id;
 u16 vlans_enabled;
 bool clear_to_send; /* VF completed reset handshake; mailbox usable */
 bool pf_set_mac; /* MAC was administratively set by the PF */
 u16 pf_vlan; /* PF-assigned VLAN; 0 when none */
 u16 pf_qos;
 u16 tx_rate;
 u16 vlan_count;
 u8 spoofchk_enabled;
 unsigned int vf_api; /* mailbox API version negotiated with the VF */
};
149
/* One entry in the PF-managed list of MACVLAN addresses owned by VFs. */
struct vf_macvlans {
 struct list_head l;
 int vf; /* owning VF index */
 int rar_entry; /* receive address register used for this address */
 bool free; /* entry is available for reuse */
 bool is_macvlan;
 u8 vf_macvlan[ETH_ALEN];
};
158
159#define IXGBE_MAX_TXD_PWR 14
160#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
161
162
163#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
164#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
165
166
167
/* Per-descriptor software state for the TX path; wraps around the
 * associated hardware descriptor ring entry.
 */
struct ixgbe_tx_buffer {
 union ixgbe_adv_tx_desc *next_to_watch; /* desc whose DD bit signals completion */
 unsigned long time_stamp; /* jiffies when queued, for hang detection */
 struct sk_buff *skb;
 unsigned int bytecount;
 unsigned short gso_segs;
 __be16 protocol;
 DEFINE_DMA_UNMAP_ADDR(dma);
 DEFINE_DMA_UNMAP_LEN(len);
 u32 tx_flags; /* combination of enum ixgbe_tx_flags values */
};
179
/* Per-descriptor software state for the RX path (page-based receive). */
struct ixgbe_rx_buffer {
 struct sk_buff *skb;
 dma_addr_t dma; /* DMA address of the mapped page */
 struct page *page;
 unsigned int page_offset; /* offset of the active half/slice in page */
};
186
/* Generic per-queue packet/byte counters (both TX and RX rings). */
struct ixgbe_queue_stats {
 u64 packets;
 u64 bytes;
#ifdef LL_EXTENDED_STATS
 /* busy-poll contention statistics */
 u64 yields;
 u64 misses;
 u64 cleaned;
#endif
};
196
/* TX-only per-queue statistics. */
struct ixgbe_tx_queue_stats {
 u64 restart_queue; /* times the queue was restarted after stopping */
 u64 tx_busy;
 u64 tx_done_old; /* progress marker used by the TX hang detector */
};
202
/* RX-only per-queue statistics. */
struct ixgbe_rx_queue_stats {
 u64 rsc_count; /* receive-side coalescing (RSC/LRO) events */
 u64 rsc_flush;
 u64 non_eop_descs; /* descriptors without End-Of-Packet set */
 u64 alloc_rx_page_failed;
 u64 alloc_rx_buff_failed;
 u64 csum_err;
};
211
/* Bit numbers for the atomic ixgbe_ring.state word. */
enum ixgbe_ring_state_t {
 __IXGBE_TX_FDIR_INIT_DONE,
 __IXGBE_TX_XPS_INIT_DONE,
 __IXGBE_TX_DETECT_HANG, /* armed: check this TX ring for hangs */
 __IXGBE_HANG_CHECK_ARMED,
 __IXGBE_RX_RSC_ENABLED,
 __IXGBE_RX_CSUM_UDP_ZERO_ERR,
 __IXGBE_RX_FCOE, /* ring carries FCoE traffic (larger buffers) */
};
221
222#define check_for_tx_hang(ring) \
223 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
224#define set_check_for_tx_hang(ring) \
225 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
226#define clear_check_for_tx_hang(ring) \
227 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
228#define ring_is_rsc_enabled(ring) \
229 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
230#define set_ring_rsc_enabled(ring) \
231 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
232#define clear_ring_rsc_enabled(ring) \
233 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
/* Software state for one hardware descriptor ring (TX or RX). */
struct ixgbe_ring {
 struct ixgbe_ring *next; /* pointer to next ring in q_vector */
 struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 struct net_device *netdev; /* netdev ring belongs to */
 struct device *dev; /* device for DMA mapping */
 void *desc; /* descriptor ring memory */
 union {
  /* only one of these is meaningful, depending on ring direction */
  struct ixgbe_tx_buffer *tx_buffer_info;
  struct ixgbe_rx_buffer *rx_buffer_info;
 };
 unsigned long last_rx_timestamp; /* jiffies of last RX hw timestamp */
 unsigned long state; /* bits from enum ixgbe_ring_state_t */
 u8 __iomem *tail; /* mapped tail register for this ring */
 dma_addr_t dma; /* phys. address of descriptor ring */
 unsigned int size; /* length of descriptor ring in bytes */

 u16 count; /* number of descriptors in the ring */

 u8 queue_index; /* needed for multiqueue queue management */
 u8 reg_idx; /* hardware register index for this ring;
              * differs from queue_index in DCB/VMDq modes
              */

 u16 next_to_use;
 u16 next_to_clean;

 union {
  u16 next_to_alloc; /* RX: next buffer slot to (re)allocate */
  struct {
   /* TX: Flow Director ATR sampling state */
   u8 atr_sample_rate;
   u8 atr_count;
  };
 };

 u8 dcb_tc; /* DCB traffic class this ring belongs to */
 struct ixgbe_queue_stats stats;
 struct u64_stats_sync syncp; /* protects 64-bit stats on 32-bit hosts */
 union {
  struct ixgbe_tx_queue_stats tx_stats;
  struct ixgbe_rx_queue_stats rx_stats;
 };
} ____cacheline_internodealigned_in_smp;
277
/* Indices into ixgbe_adapter.ring_feature[], one per ring feature. */
enum ixgbe_ring_f_enum {
 RING_F_NONE = 0,
 RING_F_VMDQ, /* SR-IOV uses the same ring feature */
 RING_F_RSS,
 RING_F_FDIR,
#ifdef IXGBE_FCOE
 RING_F_FCOE,
#endif
 /* must be last in enum set */
 RING_F_ARRAY_SIZE
};
289
290#define IXGBE_MAX_RSS_INDICES 16
291#define IXGBE_MAX_VMDQ_INDICES 64
292#define IXGBE_MAX_FDIR_INDICES 63
293#define IXGBE_MAX_FCOE_INDICES 8
294#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
295#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
/* Per-feature ring accounting (RSS, VMDq, FDIR, FCoE). */
struct ixgbe_ring_feature {
 u16 limit; /* upper limit on feature indices */
 u16 indices; /* current value of indices */
 u16 mask; /* mask used for feature to ring mapping */
 u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
302
303#define IXGBE_82599_VMDQ_8Q_MASK 0x78
304#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
305#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
306
307
308
309
310
311
/* Return the RX buffer size (in bytes) used for @ring.
 *
 * FCoE rings need larger buffers than the default 2K: 4K when
 * PAGE_SIZE < 8192 (the ring then uses order-1 pages, see
 * ixgbe_rx_pg_order()), 3K otherwise.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
  return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
   IXGBE_RXBUFFER_3K;
#endif
 return IXGBE_RXBUFFER_2K;
}
321
/* Return the page allocation order for RX buffers on @ring.
 *
 * Order 0 normally; FCoE rings on systems with PAGE_SIZE < 8192 use
 * order-1 pages so the larger FCoE buffer (see ixgbe_rx_bufsz()) fits.
 */
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
  return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
 return 0;
}
330#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
331
/* Group of rings (all TX or all RX) serviced by one q_vector. */
struct ixgbe_ring_container {
 struct ixgbe_ring *ring; /* singly linked list of rings */
 unsigned int total_bytes; /* total bytes processed this interrupt */
 unsigned int total_packets; /* total packets processed this interrupt */
 u16 work_limit; /* total work allowed per interrupt */
 u8 count; /* number of rings in this container */
 u8 itr; /* current ITR setting for this container */
};
340
341
342#define ixgbe_for_each_ring(pos, head) \
343 for (pos = (head).ring; pos != NULL; pos = pos->next)
344
345#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
346 ? 8 : 1)
347#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
348
349
350
351
/* MSI-X interrupt vector context: one NAPI instance plus the TX and RX
 * rings it services.
 */
struct ixgbe_q_vector {
 struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
 int cpu; /* CPU for DCA */
#endif
 u16 v_idx; /* index of q_vector within array, also used for
             * finding the bit in EICR that represents this vector
             */
 u16 itr; /* interrupt throttle rate value */
 struct ixgbe_ring_container rx, tx;

 struct napi_struct napi;
 cpumask_t affinity_mask;
 int numa_node;
 struct rcu_head rcu; /* to avoid race with update stats on free */
 char name[IFNAMSIZ + 9]; /* IRQ name, e.g. "<ifname>-TxRx-<n>" */

#ifdef CONFIG_NET_RX_BUSY_POLL
 /* state flags arbitrating NAPI vs. busy-poll ownership; protected
  * by 'lock' below
  */
 unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1
#define IXGBE_QV_STATE_POLL 2
#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
#define IXGBE_QV_STATE_NAPI_YIELD 4
#define IXGBE_QV_STATE_POLL_YIELD 8
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
 spinlock_t lock;
#endif

 /* for dynamic allocation of rings associated with this q_vector */
 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
385#ifdef CONFIG_NET_RX_BUSY_POLL
/* Initialize the busy-poll arbitration lock and mark the vector idle. */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{

 spin_lock_init(&q_vector->lock);
 q_vector->state = IXGBE_QV_STATE_IDLE;
}
392
393
394static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
395{
396 int rc = true;
397 spin_lock(&q_vector->lock);
398 if (q_vector->state & IXGBE_QV_LOCKED) {
399 WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
400 q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
401 rc = false;
402#ifdef LL_EXTENDED_STATS
403 q_vector->tx.ring->stats.yields++;
404#endif
405 } else
406
407 q_vector->state = IXGBE_QV_STATE_NAPI;
408 spin_unlock(&q_vector->lock);
409 return rc;
410}
411
412
413static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
414{
415 int rc = false;
416 spin_lock(&q_vector->lock);
417 WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
418 IXGBE_QV_STATE_NAPI_YIELD));
419
420 if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
421 rc = true;
422 q_vector->state = IXGBE_QV_STATE_IDLE;
423 spin_unlock(&q_vector->lock);
424 return rc;
425}
426
427
428static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
429{
430 int rc = true;
431 spin_lock_bh(&q_vector->lock);
432 if ((q_vector->state & IXGBE_QV_LOCKED)) {
433 q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
434 rc = false;
435#ifdef LL_EXTENDED_STATS
436 q_vector->rx.ring->stats.yields++;
437#endif
438 } else
439
440 q_vector->state |= IXGBE_QV_STATE_POLL;
441 spin_unlock_bh(&q_vector->lock);
442 return rc;
443}
444
445
446static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
447{
448 int rc = false;
449 spin_lock_bh(&q_vector->lock);
450 WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
451
452 if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
453 rc = true;
454 q_vector->state = IXGBE_QV_STATE_IDLE;
455 spin_unlock_bh(&q_vector->lock);
456 return rc;
457}
458
459
/* True if a user (busy-poll) context owns or is waiting on the vector.
 * Must only be called while the vector is locked (NAPI or POLL).
 */
static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{
 WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
 return q_vector->state & IXGBE_QV_USER_PEND;
}
465#else
/* Stubs for !CONFIG_NET_RX_BUSY_POLL: with busy polling compiled out
 * there is no NAPI/poll ownership to arbitrate, so NAPI locking always
 * succeeds and no user polling or yielding is ever reported.
 */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}

static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
 return true;
}

static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
 return false;
}

static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
 return false;
}

static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
 return false;
}

static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{
 return false;
}
494#endif
495
496#ifdef CONFIG_IXGBE_HWMON
497
498#define IXGBE_HWMON_TYPE_LOC 0
499#define IXGBE_HWMON_TYPE_TEMP 1
500#define IXGBE_HWMON_TYPE_CAUTION 2
501#define IXGBE_HWMON_TYPE_MAX 3
502
/* One hwmon sysfs attribute bound to a thermal sensor on the device. */
struct hwmon_attr {
 struct device_attribute dev_attr;
 struct ixgbe_hw *hw;
 struct ixgbe_thermal_diode_data *sensor;
 char name[12]; /* sysfs attribute name, e.g. "temp1_input" — TODO confirm format */
};
509
/* Container for the adapter's dynamically built hwmon attribute list. */
struct hwmon_buff {
 struct device *device;
 struct hwmon_attr *hwmon_list; /* array of n_hwmon entries */
 unsigned int n_hwmon;
};
515#endif
516
517
518
519
520
521#define IXGBE_MIN_RSC_ITR 24
522#define IXGBE_100K_ITR 40
523#define IXGBE_20K_ITR 200
524#define IXGBE_10K_ITR 400
525#define IXGBE_8K_ITR 500
526
527
/* Test status/error bits in an RX writeback descriptor.
 *
 * The descriptor field is little-endian; instead of byte-swapping it on
 * every call, the (constant) bit mask is converted once with
 * cpu_to_le32(). Returns non-zero iff any requested bit is set.
 */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
     const u32 stat_err_bits)
{
 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
533
/* Number of unused descriptors on @ring.
 *
 * One slot is always left unused (the "- 1") so that next_to_use ==
 * next_to_clean unambiguously means the ring is empty rather than full.
 * The conditional adds ring->count to handle index wraparound.
 */
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
 u16 ntc = ring->next_to_clean;
 u16 ntu = ring->next_to_use;

 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
541
542#define IXGBE_RX_DESC(R, i) \
543 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
544#define IXGBE_TX_DESC(R, i) \
545 (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
546#define IXGBE_TX_CTXTDESC(R, i) \
547 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
548
549#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728
550#ifdef IXGBE_FCOE
551
552#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
553#endif
554
555#define OTHER_VECTOR 1
556#define NON_Q_VECTORS (OTHER_VECTOR)
557
558#define MAX_MSIX_VECTORS_82599 64
559#define MAX_Q_VECTORS_82599 64
560#define MAX_MSIX_VECTORS_82598 18
561#define MAX_Q_VECTORS_82598 16
562
563#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
564#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
565
566#define MIN_MSIX_Q_VECTORS 1
567#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
568
569
570#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
571
572
/* Board-specific private data structure: all per-adapter state. */
struct ixgbe_adapter {
 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 /* OS defined structs */
 struct net_device *netdev;
 struct pci_dev *pdev;

 unsigned long state; /* bits from enum ixgbe_state_t */

 /* Some features need tri-state capability,
  * thus the additional *_CAPABLE flags.
  */
 u32 flags;
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)

 u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)

 /* Tx fast path data */
 int num_tx_queues;
 u16 tx_itr_setting;
 u16 tx_work_limit;

 /* Rx fast path data */
 int num_rx_queues;
 u16 rx_itr_setting;

 /* TX */
 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

 u64 restart_queue;
 u64 lsc_int; /* link status change interrupt count */
 u32 tx_timeout_count;

 /* RX */
 struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
 int num_rx_pools; /* == num_rx_queues in 82598 */
 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
 u64 hw_csum_rx_error;
 u64 hw_rx_no_dma_resources;
 u64 rsc_total_count;
 u64 rsc_total_flush;
 u64 non_eop_descs;
 u32 alloc_rx_page_failed;
 u32 alloc_rx_buff_failed;

 struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

 /* DCB parameters */
 struct ieee_pfc *ixgbe_ieee_pfc;
 struct ieee_ets *ixgbe_ieee_ets;
 struct ixgbe_dcb_config dcb_cfg;
 struct ixgbe_dcb_config temp_dcb_cfg;
 u8 dcb_set_bitmap;
 u8 dcbx_cap;
 enum ixgbe_fc_mode last_lfc_mode;

 int num_q_vectors; /* current number of q_vectors for device */
 int max_q_vectors; /* upper limit of q_vectors for device */
 struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 struct msix_entry *msix_entries;

 /* ethtool self-test resources */
 u32 test_icr;
 struct ixgbe_ring test_tx_ring;
 struct ixgbe_ring test_rx_ring;

 /* structs defined in ixgbe_hw.h */
 struct ixgbe_hw hw;
 u16 msg_enable;
 struct ixgbe_hw_stats stats;

 u64 tx_busy;
 unsigned int tx_ring_count;
 unsigned int rx_ring_count;

 u32 link_speed;
 bool link_up;
 unsigned long link_check_timeout;

 struct timer_list service_timer;
 struct work_struct service_task;

 /* Flow Director state */
 struct hlist_head fdir_filter_list;
 unsigned long fdir_overflow; /* number of times ATR was backed off */
 union ixgbe_atr_input fdir_mask;
 int fdir_filter_count;
 u32 fdir_pballoc;
 u32 atr_sample_rate;
 spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
 struct ixgbe_fcoe fcoe;
#endif
 u32 wol; /* Wake-on-LAN settings */

 u16 bd_number;

 u16 eeprom_verh;
 u16 eeprom_verl;
 u16 eeprom_cap;

 u32 interrupt_event;
 u32 led_reg;

 /* PTP / hardware timestamping state */
 struct ptp_clock *ptp_clock;
 struct ptp_clock_info ptp_caps;
 struct work_struct ptp_tx_work;
 struct sk_buff *ptp_tx_skb; /* skb awaiting a TX hw timestamp */
 unsigned long ptp_tx_start;
 unsigned long last_overflow_check;
 unsigned long last_rx_ptp_check;
 spinlock_t tmreg_lock; /* protects cyclecounter/timecounter */
 struct cyclecounter cc;
 struct timecounter tc;
 u32 base_incval;

 /* SR-IOV */
 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
 unsigned int num_vfs;
 struct vf_data_storage *vfinfo;
 int vf_rate_link_speed;
 struct vf_macvlans vf_mvs;
 struct vf_macvlans *mv_list;

 u32 timer_event_accumulator;
 u32 vferr_refcount;
 struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
 struct hwmon_buff ixgbe_hwmon_buff;
#endif
#ifdef CONFIG_DEBUG_FS
 struct dentry *ixgbe_dbg_adapter;
#endif

 u8 default_up; /* default user priority — TODO confirm semantics */
};
742
/* One software-tracked Flow Director perfect filter. */
struct ixgbe_fdir_filter {
 struct hlist_node fdir_node; /* entry in adapter->fdir_filter_list */
 union ixgbe_atr_input filter;
 u16 sw_idx; /* software filter index */
 u16 action; /* target queue (or drop) for matching packets */
};
749
/* Bit numbers for the atomic ixgbe_adapter.state word. */
enum ixgbe_state_t {
 __IXGBE_TESTING,
 __IXGBE_RESETTING,
 __IXGBE_DOWN,
 __IXGBE_SERVICE_SCHED,
 __IXGBE_IN_SFP_INIT,
 __IXGBE_PTP_RUNNING,
};
758
/* Driver-private data stored in skb->cb on the RX path (see IXGBE_CB). */
struct ixgbe_cb {
 union { /* Union defining head/tail partner */
  struct sk_buff *head;
  struct sk_buff *tail;
 };
 dma_addr_t dma;
 u16 append_cnt; /* number of RSC-appended fragments */
 bool page_released;
};
768#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
769
/* Index into the driver's board info table, one per supported MAC. */
enum ixgbe_boards {
 board_82598,
 board_82599,
 board_X540,
};
775
776extern struct ixgbe_info ixgbe_82598_info;
777extern struct ixgbe_info ixgbe_82599_info;
778extern struct ixgbe_info ixgbe_X540_info;
779#ifdef CONFIG_IXGBE_DCB
780extern const struct dcbnl_rtnl_ops dcbnl_ops;
781#endif
782
783extern char ixgbe_driver_name[];
784extern const char ixgbe_driver_version[];
785#ifdef IXGBE_FCOE
786extern char ixgbe_default_device_descr[];
787#endif
788
789extern void ixgbe_up(struct ixgbe_adapter *adapter);
790extern void ixgbe_down(struct ixgbe_adapter *adapter);
791extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
792extern void ixgbe_reset(struct ixgbe_adapter *adapter);
793extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
794extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
795extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
796extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
797extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
798extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
799extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
800extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
801 struct ixgbe_ring *);
802extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
803extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
804extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
805 u16 subdevice_id);
806extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
807extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
808 struct ixgbe_adapter *,
809 struct ixgbe_ring *);
810extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
811 struct ixgbe_tx_buffer *);
812extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
813extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
814extern int ixgbe_poll(struct napi_struct *napi, int budget);
815extern int ethtool_ioctl(struct ifreq *ifr);
816extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
817extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
818extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
819extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
820 union ixgbe_atr_hash_dword input,
821 union ixgbe_atr_hash_dword common,
822 u8 queue);
823extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
824 union ixgbe_atr_input *input_mask);
825extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
826 union ixgbe_atr_input *input,
827 u16 soft_id, u8 queue);
828extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
829 union ixgbe_atr_input *input,
830 u16 soft_id);
831extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
832 union ixgbe_atr_input *mask);
833extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
834extern void ixgbe_set_rx_mode(struct net_device *netdev);
835#ifdef CONFIG_IXGBE_DCB
836extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
837#endif
838extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
839extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
840extern void ixgbe_do_reset(struct net_device *netdev);
841#ifdef CONFIG_IXGBE_HWMON
842extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
843extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
844#endif
845#ifdef IXGBE_FCOE
846extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
847extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
848 struct ixgbe_tx_buffer *first,
849 u8 *hdr_len);
850extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
851 union ixgbe_adv_rx_desc *rx_desc,
852 struct sk_buff *skb);
853extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
854 struct scatterlist *sgl, unsigned int sgc);
855extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
856 struct scatterlist *sgl, unsigned int sgc);
857extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
858extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
859extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
860extern int ixgbe_fcoe_enable(struct net_device *netdev);
861extern int ixgbe_fcoe_disable(struct net_device *netdev);
862#ifdef CONFIG_IXGBE_DCB
863extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
864extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
865#endif
866extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
867extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
868 struct netdev_fcoe_hbainfo *info);
869extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
870#endif
871#ifdef CONFIG_DEBUG_FS
872extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
873extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
874extern void ixgbe_dbg_init(void);
875extern void ixgbe_dbg_exit(void);
876#else
877static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
878static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
879static inline void ixgbe_dbg_init(void) {}
880static inline void ixgbe_dbg_exit(void) {}
881#endif
/* Return the netdev TX queue paired with this hardware ring. */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
886
887extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
888extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
889extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
890extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
891extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
892 struct sk_buff *skb);
/* Attach the latched RX hardware timestamp to @skb, if the descriptor
 * indicates one was taken for this packet; no-op otherwise.
 */
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
      union ixgbe_adv_rx_desc *rx_desc,
      struct sk_buff *skb)
{
 if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
  return;

 __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);

 /* Update the last_rx_timestamp timer in order to enable watchdog check
  * for error case of latched timestamp on a dropped packet.
  */
 rx_ring->last_rx_timestamp = jiffies;
}
908
909extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
910 struct ifreq *ifr, int cmd);
911extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
912extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
913extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
914#ifdef CONFIG_PCI_IOV
915void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
916#endif
917
918#endif
919