1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H
15
16#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
17#define DEBUG
18#endif
19
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
23#include <linux/if_vlan.h>
24#include <linux/timer.h>
25#include <linux/mdio.h>
26#include <linux/list.h>
27#include <linux/pci.h>
28#include <linux/device.h>
29#include <linux/highmem.h>
30#include <linux/workqueue.h>
31#include <linux/vmalloc.h>
32#include <linux/i2c.h>
33
34#include "enum.h"
35#include "bitfield.h"
36
37
38
39
40
41
42
#define EFX_DRIVER_VERSION "3.1"

/* Extra consistency checks compiled in only for debug builds; in
 * production builds they expand to nothing so the checks are free.
 * NOTE: in the non-debug case the argument is not evaluated at all. */
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
52
53
54
55
56
57
58
/* Upper bound on the number of channels (IRQ/event-queue contexts);
 * each channel carries at most one RX queue. */
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS

/* TX queues come in EFX_TXQ_TYPES flavours per channel: the OFFLOAD
 * and HIGHPRI flag bits below combine into four queue types.  The
 * "core" queues are the ones exposed to the network stack (one per
 * traffic class per channel). */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD 1
#define EFX_TXQ_TYPE_HIGHPRI 2
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
71
72
73
74
75
76
77
78
79
80
81
82
83
/* A DMA-coherent buffer that is also entered into the NIC's buffer
 * table ("special" = visible to hardware via buffer-table entries). */
struct efx_special_buffer {
	void *addr;		/* kernel virtual address */
	dma_addr_t dma_addr;	/* bus address for DMA */
	unsigned int len;	/* length in bytes */
	int index;		/* first buffer-table entry index */
	int entries;		/* number of buffer-table entries used */
};
91
/* Progress of a hardware TX/RX queue flush request. */
enum efx_flush_state {
	FLUSH_NONE,	/* no flush pending */
	FLUSH_PENDING,	/* flush requested, completion not yet seen */
	FLUSH_FAILED,	/* hardware reported the flush failed */
	FLUSH_DONE,	/* flush completed successfully */
};
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
/* Software state for one entry of a TX descriptor ring. */
struct efx_tx_buffer {
	const struct sk_buff *skb;	/* skb to complete/free when this
					 * buffer completes; NULL for
					 * mid-packet descriptors */
	struct efx_tso_header *tsoh;	/* TSO header to free on completion,
					 * if any */
	dma_addr_t dma_addr;		/* DMA address of the fragment */
	unsigned short len;		/* fragment length in bytes */
	bool continuation;		/* true if the packet continues in the
					 * next descriptor */
	bool unmap_single;		/* unmap with dma_unmap_single(),
					 * otherwise dma_unmap_page() */
	unsigned short unmap_len;	/* length to unmap; 0 = nothing mapped */
};
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/* State of a hardware TX queue.  Fields are grouped by which path
 * touches them — mostly-read configuration, completion path, then
 * transmit path — each group starting on its own cache line
 * (____cacheline_aligned_in_smp) to limit false sharing between CPUs. */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;				/* hardware queue number */
	struct efx_channel *channel;		/* channel this queue completes on */
	struct netdev_queue *core_txq;		/* paired core network-stack TX queue */
	struct efx_tx_buffer *buffer;		/* per-descriptor software state ring */
	struct efx_special_buffer txd;		/* hardware descriptor ring */
	unsigned int ptr_mask;			/* ring size minus 1 (size is a power of 2) */
	bool initialised;
	enum efx_flush_state flushed;		/* progress of hardware queue flush */

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;	/* count of completed descriptors */
	unsigned int old_write_count;		/* cached snapshot of write_count */

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;	/* count of buffers inserted */
	unsigned int write_count;		/* count of descriptors written to hardware */
	unsigned int old_read_count;		/* cached snapshot of read_count */
	struct efx_tso_header *tso_headers_free;	/* free list of reusable TSO header buffers */
	unsigned int tso_bursts;		/* TSO statistics */
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;			/* number of descriptor pushes */

	/* Members shared between paths, written rarely: read_count value
	 * captured when the queue last drained; the top bit marks the
	 * captured value as valid. */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
};
208
209
210
211
212
213
214
215
216
217
218
/* Software state for one entry of an RX descriptor ring: either an
 * skb or a page, depending on the current RX allocation method. */
struct efx_rx_buffer {
	dma_addr_t dma_addr;	/* DMA address of the buffer data */
	union {
		struct sk_buff *skb;
		struct page *page;
	} u;
	unsigned int len;	/* buffer length in bytes */
	bool is_page;		/* true: u.page is valid; false: u.skb is */
};
228
229
230
231
232
233
234
235
236
237
238
239
/* Bookkeeping stored at the start of a page shared between RX buffers.
 * The zero-length, cacheline-aligned pad pushes the buffer data that
 * follows this struct onto its own cache line. */
struct efx_rx_page_state {
	unsigned refcnt;	/* number of RX buffers sharing this page */
	dma_addr_t dma_addr;	/* bus address of the page */

	unsigned int __pad[0] ____cacheline_aligned;
};
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
/* State of a hardware RX queue and its buffer refill logic. */
struct efx_rx_queue {
	struct efx_nic *efx;			/* owning NIC */
	struct efx_rx_buffer *buffer;		/* per-descriptor software state ring */
	struct efx_special_buffer rxd;		/* hardware descriptor ring */
	unsigned int ptr_mask;			/* ring size minus 1 (size is a power of 2) */

	int added_count;			/* buffers added to the ring */
	int notified_count;			/* buffers pushed to hardware */
	int removed_count;			/* buffers removed from the ring */
	unsigned int max_fill;			/* maximum fill level */
	unsigned int fast_fill_trigger;		/* refill when fill drops below this */
	unsigned int fast_fill_limit;		/* target level for fast refill */
	unsigned int min_fill;			/* minimum fill level observed */
	unsigned int min_overfill;
	unsigned int alloc_page_count;		/* allocations done in page mode */
	unsigned int alloc_skb_count;		/* allocations done in skb mode */
	struct timer_list slow_fill;		/* timer to retry refill after failure */
	unsigned int slow_fill_count;		/* number of slow-fill timer firings */

	enum efx_flush_state flushed;		/* progress of hardware queue flush */
};
290
291
292
293
294
295
296
297
298
299
/* A plain DMA-coherent buffer (not entered in the NIC buffer table;
 * contrast with struct efx_special_buffer). */
struct efx_buffer {
	void *addr;		/* kernel virtual address */
	dma_addr_t dma_addr;	/* bus address for DMA */
	unsigned int len;	/* length in bytes */
};

/* Strategy for allocating RX buffers: fixed skb or page mode, or
 * adapt automatically based on observed traffic. */
enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/* A channel: one interrupt plus an event queue, together with the RX
 * queue and TX queues that complete onto it.  Serviced in NAPI context. */
struct efx_channel {
	struct efx_nic *efx;			/* owning NIC */
	int channel;				/* channel index */
	bool enabled;				/* channel is enabled */
	int irq;				/* assigned IRQ number */
	unsigned int irq_moderation;		/* IRQ moderation setting */
	struct net_device *napi_dev;		/* net device backing the NAPI context */
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;	/* hardware event queue */
	unsigned int eventq_mask;		/* event ring size minus 1 */
	unsigned int eventq_read_ptr;		/* current event read pointer */
	unsigned int last_eventq_read_ptr;

	unsigned int irq_count;			/* IRQ count, for adaptive moderation */
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;		/* accelerated-RFS filters inserted */
#endif

	/* Scoring used to choose between skb and page RX allocation
	 * (see enum efx_rx_alloc_method) */
	int rx_alloc_level;
	int rx_alloc_push_pages;

	/* Per-channel error and event counters */
	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;

	/* RX buffer currently being processed, if any, and whether the
	 * hardware reported its checksum as good */
	struct efx_rx_buffer *rx_pkt;
	bool rx_pkt_csummed;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
388
/* Identification LED control: force off/on, or restore default
 * (hardware-controlled) behaviour. */
enum efx_led_mode {
	EFX_LED_OFF = 0,
	EFX_LED_ON = 1,
	EFX_LED_DEFAULT = 2
};
394
/* Look up the printable name for an enum-like value.
 * @val: value to look up (must be non-negative)
 * @member: table prefix; expects <member>_names[] and <member>_max
 * Out-of-range values map to "(invalid)".  The whole expansion is
 * parenthesised so the ternary cannot rebind when the macro is used
 * inside a larger expression. */
#define STRING_TABLE_LOOKUP(val, member) \
	(((val) < member ## _max) ? member ## _names[val] : "(invalid)")
397
/* Name table for loopback modes; use via the LOOPBACK_MODE() wrapper. */
extern const char *efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

/* Name table for reset types; use via the RESET_TYPE() wrapper. */
extern const char *efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
407
/* Interrupt mode, in decreasing order of preference (MSI-X first).
 * The ordering is relied on by EFX_INT_MODE_USE_MSI() below. */
enum efx_int_mode {

	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

/* Lifecycle state of the NIC as a whole.  NOTE(review): names suggest
 * init -> running -> fini, with DISABLED as a terminal error/suspend
 * state — confirm against the state machine in the .c files. */
enum nic_state {
	STATE_INIT = 0,
	STATE_RUNNING = 1,
	STATE_FINI = 2,
	STATE_DISABLED = 3,
	STATE_MAX,
};
424
425
426
427
428
429
430
431
/* Alignment offset applied ahead of RX packet data so the IP header is
 * aligned; skipped on architectures with efficient unaligned access. */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif

/* Alignment applied to skb-mode RX buffers. */
#define EFX_PAGE_SKB_ALIGN 2
446
447
struct efx_nic;

/* Flow-control bitmask: RX/TX pause bits from ethtool, plus a
 * driver-private "autonegotiate" bit. */
#define EFX_FC_RX FLOW_CTRL_RX
#define EFX_FC_TX FLOW_CTRL_TX
#define EFX_FC_AUTO 4
454
455
456
457
458
459
460
461
/* Link state as last observed from the PHY/MAC. */
struct efx_link_state {
	bool up;		/* link is up */
	bool fd;		/* full duplex */
	u8 fc;			/* flow-control flags (EFX_FC_*) */
	unsigned int speed;	/* link speed; presumably Mbps — confirm */
};
468
469static inline bool efx_link_state_equal(const struct efx_link_state *left,
470 const struct efx_link_state *right)
471{
472 return left->up == right->up && left->fd == right->fd &&
473 left->fc == right->fc && left->speed == right->speed;
474}
475
476
477
478
479
480
481
/* MAC-layer operations, implemented once per MAC type. */
struct efx_mac_operations {
	int (*reconfigure) (struct efx_nic *efx);	/* apply current MAC settings */
	void (*update_stats) (struct efx_nic *efx);	/* refresh MAC statistics */
	bool (*check_fault)(struct efx_nic *efx);	/* true if a MAC fault is present */
};
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
/* PHY operations, implemented once per PHY type. */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);	/* identify PHY, allocate state */
	int (*init) (struct efx_nic *efx);	/* initialise PHY hardware */
	void (*fini) (struct efx_nic *efx);	/* shut down PHY hardware */
	void (*remove) (struct efx_nic *efx);	/* free PHY state */
	int (*reconfigure) (struct efx_nic *efx);	/* apply mode/advertising changes */
	bool (*poll) (struct efx_nic *efx);	/* poll link; presumably returns
						 * true if state changed — confirm */
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);	/* report ethtool settings */
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);	/* apply ethtool settings */
	void (*set_npage_adv) (struct efx_nic *efx, u32);	/* set next-page advertising */
	int (*test_alive) (struct efx_nic *efx);	/* check the PHY responds */
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);	/* name of self-test @index */
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);	/* run PHY self-tests */
};
522
523
524
525
526
527
528
529
530
/* PHY operating mode flags (combinable).  Any flag other than
 * TX_DISABLED makes the PHY count as disabled — see
 * efx_phy_mode_disabled(). */
enum efx_phy_mode {
	PHY_MODE_NORMAL = 0,
	PHY_MODE_TX_DISABLED = 1,
	PHY_MODE_LOW_POWER = 2,
	PHY_MODE_OFF = 4,
	PHY_MODE_SPECIAL = 8,
};
538
539static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
540{
541 return !!(mode & ~PHY_MODE_TX_DISABLED);
542}
543
544
545
546
547
548
549
550
/* Hardware MAC statistics, refreshed by the MAC update_stats
 * operation.  Byte counters are u64; frame counters are unsigned
 * long.  The _lt64/_64/_65_to_127/... fields bucket frames by length;
 * "15xx" presumably means up to the standard ~1518-byte maximum and
 * "jumbo" beyond it — confirm against the MAC register map. */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	unsigned long tx_packets;
	unsigned long tx_bad;
	unsigned long tx_pause;
	unsigned long tx_control;
	unsigned long tx_unicast;
	unsigned long tx_multicast;
	unsigned long tx_broadcast;
	unsigned long tx_lt64;
	unsigned long tx_64;
	unsigned long tx_65_to_127;
	unsigned long tx_128_to_255;
	unsigned long tx_256_to_511;
	unsigned long tx_512_to_1023;
	unsigned long tx_1024_to_15xx;
	unsigned long tx_15xx_to_jumbo;
	unsigned long tx_gtjumbo;
	unsigned long tx_collision;
	unsigned long tx_single_collision;
	unsigned long tx_multiple_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_deferred;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_deferred;
	unsigned long tx_non_tcpudp;
	unsigned long tx_mac_src_error;
	unsigned long tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	unsigned long rx_packets;
	unsigned long rx_good;
	unsigned long rx_bad;
	unsigned long rx_pause;
	unsigned long rx_control;
	unsigned long rx_unicast;
	unsigned long rx_multicast;
	unsigned long rx_broadcast;
	unsigned long rx_lt64;
	unsigned long rx_64;
	unsigned long rx_65_to_127;
	unsigned long rx_128_to_255;
	unsigned long rx_256_to_511;
	unsigned long rx_512_to_1023;
	unsigned long rx_1024_to_15xx;
	unsigned long rx_15xx_to_jumbo;
	unsigned long rx_gtjumbo;
	unsigned long rx_bad_lt64;
	unsigned long rx_bad_64_to_15xx;
	unsigned long rx_bad_15xx_to_jumbo;
	unsigned long rx_bad_gtjumbo;
	unsigned long rx_overflow;
	unsigned long rx_missed;
	unsigned long rx_false_carrier;
	unsigned long rx_symbol_error;
	unsigned long rx_align_error;
	unsigned long rx_length_error;
	unsigned long rx_internal_error;
	unsigned long rx_good_lt64;
};
614
615
/* Number of bits used in the multicast filter hash. */
#define EFX_MCAST_HASH_BITS 8

/* Number of (one-bit) entries in the multicast hash table. */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* The multicast hash bitmap, viewable either as bytes or as the
 * hardware oword units it is written to the NIC in. */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
626
627struct efx_filter_state;
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
/* The driver's per-NIC state.  Ordered by write frequency: rarely
 * written configuration first; the monitor/statistics fields that are
 * written often are pushed onto their own cache line at the end. */
struct efx_nic {

	/* The following fields should be written very rarely */
	char name[IFNAMSIZ];			/* device name */
	struct pci_dev *pci_dev;		/* underlying PCI device */
	const struct efx_nic_type *type;	/* hardware-revision hooks and constants */
	int legacy_irq;				/* IRQ number when not using MSI(-X) */
	bool legacy_irq_enabled;
	struct workqueue_struct *workqueue;	/* per-NIC workqueue (resets etc.) */
	char workqueue_name[16];
	struct work_struct reset_work;		/* deferred reset handler */
	resource_size_t membase_phys;		/* physical address of the register BAR */
	void __iomem *membase;			/* mapped register BAR */

	enum efx_int_mode interrupt_mode;	/* MSI-X / MSI / legacy */
	bool irq_rx_adaptive;			/* adaptive IRQ moderation enabled */
	unsigned int irq_rx_moderation;
	u32 msg_enable;				/* netif message level bitmask */

	enum nic_state state;			/* device lifecycle state */
	unsigned long reset_pending;		/* bitmask of pending reset types */

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];

	unsigned rxq_entries;			/* RX descriptor-ring size */
	unsigned txq_entries;			/* TX descriptor-ring size */
	unsigned next_buffer_table;		/* next free buffer-table index */
	unsigned n_channels;			/* number of channels in use */
	unsigned n_rx_channels;			/* channels carrying RX queues */
	unsigned tx_channel_offset;		/* index of first TX-carrying channel */
	unsigned n_tx_channels;			/* channels carrying TX queues */
	unsigned int rx_buffer_len;		/* RX buffer length in bytes */
	unsigned int rx_buffer_order;		/* page-allocation order for RX buffers */
	u8 rx_hash_key[40];			/* RSS hash key */
	u32 rx_indir_table[128];		/* RSS indirection table */

	unsigned int_error_count;		/* fatal-interrupt error tracking */
	unsigned long int_error_expire;

	struct efx_buffer irq_status;		/* DMA buffer for interrupt status */
	unsigned irq_zero_count;
	unsigned fatal_irq_level;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;		/* flash/EEPROM MTD partitions */
#endif

	void *nic_data;				/* revision-specific private data */

	struct mutex mac_lock;			/* protects MAC/PHY/port configuration */
	struct work_struct mac_work;
	bool port_enabled;

	bool port_initialized;
	struct net_device *net_dev;		/* associated net device */

	struct efx_buffer stats_buffer;		/* DMA buffer for hardware statistics */

	const struct efx_mac_operations *mac_op;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;				/* PHY-specific private data */
	struct mdio_if_info mdio;		/* MDIO management interface */
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;			/* ethtool ADVERTISED_* mask */
	struct efx_link_state link_state;	/* last observed link state */
	unsigned int n_link_state_changes;

	bool promiscuous;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;				/* requested flow control (EFX_FC_*) */

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;	/* current loopback mode */
	u64 loopback_modes;			/* bitmask of supported loopback modes */

	void *loopback_selftest;		/* non-NULL while a selftest is running */

	struct efx_filter_state *filter_state;	/* RX filter table state */

	/* The following fields may be written more often */
	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;			/* serialises bus-interface accesses */
	volatile signed int last_irq_cpu;	/* CPU of last interrupt */
	unsigned n_rx_nodesc_drop_cnt;		/* RX drops due to no descriptor */
	struct efx_mac_stats mac_stats;		/* MAC statistics snapshot */
	spinlock_t stats_lock;			/* protects mac_stats updates */
};
803
/* Return non-zero if the net device has been registered with the
 * network stack. */
static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}
808
809
810
811
812
813static inline const char *efx_dev_name(struct efx_nic *efx)
814{
815 return efx_dev_registered(efx) ? efx->name : "";
816}
817
/* Physical port number of this function, as exposed via the net
 * device's dev_id. */
static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->net_dev->dev_id;
}
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
/* Hardware operations and constants for one controller revision. */
struct efx_nic_type {
	int (*probe)(struct efx_nic *efx);	/* allocate revision-specific resources */
	void (*remove)(struct efx_nic *efx);	/* free revision-specific resources */
	int (*init)(struct efx_nic *efx);	/* initialise the hardware */
	void (*fini)(struct efx_nic *efx);	/* shut down the hardware */
	void (*monitor)(struct efx_nic *efx);	/* periodic hardware monitor */
	enum reset_type (*map_reset_reason)(enum reset_type reason);	/* reset reason -> method */
	int (*map_reset_flags)(u32 *flags);	/* ethtool reset flags -> method */
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);	/* true if event consumed */
	void (*prepare_flush)(struct efx_nic *efx);	/* prepare for TX/RX queue flush */
	void (*update_stats)(struct efx_nic *efx);	/* refresh statistics buffers */
	void (*start_stats)(struct efx_nic *efx);	/* start periodic stats collection */
	void (*stop_stats)(struct efx_nic *efx);	/* stop periodic stats collection */
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);	/* drive the identity LED */
	void (*push_irq_moderation)(struct efx_channel *channel);	/* write moderation to hardware */
	void (*push_multicast_hash)(struct efx_nic *efx);	/* write multicast hash to hardware */
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);	/* restore WoL after reset */
	int (*test_registers)(struct efx_nic *efx);	/* register read/write self-test */
	int (*test_nvram)(struct efx_nic *efx);		/* NVRAM integrity self-test */
	const struct efx_mac_operations *default_mac_ops;

	/* Hardware constants for this revision */
	int revision;				/* controller revision number */
	unsigned int mem_map_size;		/* size of the register BAR mapping */
	unsigned int txd_ptr_tbl_base;		/* register-table base addresses */
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;			/* widest DMA mask supported */
	unsigned int rx_buffer_hash_size;	/* bytes of hash prefix in RX buffers */
	unsigned int rx_buffer_padding;		/* padding required on RX buffers */
	unsigned int max_interrupt_mode;	/* highest efx_int_mode supported */
	unsigned int phys_addr_channels;	/* channels using physical addressing */
	unsigned int tx_dc_base;		/* descriptor-cache base addresses */
	unsigned int rx_dc_base;
	u32 offload_features;			/* NETIF_F_* features supported */
};
913
914
915
916
917
918
919
/* Return channel @index; @index must be below efx->n_channels. */
static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}
926
927
/* Iterate over used channels in order; terminates by yielding NULL
 * once the last channel (index n_channels - 1) has been visited. */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
933
/* Return TX queue of @type on the @index-th TX-carrying channel. */
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
941
/* True if this channel is one of those carrying TX queues.
 * Note the arithmetic is unsigned (tx_channel_offset is unsigned):
 * a channel index below the offset wraps to a huge value and so
 * correctly compares false against n_tx_channels. */
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}
947
/* Return this channel's TX queue of the given @type; the channel must
 * carry TX queues (see efx_channel_has_tx_queues()). */
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}
955
956static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
957{
958 return !(tx_queue->efx->net_dev->num_tc < 2 &&
959 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
960}
961
962
/* Iterate over a channel's in-use TX queues.  Stops at the first
 * unused queue (efx_tx_queue_used() in the loop condition), so the
 * used queues must be contiguous at the start of the array. */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all EFX_TXQ_TYPES TX queues of a channel, used or not. */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	for (_tx_queue = (_channel)->tx_queue;				\
	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
	     _tx_queue++)
977
/* Return the RX queue on the @index-th RX-carrying channel. */
static inline struct efx_rx_queue *
efx_get_rx_queue(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
	return &efx->channel[index]->rx_queue;
}
984
/* True if this channel carries an RX queue (the first n_rx_channels
 * channels do). */
static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->channel < channel->efx->n_rx_channels;
}

/* Return this channel's RX queue; the channel must carry one. */
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}
996
997
/* Iterate over a channel's RX queues: at most one per channel, so this
 * runs the body either zero or one time. */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)
1005
/* Map an RX queue back to its owning channel; valid because the RX
 * queue is embedded directly in struct efx_channel. */
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

/* Index of an RX queue, which equals its channel's index. */
static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}
1016
1017
1018
1019
/* Return the software state for RX descriptor @index; the caller must
 * ensure @index is within the ring (mask with rx_queue->ptr_mask). */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}
1025
1026
/* Set bit @nr in a little-endian bitmap stored as a byte array. */
static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr >> 3] |= (unsigned char)(1u << (nr & 7));
}
1031
1032
/* Clear bit @nr in a little-endian bitmap stored as a byte array. */
static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr >> 3] &= (unsigned char)~(1u << (nr & 7));
}
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
/* Maximum frame length for a given MTU: Ethernet header + possible
 * VLAN tag + 4 (presumably FCS — confirm), rounded up to an 8-byte
 * boundary, plus 16 bytes of slack (presumably for hardware prefetch/
 * padding requirements — confirm against the datasheet). */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4 + 7) & ~7) + 16)
1058
1059
1060#endif
1061