1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H
15
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/ethtool.h>
19#include <linux/if_vlan.h>
20#include <linux/timer.h>
21#include <linux/mdio.h>
22#include <linux/list.h>
23#include <linux/pci.h>
24#include <linux/device.h>
25#include <linux/highmem.h>
26#include <linux/workqueue.h>
27#include <linux/mutex.h>
28#include <linux/rwsem.h>
29#include <linux/vmalloc.h>
30#include <linux/i2c.h>
31#include <linux/mtd/mtd.h>
32#include <net/busy_poll.h>
33
34#include "enum.h"
35#include "bitfield.h"
36#include "filter.h"
37
38
39
40
41
42
43
#define EFX_DRIVER_VERSION "4.0"

/* Extra sanity checks that are too expensive for production builds.
 * NOTE: in non-DEBUG builds the argument is not evaluated at all, so it
 * must never have side effects. */
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
53
54
55
56
57
58
59
/* Upper bound on IRQ/event channels; RX queues map 1:1 onto channels. */
#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
/* Indices into efx_nic::extra_channel_type for the dedicated
 * SR-IOV and PTP channels. */
#define EFX_EXTRA_CHANNEL_IOV 0
#define EFX_EXTRA_CHANNEL_PTP 1
#define EFX_MAX_EXTRA_CHANNELS 2U

/* TX queues per channel: one per traffic class (checksum-offload
 * variants and high-priority variants — see EFX_TXQ_TYPE_*). */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD 1	/* flag: checksum offload enabled */
#define EFX_TXQ_TYPE_HIGHPRI 2	/* flag: high-priority traffic class */
#define EFX_TXQ_TYPES 4		/* all combinations of the two flags */
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)

/* Maximum possible MTU the driver supports (9K jumbo frames) */
#define EFX_MAX_MTU (9 * 1024)

/* Minimum MTU (Ethernet minimum datagram size) */
#define EFX_MIN_MTU 68

/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
 * with room for the software state (struct efx_rx_page_state) and
 * alignment before the payload. */
#define EFX_RX_USR_BUF_SIZE (2048 - 256)

/* Alignment of the start of RX DMA.  If the IP header must already be
 * offset by NET_IP_ALIGN, 4-byte alignment is sufficient; otherwise
 * align buffers to a full cacheline. */
#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
#else
#define EFX_RX_BUF_ALIGNMENT 4
#endif
96
97
98struct efx_ptp_data;
99struct hwtstamp_config;
100
101struct efx_self_tests;
102
103
104
105
106
107
108
109
110
111
/**
 * struct efx_buffer - a general-purpose DMA-coherent buffer
 * @addr: host virtual address of the buffer
 * @dma_addr: DMA bus address of the buffer
 * @len: buffer length, in bytes
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
/**
 * struct efx_special_buffer - a DMA buffer entered into the NIC buffer table
 * @buf: the underlying DMA-coherent buffer
 * @index: first entry occupied in the NIC buffer table
 * @entries: number of buffer-table entries occupied
 *
 * Used to back hardware rings — see efx_tx_queue::txd, efx_rx_queue::rxd
 * and efx_channel::eventq.
 */
struct efx_special_buffer {
	struct efx_buffer buf;
	unsigned int index;
	unsigned int entries;
};
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/**
 * struct efx_tx_buffer - software state for one TX descriptor
 * @skb: socket buffer to free on completion (when %EFX_TX_BUF_SKB set)
 * @heap_buf: heap buffer to free on completion (when %EFX_TX_BUF_HEAP set)
 * @option: hardware option descriptor (when %EFX_TX_BUF_OPTION set)
 * @dma_addr: DMA address of the fragment, otherwise
 * @flags: combination of the EFX_TX_BUF_* flags below
 * @len: length of this fragment, in bytes
 * @unmap_len: length to DMA-unmap on completion; 0 if nothing to unmap
 * @dma_offset: offset of this fragment within its DMA mapping
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	union {
		efx_qword_t option;
		dma_addr_t dma_addr;
	};
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
	unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT 1	/* not the final descriptor of a packet */
#define EFX_TX_BUF_SKB 2	/* @skb is valid and owned by the queue */
#define EFX_TX_BUF_HEAP 4	/* @heap_buf is valid and must be freed */
#define EFX_TX_BUF_MAP_SINGLE 8	/* buffer was mapped as a single region */
#define EFX_TX_BUF_OPTION 0x10	/* @option is valid; no DMA fragment */
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/* State of a hardware TX queue.  Members are grouped by cacheline
 * (____cacheline_aligned_in_smp) so that the completion path and the
 * insertion (xmit) path dirty disjoint lines. */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;	/* owning NIC */
	unsigned queue;			/* hardware queue number */
	unsigned int tso_version;	/* TSO firmware/implementation version */
	struct efx_channel *channel;	/* owning channel */
	struct netdev_queue *core_txq;	/* corresponding core (netdev) TX queue */
	struct efx_tx_buffer *buffer;	/* per-descriptor software state ring */
	struct efx_buffer *tsoh_page;	/* pages backing TSO header buffers */
	struct efx_special_buffer txd;	/* hardware descriptor ring */
	unsigned int ptr_mask;		/* ring size minus 1 (size is power of 2) */
	void __iomem *piobuf;		/* PIO buffer region, if assigned */
	unsigned int piobuf_offset;	/* this queue's offset within @piobuf */
	bool initialised;		/* hardware queue has been initialised */

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;	/* current read pointer */
	unsigned int old_write_count;	/* cached snapshot of write_count */
	unsigned int merge_events;	/* count of merged TX completion events */
	unsigned int bytes_compl;	/* bytes completed in the current pass */
	unsigned int pkts_compl;	/* packets completed in the current pass */

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;	/* current insert pointer */
	unsigned int write_count;	/* current write pointer */
	unsigned int old_read_count;	/* cached snapshot of read_count */
	unsigned int tso_bursts;	/* number of TSO transmissions started */
	unsigned int tso_long_headers;	/* number of oversized TSO headers seen */
	unsigned int tso_packets;	/* number of packets sent via TSO */
	unsigned int pushes;		/* descriptor pushes (doorbell with data) */
	unsigned int pio_packets;	/* packets sent through the PIO buffer */
	bool xmit_more_available;	/* another packet is expected imminently */

	unsigned long tx_packets;	/* total packets transmitted */

	/* Members shared between paths, updated less frequently */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
	/* Flag bit set in empty_read_count when the recorded value is valid */
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;	/* non-zero while a queue flush is pending */
};
271
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * struct efx_rx_buffer - software state for one RX buffer
 * @dma_addr: DMA base address of the buffer
 * @page: the page backing this buffer
 * @page_offset: offset of the received data within @page
 * @len: buffer length, in bytes
 * @flags: combination of the EFX_RX_* flags below
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_BUF_LAST_IN_PAGE 0x0001	/* last buffer sharing its page */
#define EFX_RX_PKT_CSUMMED 0x0002	/* hardware validated the checksum */
#define EFX_RX_PKT_DISCARD 0x0004	/* packet must be discarded */
#define EFX_RX_PKT_TCP 0x0040		/* packet was classified as TCP */
#define EFX_RX_PKT_PREFIX_LEN 0x0080	/* take length from the HW RX prefix */
296
297
298
299
300
301
302
303
304
305
/**
 * struct efx_rx_page_state - per-page state for RX buffer recycling,
 *	stored at the start of each page used for receive buffers
 * @dma_addr: DMA address of the page
 *
 * The zero-length, cacheline-aligned @__pad member pads the structure so
 * that the RX data following it begins on a fresh cacheline.
 */
struct efx_rx_page_state {
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
/* State of a hardware RX queue and its page-recycling ring. */
struct efx_rx_queue {
	struct efx_nic *efx;		/* owning NIC */
	int core_index;			/* index of core (netdev) RX queue, or
					 * negative if none (see
					 * efx_channel_has_rx_queue()) */
	struct efx_rx_buffer *buffer;	/* per-descriptor software state ring */
	struct efx_special_buffer rxd;	/* hardware descriptor ring */
	unsigned int ptr_mask;		/* ring size minus 1 (size is power of 2) */
	bool refill_enabled;		/* refilling with fresh buffers allowed */
	bool flush_pending;		/* a hardware flush has been requested */

	unsigned int added_count;	/* buffers added to the software ring */
	unsigned int notified_count;	/* buffers pushed to the NIC */
	unsigned int removed_count;	/* buffers removed from the ring */
	unsigned int scatter_n;		/* fragments seen in current packet */
	unsigned int scatter_len;	/* bytes seen in current packet */
	struct page **page_ring;	/* ring of pages kept for recycling */
	unsigned int page_add;		/* insertion point into @page_ring */
	unsigned int page_remove;	/* removal point from @page_ring */
	unsigned int page_recycle_count;	/* pages successfully recycled */
	unsigned int page_recycle_failed;	/* pages not recyclable (in use) */
	unsigned int page_recycle_full;	/* pages dropped: recycle ring full */
	unsigned int page_ptr_mask;	/* @page_ring size minus 1 */
	unsigned int max_fill;		/* maximum fill level of the ring */
	unsigned int fast_fill_trigger;	/* fill level that triggers fast refill */
	unsigned int min_fill;		/* minimum fill level observed */
	unsigned int min_overfill;	/* minimum overfill observed */
	unsigned int recycle_count;	/* recycle ring size */
	struct timer_list slow_fill;	/* timer for retrying refill later */
	unsigned int slow_fill_count;	/* times the slow-fill timer fired */

	unsigned long rx_packets;	/* total packets received */
};
377
/* Progression of a channel's time-sync event subscription, used for
 * hardware timestamping (see sync_timestamp_* in struct efx_channel). */
enum efx_sync_events_state {
	SYNC_EVENTS_DISABLED = 0,	/* sync events not requested */
	SYNC_EVENTS_QUIESCENT,		/* requested, currently quiescent */
	SYNC_EVENTS_REQUESTED,		/* requested, awaiting first event */
	SYNC_EVENTS_VALID,		/* sync timestamps currently valid */
};
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
/* An IRQ/NAPI channel: one event queue plus the RX queue and TX queues
 * serviced through it. */
struct efx_channel {
	struct efx_nic *efx;		/* owning NIC */
	int channel;			/* channel number (index into efx->channel) */
	const struct efx_channel_type *type;	/* channel type operations */
	bool eventq_init;		/* event queue has been initialised */
	bool enabled;			/* channel enabled indicator */
	int irq;			/* IRQ number (MSI/MSI-X) */
	unsigned int irq_moderation_us;	/* IRQ moderation value, microseconds */
	struct net_device *napi_dev;	/* net device used with NAPI */
	struct napi_struct napi_str;	/* NAPI control structure */
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long busy_poll_state;	/* NAPI/busy-poll lock word, see
					 * enum efx_channel_busy_poll_state */
#endif
	struct efx_special_buffer eventq;	/* event queue buffer */
	unsigned int eventq_mask;	/* event queue size minus 1 */
	unsigned int eventq_read_ptr;	/* event queue read pointer */
	int event_test_cpu;		/* last CPU to handle an IRQ test event */

	unsigned int irq_count;		/* IRQs seen (for adaptive moderation) */
	unsigned int irq_mod_score;	/* moderation score (adaptive moderation) */
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;	/* accelerated-RFS filters inserted */
	/* Sentinel stored in rps_flow_id[] for an unused entry */
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
	u32 *rps_flow_id;		/* flow IDs of inserted RFS filters */
#endif

	/* RX error/event counters, exposed via ethtool statistics */
	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;
	unsigned int n_rx_merge_events;
	unsigned int n_rx_merge_packets;

	unsigned int rx_pkt_n_frags;	/* fragment count of current RX packet */
	unsigned int rx_pkt_index;	/* index of its first buffer in rx_queue */

	struct efx_rx_queue rx_queue;	/* this channel's RX queue */
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];	/* its TX queues */

	enum efx_sync_events_state sync_events_state;	/* time-sync state */
	u32 sync_timestamp_major;	/* most recent sync event, major part */
	u32 sync_timestamp_minor;	/* most recent sync event, minor part */
};
479
480#ifdef CONFIG_NET_RX_BUSY_POLL
/* Bit-mask states of efx_channel::busy_poll_state.  The word acts as a
 * lock shared between the NAPI poller and busy-poll callers: NAPI,
 * POLL and DISABLE are independent bits (the *_BIT constants are the
 * corresponding bit numbers for set_bit()/clear_bit()). */
enum efx_channel_busy_poll_state {
	EFX_CHANNEL_STATE_IDLE = 0,		/* nobody owns the channel */
	EFX_CHANNEL_STATE_NAPI = BIT(0),	/* NAPI owns the channel */
	EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
	EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),	/* NAPI wants the channel */
	EFX_CHANNEL_STATE_POLL_BIT = 2,
	EFX_CHANNEL_STATE_POLL = BIT(2),	/* a busy-poller owns it */
	EFX_CHANNEL_STATE_DISABLE_BIT = 3,	/* busy polling disabled */
};
490
/* Initialise the busy-poll lock word to the unowned (IDLE) state. */
static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
495
496
/* Called from the NAPI poll handler to claim the channel for NAPI
 * processing.  Returns false if a busy-poller currently owns the
 * channel; in that case the NAPI_REQ bit is left set so the poller
 * hands the channel over as soon as possible. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	unsigned long prev, old = READ_ONCE(channel->busy_poll_state);

	while (1) {
		switch (old) {
		case EFX_CHANNEL_STATE_POLL:
			/* Busy-poller owns the channel: record that NAPI
			 * wants it, then report failure. */
			set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
				&channel->busy_poll_state);
			/* fallthrough */
		case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
			return false;
		default:
			break;
		}
		/* Try to move IDLE (or other non-POLL state) -> NAPI */
		prev = cmpxchg(&channel->busy_poll_state, old,
			       EFX_CHANNEL_STATE_NAPI);
		if (unlikely(prev != old)) {
			/* Lost a race with a concurrent state change;
			 * retry against the value we actually observed. */
			old = prev;
			continue;
		}
		return true;
	}
}
525
/* Release NAPI ownership of the channel; pairs with
 * efx_channel_lock_napi(). */
static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
	/* Publish all state updates made under the lock before releasing */
	smp_wmb();
	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}

/* Try to claim the channel for busy polling.  Fails unless the lock
 * word is exactly IDLE, i.e. also fails while NAPI owns the channel or
 * the DISABLE bit is set. */
static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
	return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
		       EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
}

/* Release busy-poll ownership.  Only the POLL bit is cleared, so any
 * pending NAPI_REQ or DISABLE request remains visible. */
static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
	clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}

/* True while a busy-poller owns the channel. */
static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}

/* Re-enable busy polling on the channel by clearing the DISABLE bit. */
static inline void efx_channel_enable(struct efx_channel *channel)
{
	clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
			 &channel->busy_poll_state);
}

/* Request that busy-pollers stay off the channel.  Returns false if a
 * poller still owns it at the time of the call; judging by the return
 * value the caller is expected to retry until true — confirm against
 * callers. */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
	set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);

	return !efx_channel_busy_polling(channel);
}
565
566#else
567
/* Stub implementations used when busy polling is not compiled in:
 * NAPI always acquires the channel and busy-poll attempts always fail. */
static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
}

static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	return true;
}

static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
}

static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
}

static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_enable(struct efx_channel *channel)
{
}

static inline bool efx_channel_disable(struct efx_channel *channel)
{
	return true;
}
603#endif
604
605
606
607
608
609
610
611
612
613
/**
 * struct efx_msi_context - context for each MSI interrupt
 * @efx: the associated NIC
 * @index: index of the channel/IRQ this context belongs to
 * @name: name to register for the IRQ handler; sized for the interface
 *	name plus a short suffix (hence IFNAMSIZ + 6)
 */
struct efx_msi_context {
	struct efx_nic *efx;
	unsigned int index;
	char name[IFNAMSIZ + 6];
};
619
620
621
622
623
624
625
626
627
628
629
630
631
632
/**
 * struct efx_channel_type - operations distinguishing channel flavours
 *	(ordinary traffic channels vs. the extra SR-IOV/PTP channels)
 * @handle_no_channel: handle failure to allocate an extra channel
 * @pre_probe: set up (or reject) a new channel before it is used
 * @post_remove: tear down remaining state after channel removal
 * @get_name: generate the channel's name (e.g. for its IRQ handler)
 * @copy: duplicate the channel's state when reallocating channels;
 *	presumably NULL means the channel cannot be copied — confirm
 *	against the channel-allocation code
 * @receive_skb: deliver a received skb; returns true if it consumed it
 * @keep_eventq: keep the event queue initialised even while the
 *	channel itself is disabled
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};
642
/* Identification LED control modes */
enum efx_led_mode {
	EFX_LED_OFF = 0,	/* force LED off */
	EFX_LED_ON = 1,		/* force LED on */
	EFX_LED_DEFAULT = 2	/* restore normal (hardware) behaviour */
};
648
/* Bounds-checked lookup in a <member>_names[] string table guarded by a
 * <member>_max count (see e.g. efx_loopback_mode below); out-of-range
 * values yield "(invalid)".  The expansion is fully parenthesised so the
 * conditional expression cannot mis-associate when the macro is embedded
 * in a larger expression. */
#define STRING_TABLE_LOOKUP(val, member) \
	(((val) < member ## _max) ? member ## _names[val] : "(invalid)")
651
/* Name tables defined elsewhere in the driver, used with
 * STRING_TABLE_LOOKUP() to pretty-print enum values. */
extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
661
/* Interrupt modes, in order of decreasing preference */
enum efx_int_mode {
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* insert new modes before this line */
};
/* True when the NIC uses message-signalled interrupts (MSI-X or MSI) */
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
670
/* Coarse lifecycle state of the NIC as a whole */
enum nic_state {
	STATE_UNINIT = 0,	/* being probed/removed, or frozen/suspended */
	STATE_READY = 1,	/* hardware ready, netdev operational */
	STATE_DISABLED = 2,	/* disabled after a fatal hardware error */
	STATE_RECOVERY = 3,	/* recovering (e.g. from a PCI error) —
				 * NOTE(review): inferred from name; confirm */
};

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field: RX/TX reuse the core FLOW_CTRL_*
 * bits, EFX_FC_AUTO additionally requests autonegotiated flow control. */
#define EFX_FC_RX FLOW_CTRL_RX
#define EFX_FC_TX FLOW_CTRL_TX
#define EFX_FC_AUTO 4
685
686
687
688
689
690
691
692
/**
 * struct efx_link_state - current state of the physical link
 * @up: link is up
 * @fd: link is full duplex
 * @fc: actual flow control flags (EFX_FC_RX / EFX_FC_TX)
 * @speed: link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};
699
700static inline bool efx_link_state_equal(const struct efx_link_state *left,
701 const struct efx_link_state *right)
702{
703 return left->up == right->up && left->fd == right->fd &&
704 left->fc == right->fc && left->speed == right->speed;
705}
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
/**
 * struct efx_phy_operations - efx PHY operations table
 * @probe: probe function, called once at driver start
 * @init: initialise the PHY
 * @fini: shut down the PHY
 * @remove: free resources allocated by @probe
 * @reconfigure: reconfigure the PHY (e.g. for a new link mode)
 * @poll: poll for hardware state; returns true on change
 * @get_settings: get ethtool settings
 * @set_settings: set ethtool settings
 * @set_npage_adv: set the abilities advertised in next-page autoneg
 * @test_alive: test that the PHY is responding
 * @test_name: get the name of the PHY self-test with the given index
 * @run_tests: run the PHY self-tests
 * @get_module_eeprom: read the EEPROM of a pluggable module
 * @get_module_info: describe the EEPROM of a pluggable module
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
				  struct ethtool_eeprom *ee,
				  u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};
746
747
748
749
750
751
752
753
754
/* PHY operating modes; all non-NORMAL values are bit flags and may be
 * combined. */
enum efx_phy_mode {
	PHY_MODE_NORMAL = 0,		/* fully operational */
	PHY_MODE_TX_DISABLED = 1,	/* transmitter disabled */
	PHY_MODE_LOW_POWER = 2,		/* low-power state */
	PHY_MODE_OFF = 4,		/* switched off by software */
	PHY_MODE_SPECIAL = 8,		/* on but not usable for traffic */
};
762
763static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
764{
765 return !!(mode & ~PHY_MODE_TX_DISABLED);
766}
767
768
769
770
771
772
773
774
/**
 * struct efx_hw_stat_desc - description of one hardware statistic
 * @name: statistic name as shown to the user, or NULL to hide it
 * @dma_width: width of the value in the DMA statistics buffer, in bits
 * @offset: offset of the value within the DMA buffer
 */
struct efx_hw_stat_desc {
	const char *name;
	u16 dma_width;
	u16 offset;
};

/* Number of bits used in the multicast hash filter */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in the multicast hash filter */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* The multicast hash register, viewable as bytes or as octwords */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct vfdi_status;
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
/* Per-device state for one Solarflare NIC function.  Cold,
 * rarely-written members come first; the tail (from @monitor_work down,
 * marked ____cacheline_aligned_in_smp) holds members written frequently
 * at run time. */
struct efx_nic {
	/* Identification, PCI and housekeeping */
	char name[IFNAMSIZ];		/* device name, copied from net_dev */
	struct list_head node;		/* entry in the global device list */
	struct efx_nic *primary;	/* primary function of this adapter */
	struct list_head secondary_list;	/* secondary functions (primary only) */
	struct pci_dev *pci_dev;	/* underlying PCI device */
	unsigned int port_num;		/* port number on a multi-port adapter */
	const struct efx_nic_type *type;	/* hardware-family operations */
	int legacy_irq;			/* legacy (INTx) IRQ number */
	bool eeh_disabled_legacy_irq;	/* legacy IRQ disabled by EEH handling */
	struct workqueue_struct *workqueue;	/* per-device work queue */
	char workqueue_name[16];
	struct work_struct reset_work;	/* deferred reset handling */
	resource_size_t membase_phys;	/* physical address of register BAR */
	void __iomem *membase;		/* mapped register BAR */

	/* Interrupt configuration */
	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;	/* IRQ timer quantum, nanoseconds */
	unsigned int timer_max_ns;	/* IRQ timer maximum, nanoseconds */
	bool irq_rx_adaptive;		/* adaptive RX moderation enabled */
	unsigned int irq_mod_step_us;	/* moderation adjustment step */
	unsigned int irq_rx_moderation_us;	/* current RX IRQ moderation */
	u32 msg_enable;			/* netif message level flags */

	enum nic_state state;		/* device lifecycle state */
	unsigned long reset_pending;	/* bitmask of pending reset types */

	/* Channels and queue sizing */
	struct efx_channel *channel[EFX_MAX_CHANNELS];
	struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;		/* entries per RX descriptor ring */
	unsigned txq_entries;		/* entries per TX descriptor ring */
	unsigned int txq_stop_thresh;	/* fill level at which TX is stopped */
	unsigned int txq_wake_thresh;	/* fill level at which TX is woken */

	/* SRAM layout (descriptor caches and buffer table) */
	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	/* Channel/queue counts and RX datapath parameters */
	unsigned int max_channels;
	unsigned int max_tx_channels;
	unsigned n_channels;		/* channels currently in use */
	unsigned n_rx_channels;		/* channels with an RX queue */
	unsigned rss_spread;		/* channels spread over by RSS */
	unsigned tx_channel_offset;	/* first channel carrying TX queues */
	unsigned n_tx_channels;		/* channels with TX queues */
	unsigned int rx_ip_align;	/* alignment applied to the IP header */
	unsigned int rx_dma_len;	/* RX DMA length per buffer */
	unsigned int rx_buffer_order;	/* page order of RX buffer allocations */
	unsigned int rx_buffer_truesize;	/* truesize reported per RX buffer */
	unsigned int rx_page_buf_step;	/* stride between buffers in a page */
	unsigned int rx_bufs_per_page;	/* RX buffers packed into each page */
	unsigned int rx_pages_per_batch;	/* pages allocated per refill batch */
	unsigned int rx_prefix_size;	/* size of the hardware RX prefix */
	int rx_packet_hash_offset;	/* flow-hash offset within the prefix */
	int rx_packet_len_offset;	/* length offset within the prefix */
	int rx_packet_ts_offset;	/* timestamp offset within the prefix */
	u8 rx_hash_key[40];		/* RSS (Toeplitz) hash key */
	u32 rx_indir_table[128];	/* RSS indirection table */
	bool rx_scatter;		/* RX scatter currently enabled */

	unsigned int_error_count;	/* fatal interrupt errors seen */
	unsigned long int_error_expire;	/* when to forget int_error_count */

	bool irq_soft_enabled;		/* IRQ handlers may do real work */
	struct efx_buffer irq_status;	/* interrupt status DMA buffer */
	unsigned irq_zero_count;	/* legacy IRQs seen with no event */
	unsigned irq_level;		/* IRQ level/index of fatal interrupts */
	struct delayed_work selftest_work;	/* deferred self-test work */

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;	/* MTD partitions on this device */
#endif

	void *nic_data;			/* hardware-family private data */
	struct efx_mcdi_data *mcdi;	/* management-controller interface state */

	/* Port/MAC state, protected by mac_lock */
	struct mutex mac_lock;
	struct work_struct mac_work;	/* deferred MAC reconfiguration */
	bool port_enabled;

	bool mc_bist_for_other_fn;	/* MC BIST triggered by another function */
	bool port_initialized;
	struct net_device *net_dev;	/* operating system network device */

	netdev_features_t fixed_features;	/* features that cannot be turned off */

	/* Statistics and PHY/link state */
	struct efx_buffer stats_buffer;	/* DMA buffer for hardware statistics */
	u64 rx_nodesc_drops_total;
	u64 rx_nodesc_drops_while_down;
	bool rx_nodesc_drops_prev_state;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;	/* MDIO management interface */
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;		/* autoneg advertisement flags */
	struct efx_link_state link_state;	/* current link state */
	unsigned int n_link_state_changes;

	/* Filtering and flow control */
	bool unicast_filter;		/* accept all unicast when false? — confirm */
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;			/* flow control wanted (EFX_FC_*) */
	unsigned fc_disable;		/* disable-flow-control nesting count */

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;	/* current loopback mode */
	u64 loopback_modes;		/* supported loopback modes bitmask */

	void *loopback_selftest;	/* loopback self-test private state */

	struct rw_semaphore filter_sem;	/* protects filter table (re)configuration */
	spinlock_t filter_lock;		/* protects filter table entries */
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rps_expire_channel;	/* next channel checked for RFS expiry */
	unsigned int rps_expire_index;	/* next filter index checked for expiry */
#endif

	/* Queue flush accounting */
	atomic_t active_queues;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;	/* wait queue for flush completion */

#ifdef CONFIG_SFC_SRIOV
	unsigned vf_count;		/* number of VFs intended to be enabled */
	unsigned vf_init_count;		/* VFs fully initialised */
	unsigned vi_scale;		/* log2 of VIs per VF */
#endif

	struct efx_ptp_data *ptp_data;	/* PTP clock/timestamping state */

	char *vpd_sn;			/* serial number from PCI VPD */

	/* The following members are written frequently at run time and
	 * are kept on their own cachelines. */
	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;		/* serialises multi-word register access */
	int last_irq_cpu;		/* CPU that took the most recent IRQ */
	spinlock_t stats_lock;		/* protects statistics update/read */
	atomic_t n_rx_noskb_drops;	/* RX drops due to skb allocation failure */
};
1078
/* Non-zero once the net device has been registered with the stack. */
static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

/* Port number of this function on a (possibly multi-port) adapter. */
static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->port_num;
}
1088
/**
 * struct efx_mtd_partition - an MTD partition exposed by the NIC
 * @node: list node (see efx_nic::mtd_list)
 * @mtd: MTD device info registered with the MTD subsystem
 * @dev_type_name: device type name, used when naming the partition
 * @type_name: partition type name
 * @name: full partition name; sized for the interface name plus suffix
 */
struct efx_mtd_partition {
	struct list_head node;
	struct mtd_info mtd;
	const char *dev_type_name;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
/**
 * struct efx_nic_type - hardware-family operations and constants
 *
 * One instance per supported controller architecture; the function
 * pointers implement the generic driver's lifecycle, datapath and
 * management operations, and the trailing members describe fixed
 * hardware layout limits.
 */
struct efx_nic_type {
	bool is_vf;			/* this function is a virtual function */
	unsigned int mem_bar;		/* PCI BAR holding the registers */
	unsigned int (*mem_map_size)(struct efx_nic *efx);

	/* Device lifecycle */
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	int (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);

	/* Reset handling */
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);

	/* Port setup and global events */
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);

	/* Queue flushing and function-level reset */
	int (*fini_dmaq)(struct efx_nic *efx);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*prepare_flr)(struct efx_nic *efx);
	void (*finish_flr)(struct efx_nic *efx);

	/* Statistics */
	size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
			       struct rtnl_link_stats64 *core_stats);
	void (*start_stats)(struct efx_nic *efx);
	void (*pull_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);

	/* LED, moderation, MAC and Wake-on-LAN */
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);

	/* Self-tests */
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);

	/* MCDI (management controller) transport */
	void (*mcdi_request)(struct efx_nic *efx,
			     const efx_dword_t *hdr, size_t hdr_len,
			     const efx_dword_t *sdu, size_t sdu_len);
	bool (*mcdi_poll_response)(struct efx_nic *efx);
	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
				   size_t pdu_offset, size_t pdu_len);
	int (*mcdi_poll_reboot)(struct efx_nic *efx);
	void (*mcdi_reboot_detected)(struct efx_nic *efx);

	/* Interrupt handling */
	void (*irq_enable_master)(struct efx_nic *efx);
	int (*irq_test_generate)(struct efx_nic *efx);
	void (*irq_disable_non_ev)(struct efx_nic *efx);
	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);

	/* TX datapath */
	int (*tx_probe)(struct efx_tx_queue *tx_queue);
	void (*tx_init)(struct efx_tx_queue *tx_queue);
	void (*tx_remove)(struct efx_tx_queue *tx_queue);
	void (*tx_write)(struct efx_tx_queue *tx_queue);

	/* RX datapath and RSS */
	int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
				  const u32 *rx_indir_table);
	int (*rx_probe)(struct efx_rx_queue *rx_queue);
	void (*rx_init)(struct efx_rx_queue *rx_queue);
	void (*rx_remove)(struct efx_rx_queue *rx_queue);
	void (*rx_write)(struct efx_rx_queue *rx_queue);
	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);

	/* Event queues */
	int (*ev_probe)(struct efx_channel *channel);
	int (*ev_init)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);

	/* Hardware filter table */
	int (*filter_table_probe)(struct efx_nic *efx);
	void (*filter_table_restore)(struct efx_nic *efx);
	void (*filter_table_remove)(struct efx_nic *efx);
	void (*filter_update_rx_scatter)(struct efx_nic *efx);
	s32 (*filter_insert)(struct efx_nic *efx,
			     struct efx_filter_spec *spec, bool replace);
	int (*filter_remove_safe)(struct efx_nic *efx,
				  enum efx_filter_priority priority,
				  u32 filter_id);
	int (*filter_get_safe)(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *);
	int (*filter_clear_rx)(struct efx_nic *efx,
			       enum efx_filter_priority priority);
	u32 (*filter_count_rx_used)(struct efx_nic *efx,
				    enum efx_filter_priority priority);
	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
	s32 (*filter_rfs_insert)(struct efx_nic *efx,
				 struct efx_filter_spec *spec);
	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
				      unsigned int index);
#endif
#ifdef CONFIG_SFC_MTD
	/* MTD (flash/EEPROM partition) access */
	int (*mtd_probe)(struct efx_nic *efx);
	void (*mtd_rename)(struct efx_mtd_partition *part);
	int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
			size_t *retlen, u8 *buffer);
	int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
			 size_t *retlen, const u8 *buffer);
	int (*mtd_sync)(struct mtd_info *mtd);
#endif
	/* PTP timestamping */
	void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
	int (*ptp_set_ts_config)(struct efx_nic *efx,
				 struct hwtstamp_config *init);

	/* SR-IOV, VLAN filtering and vswitching */
	int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
	int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
	int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
	int (*sriov_init)(struct efx_nic *efx);
	void (*sriov_fini)(struct efx_nic *efx);
	bool (*sriov_wanted)(struct efx_nic *efx);
	void (*sriov_reset)(struct efx_nic *efx);
	void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
	int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
	int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
				 u8 qos);
	int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
				     bool spoofchk);
	int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
				   struct ifla_vf_info *ivi);
	int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
				       int link_state);
	int (*sriov_get_phys_port_id)(struct efx_nic *efx,
				      struct netdev_phys_item_id *ppid);
	int (*vswitching_probe)(struct efx_nic *efx);
	int (*vswitching_restore)(struct efx_nic *efx);
	void (*vswitching_remove)(struct efx_nic *efx);
	int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
	int (*set_mac_address)(struct efx_nic *efx);

	/* Hardware layout constants */
	int revision;			/* hardware architecture revision */
	unsigned int txd_ptr_tbl_base;	/* TX descriptor ring pointer table */
	unsigned int rxd_ptr_tbl_base;	/* RX descriptor ring pointer table */
	unsigned int buf_tbl_base;	/* buffer table base address */
	unsigned int evq_ptr_tbl_base;	/* event queue pointer table */
	unsigned int evq_rptr_tbl_base;	/* event queue read-pointer table */
	u64 max_dma_mask;		/* maximum possible DMA mask */
	unsigned int rx_prefix_size;	/* size of the RX prefix before payload */
	unsigned int rx_hash_offset;	/* offset of flow hash in the prefix */
	unsigned int rx_ts_offset;	/* offset of timestamp in the prefix */
	unsigned int rx_buffer_padding;	/* padding added to each RX buffer */
	bool can_rx_scatter;		/* NIC is able to scatter packets */
	bool always_rx_scatter;		/* all RX buffers sized for scatter */
	unsigned int max_interrupt_mode;	/* highest usable efx_int_mode */
	unsigned int timer_period_max;	/* highest IRQ timer value */
	netdev_features_t offload_features;	/* device offload feature flags */
	int mcdi_max_ver;		/* maximum MCDI protocol version */
	unsigned int max_rx_ip_filters;	/* maximum number of RX IP filters */
	u32 hwtstamp_filters;		/* supported HWTSTAMP_FILTER_* bits */
};
1388
1389
1390
1391
1392
1393
1394
/* Return the channel with the given index, range-checked in DEBUG builds. */
static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}
1401
1402
/* Iterate over all used channels, in order of increasing index */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)

/* Iterate over all used channels, in order of decreasing index */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)
1415
/* Return the TX queue of the given type on the index'th TX-capable
 * channel; range-checked in DEBUG builds. */
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
1423
/* True if the channel carries TX queues, i.e. its index falls within
 * [tx_channel_offset, tx_channel_offset + n_tx_channels). */
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}
1429
/* Return the channel's TX queue of the given type; paranoia-checked. */
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}
1437
1438static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
1439{
1440 return !(tx_queue->efx->net_dev->num_tc < 2 &&
1441 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
1442}
1443
1444
/* Iterate over the channel's TX queues that are currently in use.
 * Iteration stops at the first unused queue, which presumably relies on
 * used queues occupying a contiguous prefix of tx_queue[] — confirm
 * against queue setup. */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to the channel,
 * whether or not they are in use. */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)
1462
/* True if the channel services an RX queue (core_index is negative on
 * channels without one). */
static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->rx_queue.core_index >= 0;
}

/* Return the channel's RX queue; paranoia-checked in DEBUG builds. */
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}
1474
1475
/* Iterate over the channel's RX queues.  There is at most one RX queue
 * per channel, so the loop body runs zero or one times. */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)
1483
/* Map an RX queue back to its containing channel (the RX queue is
 * embedded in struct efx_channel). */
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

/* Return the channel index of the given RX queue. */
static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}
1494
1495
1496
1497
/* Return the software state for the index'th buffer of an RX queue.
 * NOTE(review): unlike the other accessors this does no range check;
 * callers are expected to mask the index themselves. */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/* Maximum frame length for a given MTU: MTU plus Ethernet header, VLAN
 * tag, FCS and 16 bytes of hardware slack, rounded up to a multiple of
 * 8 bytes. */
#define EFX_FRAME_PAD 16
#define EFX_MAX_FRAME_LEN(mtu) \
	(ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
1524
/* True if the skb requests a hardware transmit timestamp. */
static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}
/* Mark the skb as having its hardware timestamp in progress. */
static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
1533
1534
1535
1536
1537
1538
1539static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
1540{
1541 const struct net_device *net_dev = efx->net_dev;
1542
1543 return net_dev->features | net_dev->hw_features;
1544}
1545
1546#endif
1547