1
2
3
4
5#include <sys/queue.h>
6#include <stdio.h>
7#include <errno.h>
8#include <stdint.h>
9#include <stdarg.h>
10
11#include <rte_string_fns.h>
12#include <rte_common.h>
13#include <rte_interrupts.h>
14#include <rte_byteorder.h>
15#include <rte_log.h>
16#include <rte_debug.h>
17#include <rte_pci.h>
18#include <rte_bus_pci.h>
19#include <rte_ether.h>
20#include <ethdev_driver.h>
21#include <ethdev_pci.h>
22#include <rte_memory.h>
23#include <rte_eal.h>
24#include <rte_malloc.h>
25#include <rte_dev.h>
26
27#include "e1000_logs.h"
28#include "base/e1000_api.h"
29#include "e1000_ethdev.h"
30#include "igb_regs.h"
31
32
33
34
35#define IGB_DEFAULT_RX_FREE_THRESH 32
36
37#define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
38#define IGB_DEFAULT_RX_HTHRESH 8
39#define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4)
40
41#define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
42#define IGB_DEFAULT_TX_HTHRESH 1
43#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
44
45
46#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
47#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
48#define IGB_8_BIT_WIDTH CHAR_BIT
49#define IGB_8_BIT_MASK UINT8_MAX
50
51
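/* Additional IEEE 1588 timesync values. */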
52#define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL
53#define E1000_ETQF_FILTER_1588 3
54#define IGB_82576_TSYNC_SHIFT 16
55#define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
56#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
57#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
58
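/* VF interrupt allocation (VTIVAR) register fields used to map the
 * mailbox interrupt cause to an MSI-X vector.
 */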
59#define E1000_VTIVAR_MISC 0x01740
60#define E1000_VTIVAR_MISC_MASK 0xFF
61#define E1000_VTIVAR_VALID 0x80
62#define E1000_VTIVAR_MISC_MAILBOX 0
63#define E1000_VTIVAR_MISC_INTR_MASK 0x3
64
65
66#define E1000_CTRL_EXT_EXT_VLAN (1 << 26)
67
68
69#define E1000_VET_VET_EXT 0xFFFF0000
70#define E1000_VET_VET_EXT_SHIFT 16
71
72
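/* MSI-X "other" (link/misc) interrupt vector */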
73#define IGB_MSIX_OTHER_INTR_VEC 0
74
75static int eth_igb_configure(struct rte_eth_dev *dev);
76static int eth_igb_start(struct rte_eth_dev *dev);
77static int eth_igb_stop(struct rte_eth_dev *dev);
78static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
79static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
80static int eth_igb_close(struct rte_eth_dev *dev);
81static int eth_igb_reset(struct rte_eth_dev *dev);
82static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
83static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
84static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
85static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
86static int eth_igb_link_update(struct rte_eth_dev *dev,
87 int wait_to_complete);
88static int eth_igb_stats_get(struct rte_eth_dev *dev,
89 struct rte_eth_stats *rte_stats);
90static int eth_igb_xstats_get(struct rte_eth_dev *dev,
91 struct rte_eth_xstat *xstats, unsigned n);
92static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
93 const uint64_t *ids,
94 uint64_t *values, unsigned int n);
95static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
96 struct rte_eth_xstat_name *xstats_names,
97 unsigned int size);
98static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
99 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
100 unsigned int limit);
101static int eth_igb_stats_reset(struct rte_eth_dev *dev);
102static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
103static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
104 char *fw_version, size_t fw_size);
105static int eth_igb_infos_get(struct rte_eth_dev *dev,
106 struct rte_eth_dev_info *dev_info);
107static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
108static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
109 struct rte_eth_dev_info *dev_info);
110static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
111 struct rte_eth_fc_conf *fc_conf);
112static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
113 struct rte_eth_fc_conf *fc_conf);
114static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
115static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
116static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
117static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
118 struct rte_intr_handle *handle);
119static void eth_igb_interrupt_handler(void *param);
120static int igb_hardware_init(struct e1000_hw *hw);
121static void igb_hw_control_acquire(struct e1000_hw *hw);
122static void igb_hw_control_release(struct e1000_hw *hw);
123static void igb_init_manageability(struct e1000_hw *hw);
124static void igb_release_manageability(struct e1000_hw *hw);
125
126static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
127
128static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
129 uint16_t vlan_id, int on);
130static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
131 enum rte_vlan_type vlan_type,
132 uint16_t tpid_id);
133static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
134
135static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
136static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
137static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
138static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
139static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
140static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
141
142static int eth_igb_led_on(struct rte_eth_dev *dev);
143static int eth_igb_led_off(struct rte_eth_dev *dev);
144
145static void igb_intr_disable(struct rte_eth_dev *dev);
146static int igb_get_rx_buffer_size(struct e1000_hw *hw);
147static int eth_igb_rar_set(struct rte_eth_dev *dev,
148 struct rte_ether_addr *mac_addr,
149 uint32_t index, uint32_t pool);
150static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
151static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
152 struct rte_ether_addr *addr);
153
154static void igbvf_intr_disable(struct e1000_hw *hw);
155static int igbvf_dev_configure(struct rte_eth_dev *dev);
156static int igbvf_dev_start(struct rte_eth_dev *dev);
157static int igbvf_dev_stop(struct rte_eth_dev *dev);
158static int igbvf_dev_close(struct rte_eth_dev *dev);
159static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
160static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
161static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
162static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
163static int eth_igbvf_link_update(struct e1000_hw *hw);
164static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
165 struct rte_eth_stats *rte_stats);
166static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
167 struct rte_eth_xstat *xstats, unsigned n);
168static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
169 struct rte_eth_xstat_name *xstats_names,
170 unsigned limit);
171static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
172static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
173 uint16_t vlan_id, int on);
174static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
175static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
176static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
177 struct rte_ether_addr *addr);
178static int igbvf_get_reg_length(struct rte_eth_dev *dev);
179static int igbvf_get_regs(struct rte_eth_dev *dev,
180 struct rte_dev_reg_info *regs);
181
182static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
183 struct rte_eth_rss_reta_entry64 *reta_conf,
184 uint16_t reta_size);
185static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
186 struct rte_eth_rss_reta_entry64 *reta_conf,
187 uint16_t reta_size);
188
189static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
190 struct rte_eth_ntuple_filter *ntuple_filter);
191static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
192 struct rte_eth_ntuple_filter *ntuple_filter);
193static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
194 struct rte_eth_ntuple_filter *ntuple_filter);
195static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
196 struct rte_eth_ntuple_filter *ntuple_filter);
197static int eth_igb_flow_ops_get(struct rte_eth_dev *dev,
198 const struct rte_flow_ops **ops);
199static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
200static int eth_igb_get_regs(struct rte_eth_dev *dev,
201 struct rte_dev_reg_info *regs);
202static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
203static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
204 struct rte_dev_eeprom_info *eeprom);
205static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
206 struct rte_dev_eeprom_info *eeprom);
207static int eth_igb_get_module_info(struct rte_eth_dev *dev,
208 struct rte_eth_dev_module_info *modinfo);
209static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
210 struct rte_dev_eeprom_info *info);
211static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
212 struct rte_ether_addr *mc_addr_set,
213 uint32_t nb_mc_addr);
214static int igb_timesync_enable(struct rte_eth_dev *dev);
215static int igb_timesync_disable(struct rte_eth_dev *dev);
216static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
217 struct timespec *timestamp,
218 uint32_t flags);
219static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
220 struct timespec *timestamp);
221static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
222static int igb_timesync_read_time(struct rte_eth_dev *dev,
223 struct timespec *timestamp);
224static int igb_timesync_write_time(struct rte_eth_dev *dev,
225 const struct timespec *timestamp);
226static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
227 uint16_t queue_id);
228static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
229 uint16_t queue_id);
230static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
231 uint8_t queue, uint8_t msix_vector);
232static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
233 uint8_t index, uint8_t offset);
234static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
235static void eth_igbvf_interrupt_handler(void *param);
236static void igbvf_mbx_process(struct rte_eth_dev *dev);
237static int igb_filter_restore(struct rte_eth_dev *dev);
238
239
240
241
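/*
 * Accumulate a 32-bit VF statistics register into a 64-bit software
 * counter: only the delta since the previous read is added, and the
 * unsigned 32-bit subtraction keeps the delta correct across hardware
 * counter wrap-around.
 */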
242#define UPDATE_VF_STAT(reg, last, cur) \
243{ \
244 u32 latest = E1000_READ_REG(hw, reg); \
245 cur += (latest - last) & UINT_MAX; \
246 last = latest; \
247}
248
249#define IGB_FC_PAUSE_TIME 0x0680
250#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90
251#define IGB_LINK_UPDATE_CHECK_INTERVAL 100
252
253#define IGBVF_PMD_NAME "rte_igbvf_pmd"
254
255static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
256
257
258
259
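/*
 * The set of PCI devices (PF ports) this driver supports.
 */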
260static const struct rte_pci_id pci_id_igb_map[] = {
261 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
262 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
263 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
264 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
265 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
266 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
267 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
268 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
269
270 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
271 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
272 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
273
274 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
275 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
276 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
277 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
278 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
279 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
280
281 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
282 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
283 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
284 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
285 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
286 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
287 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
288 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
289 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
290 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
291 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
292 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
293 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
294 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
295 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
296 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
297 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
298 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
299 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
300 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
301 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
302 { .vendor_id = 0, },
303};
304
305
306
307
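/*
 * The set of PCI devices this driver supports (82576 and I350 VFs).
 */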
308static const struct rte_pci_id pci_id_igbvf_map[] = {
309 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
310 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
311 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
312 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
313 { .vendor_id = 0, },
314};
315
316static const struct rte_eth_desc_lim rx_desc_lim = {
317 .nb_max = E1000_MAX_RING_DESC,
318 .nb_min = E1000_MIN_RING_DESC,
319 .nb_align = IGB_RXD_ALIGN,
320};
321
322static const struct rte_eth_desc_lim tx_desc_lim = {
323 .nb_max = E1000_MAX_RING_DESC,
324 .nb_min = E1000_MIN_RING_DESC,
325 .nb_align = IGB_RXD_ALIGN,
326 .nb_seg_max = IGB_TX_MAX_SEG,
327 .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
328};
329
330static const struct eth_dev_ops eth_igb_ops = {
331 .dev_configure = eth_igb_configure,
332 .dev_start = eth_igb_start,
333 .dev_stop = eth_igb_stop,
334 .dev_set_link_up = eth_igb_dev_set_link_up,
335 .dev_set_link_down = eth_igb_dev_set_link_down,
336 .dev_close = eth_igb_close,
337 .dev_reset = eth_igb_reset,
338 .promiscuous_enable = eth_igb_promiscuous_enable,
339 .promiscuous_disable = eth_igb_promiscuous_disable,
340 .allmulticast_enable = eth_igb_allmulticast_enable,
341 .allmulticast_disable = eth_igb_allmulticast_disable,
342 .link_update = eth_igb_link_update,
343 .stats_get = eth_igb_stats_get,
344 .xstats_get = eth_igb_xstats_get,
345 .xstats_get_by_id = eth_igb_xstats_get_by_id,
346 .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
347 .xstats_get_names = eth_igb_xstats_get_names,
348 .stats_reset = eth_igb_stats_reset,
349 .xstats_reset = eth_igb_xstats_reset,
350 .fw_version_get = eth_igb_fw_version_get,
351 .dev_infos_get = eth_igb_infos_get,
352 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
353 .mtu_set = eth_igb_mtu_set,
354 .vlan_filter_set = eth_igb_vlan_filter_set,
355 .vlan_tpid_set = eth_igb_vlan_tpid_set,
356 .vlan_offload_set = eth_igb_vlan_offload_set,
357 .rx_queue_setup = eth_igb_rx_queue_setup,
358 .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
359 .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
360 .rx_queue_release = eth_igb_rx_queue_release,
361 .tx_queue_setup = eth_igb_tx_queue_setup,
362 .tx_queue_release = eth_igb_tx_queue_release,
363 .tx_done_cleanup = eth_igb_tx_done_cleanup,
364 .dev_led_on = eth_igb_led_on,
365 .dev_led_off = eth_igb_led_off,
366 .flow_ctrl_get = eth_igb_flow_ctrl_get,
367 .flow_ctrl_set = eth_igb_flow_ctrl_set,
368 .mac_addr_add = eth_igb_rar_set,
369 .mac_addr_remove = eth_igb_rar_clear,
370 .mac_addr_set = eth_igb_default_mac_addr_set,
371 .reta_update = eth_igb_rss_reta_update,
372 .reta_query = eth_igb_rss_reta_query,
373 .rss_hash_update = eth_igb_rss_hash_update,
374 .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
375 .flow_ops_get = eth_igb_flow_ops_get,
376 .set_mc_addr_list = eth_igb_set_mc_addr_list,
377 .rxq_info_get = igb_rxq_info_get,
378 .txq_info_get = igb_txq_info_get,
379 .timesync_enable = igb_timesync_enable,
380 .timesync_disable = igb_timesync_disable,
381 .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
382 .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
383 .get_reg = eth_igb_get_regs,
384 .get_eeprom_length = eth_igb_get_eeprom_length,
385 .get_eeprom = eth_igb_get_eeprom,
386 .set_eeprom = eth_igb_set_eeprom,
387 .get_module_info = eth_igb_get_module_info,
388 .get_module_eeprom = eth_igb_get_module_eeprom,
389 .timesync_adjust_time = igb_timesync_adjust_time,
390 .timesync_read_time = igb_timesync_read_time,
391 .timesync_write_time = igb_timesync_write_time,
392};
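/*
 * Illustrative only (not part of this driver): the callbacks above back
 * the generic ethdev API for PF ports, e.g. an application call such as
 *
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * is dispatched to eth_igb_configure() when port_id is an igb port.
 */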
393
394
395
396
397
398static const struct eth_dev_ops igbvf_eth_dev_ops = {
399 .dev_configure = igbvf_dev_configure,
400 .dev_start = igbvf_dev_start,
401 .dev_stop = igbvf_dev_stop,
402 .dev_close = igbvf_dev_close,
403 .promiscuous_enable = igbvf_promiscuous_enable,
404 .promiscuous_disable = igbvf_promiscuous_disable,
405 .allmulticast_enable = igbvf_allmulticast_enable,
406 .allmulticast_disable = igbvf_allmulticast_disable,
407 .link_update = eth_igb_link_update,
408 .stats_get = eth_igbvf_stats_get,
409 .xstats_get = eth_igbvf_xstats_get,
410 .xstats_get_names = eth_igbvf_xstats_get_names,
411 .stats_reset = eth_igbvf_stats_reset,
412 .xstats_reset = eth_igbvf_stats_reset,
413 .vlan_filter_set = igbvf_vlan_filter_set,
414 .dev_infos_get = eth_igbvf_infos_get,
415 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
416 .rx_queue_setup = eth_igb_rx_queue_setup,
417 .rx_queue_release = eth_igb_rx_queue_release,
418 .tx_queue_setup = eth_igb_tx_queue_setup,
419 .tx_queue_release = eth_igb_tx_queue_release,
420 .tx_done_cleanup = eth_igb_tx_done_cleanup,
421 .set_mc_addr_list = eth_igb_set_mc_addr_list,
422 .rxq_info_get = igb_rxq_info_get,
423 .txq_info_get = igb_txq_info_get,
424 .mac_addr_set = igbvf_default_mac_addr_set,
425 .get_reg = igbvf_get_regs,
426};
427
428
429struct rte_igb_xstats_name_off {
430 char name[RTE_ETH_XSTATS_NAME_SIZE];
431 unsigned offset;
432};
433
434static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
435 {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
436 {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
437 {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
438 {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
439 {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
440 {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
441 {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
442 ecol)},
443 {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
444 {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
445 {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
446 {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
447 {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
448 {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
449 {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
450 {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
451 {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
452 {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
453 {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
454 fcruc)},
455 {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
456 {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
457 {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
458 {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
459 {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
460 prc1023)},
461 {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
462 prc1522)},
463 {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
464 {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
465 {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
466 {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
467 {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
468 {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
469 {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
470 {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
471 {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
472 {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
473 {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
474 {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
475 {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
476 {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
477 {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
478 {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
479 {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
480 {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
481 ptc1023)},
482 {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
483 ptc1522)},
484 {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
485 {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
486 {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
487 {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
488 {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
489 {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
490 {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
491
492 {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
493};
494
495#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
496 sizeof(rte_igb_stats_strings[0]))
497
498static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
499 {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
500 {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
501 {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
502 {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
503 {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
504};
505
506#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
507 sizeof(rte_igbvf_stats_strings[0]))
508
509
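/*
 * Enable device interrupts: when MSI-X is in use with a dedicated
 * "other" vector and link-status-change interrupts are requested, that
 * vector is unmasked through EIMS in addition to the legacy IMS mask
 * programmed below.
 */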
510static inline void
511igb_intr_enable(struct rte_eth_dev *dev)
512{
513 struct e1000_interrupt *intr =
514 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
515 struct e1000_hw *hw =
516 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
517 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
518 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
519
520 if (rte_intr_allow_others(intr_handle) &&
521 dev->data->dev_conf.intr_conf.lsc != 0) {
522 E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
523 }
524
525 E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
526 E1000_WRITE_FLUSH(hw);
527}
528
529static void
530igb_intr_disable(struct rte_eth_dev *dev)
531{
532 struct e1000_hw *hw =
533 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
534 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
535 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
536
537 if (rte_intr_allow_others(intr_handle) &&
538 dev->data->dev_conf.intr_conf.lsc != 0) {
539 E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
540 }
541
542 E1000_WRITE_REG(hw, E1000_IMC, ~0);
543 E1000_WRITE_FLUSH(hw);
544}
545
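/*
 * Enable the VF mailbox interrupt: EIAM/EIAC make the vector
 * auto-mask/auto-clear and EIMS unmasks it.
 */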
546static inline void
547igbvf_intr_enable(struct rte_eth_dev *dev)
548{
549 struct e1000_hw *hw =
550 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
551
552
553 E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
554 E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
555 E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
556 E1000_WRITE_FLUSH(hw);
557}
558
559
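/* Route the VF mailbox cause to the given MSI-X vector via VTIVAR_MISC;
 * the VALID bit arms the entry.
 */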
560static void
561igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
562{
563 uint32_t tmp = 0;
564
565
566 tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
567 tmp |= E1000_VTIVAR_VALID;
568 E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
569}
570
571static void
572eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
573{
574 struct e1000_hw *hw =
575 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
576
577
578 igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
579}
580
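/*
 * Reset the hardware and then set CTRL_EXT.PFRSTD ("PF reset done") so
 * that VFs polling the mailbox can tell the PF has finished resetting.
 */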
581static inline int32_t
582igb_pf_reset_hw(struct e1000_hw *hw)
583{
584 uint32_t ctrl_ext;
585 int32_t status;
586
587 status = e1000_reset_hw(hw);
588
589 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
590
591 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
592 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
593 E1000_WRITE_FLUSH(hw);
594
595 return status;
596}
597
598static void
599igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
600{
601 struct e1000_hw *hw =
602 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
603
604
605 hw->vendor_id = pci_dev->id.vendor_id;
606 hw->device_id = pci_dev->id.device_id;
607 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
608 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
609
610 e1000_set_mac_type(hw);
611
612
613}
614
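/*
 * A crashed or killed application can leave the SMBI and SW/FW-sync
 * semaphores taken.  Acquire and immediately release each semaphore this
 * function may need (PHY per bus function, EEPROM/common) so they start
 * from a known free state before initialization continues.
 */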
615static int
616igb_reset_swfw_lock(struct e1000_hw *hw)
617{
618 int ret_val;
619
620
621
622
623
624 ret_val = e1000_init_mac_params(hw);
625 if (ret_val)
626 return ret_val;
627
628
629
630
631
632
633 if (e1000_get_hw_semaphore_generic(hw) < 0) {
634 PMD_DRV_LOG(DEBUG, "SMBI lock released");
635 }
636 e1000_put_hw_semaphore_generic(hw);
637
638 if (hw->mac.ops.acquire_swfw_sync != NULL) {
639 uint16_t mask;
640
641
642
643
644
645
646 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
647 if (hw->bus.func > E1000_FUNC_1)
648 mask <<= 2;
649 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
650 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
651 hw->bus.func);
652 }
653 hw->mac.ops.release_swfw_sync(hw, mask);
654
655
656
657
658
659
660
661 mask = E1000_SWFW_EEP_SM;
662 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
663 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
664 }
665 hw->mac.ops.release_swfw_sync(hw, mask);
666 }
667
668 return E1000_SUCCESS;
669}
670
671
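/* Flush all the n-tuple (2-tuple and 5-tuple) filters remembered in the
 * private filter lists and clear their bitmasks.
 */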
672static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
673{
674 struct e1000_filter_info *filter_info =
675 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
676 struct e1000_5tuple_filter *p_5tuple;
677 struct e1000_2tuple_filter *p_2tuple;
678
679 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
680 TAILQ_REMOVE(&filter_info->fivetuple_list,
681 p_5tuple, entries);
682 rte_free(p_5tuple);
683 }
684 filter_info->fivetuple_mask = 0;
685 while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
686 TAILQ_REMOVE(&filter_info->twotuple_list,
687 p_2tuple, entries);
688 rte_free(p_2tuple);
689 }
690 filter_info->twotuple_mask = 0;
691
692 return 0;
693}
694
695
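/* Flush all the flex filters remembered in the private filter list. */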
696static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
697{
698 struct e1000_filter_info *filter_info =
699 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
700 struct e1000_flex_filter *p_flex;
701
702 while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
703 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
704 rte_free(p_flex);
705 }
706 filter_info->flex_mask = 0;
707
708 return 0;
709}
710
711static int
712eth_igb_dev_init(struct rte_eth_dev *eth_dev)
713{
714 int error = 0;
715 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
716 struct e1000_hw *hw =
717 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
718 struct e1000_vfta * shadow_vfta =
719 E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
720 struct e1000_filter_info *filter_info =
721 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
722 struct e1000_adapter *adapter =
723 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
724
725 uint32_t ctrl_ext;
726
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_queue_count = eth_igb_rx_queue_count;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}
743
744 rte_eth_copy_pci_info(eth_dev, pci_dev);
745
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
747
748 igb_identify_hardware(eth_dev, pci_dev);
749 if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
750 error = -EIO;
751 goto err_late;
752 }
753
754 e1000_get_bus_info(hw);
755
756
757 if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
758 error = -EIO;
759 goto err_late;
760 }
761
762
763 if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
764 error = -EIO;
765 goto err_late;
766 }
767
768 hw->mac.autoneg = 1;
769 hw->phy.autoneg_wait_to_complete = 0;
770 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
771
772
773 if (hw->phy.media_type == e1000_media_type_copper) {
774 hw->phy.mdix = 0;
775 hw->phy.disable_polarity_correction = 0;
776 hw->phy.ms_type = e1000_ms_hw_default;
777 }
778
779
780
781
782
783 igb_pf_reset_hw(hw);
784
785
786 if (e1000_validate_nvm_checksum(hw) < 0) {
787
788
789
790
791
792 if (e1000_validate_nvm_checksum(hw) < 0) {
793 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
794 error = -EIO;
795 goto err_late;
796 }
797 }
798
799
800 if (e1000_read_mac_addr(hw) != 0) {
801 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
802 error = -EIO;
803 goto err_late;
804 }
805
806
807 eth_dev->data->mac_addrs = rte_zmalloc("e1000",
808 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
809 if (eth_dev->data->mac_addrs == NULL) {
810 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
811 "store MAC addresses",
812 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
813 error = -ENOMEM;
814 goto err_late;
815 }
816
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);
820
821
822 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
823
824
825 if (igb_hardware_init(hw) != 0) {
826 PMD_INIT_LOG(ERR, "Hardware initialization failed");
827 rte_free(eth_dev->data->mac_addrs);
828 eth_dev->data->mac_addrs = NULL;
829 error = -ENODEV;
830 goto err_late;
831 }
832 hw->mac.get_link_status = 1;
833 adapter->stopped = 0;
834
835
836 if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
				"SOL/IDER session");
839 }
840
841
842 igb_pf_host_init(eth_dev);
843
844 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
845
846 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
847 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
848 E1000_WRITE_FLUSH(hw);
849
850 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
851 eth_dev->data->port_id, pci_dev->id.vendor_id,
852 pci_dev->id.device_id);
853
854 rte_intr_callback_register(pci_dev->intr_handle,
855 eth_igb_interrupt_handler,
856 (void *)eth_dev);
857
858
859 rte_intr_enable(pci_dev->intr_handle);
860
861
862 igb_intr_enable(eth_dev);
863
864 eth_igb_dev_set_link_down(eth_dev);
865
866
867 memset(filter_info, 0,
868 sizeof(struct e1000_filter_info));
869
870 TAILQ_INIT(&filter_info->flex_list);
871 TAILQ_INIT(&filter_info->twotuple_list);
872 TAILQ_INIT(&filter_info->fivetuple_list);
873
874 TAILQ_INIT(&igb_filter_ntuple_list);
875 TAILQ_INIT(&igb_filter_ethertype_list);
876 TAILQ_INIT(&igb_filter_syn_list);
877 TAILQ_INIT(&igb_filter_flex_list);
878 TAILQ_INIT(&igb_filter_rss_list);
879 TAILQ_INIT(&igb_flow_list);
880
881 return 0;
882
883err_late:
884 igb_hw_control_release(hw);
885
886 return error;
887}
888
889static int
890eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
891{
892 PMD_INIT_FUNC_TRACE();
893
894 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
895 return 0;
896
897 eth_igb_close(eth_dev);
898
899 return 0;
900}
901
902
903
904
905static int
906eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
907{
908 struct rte_pci_device *pci_dev;
909 struct rte_intr_handle *intr_handle;
910 struct e1000_adapter *adapter =
911 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
912 struct e1000_hw *hw =
913 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
914 int diag;
915 struct rte_ether_addr *perm_addr =
916 (struct rte_ether_addr *)hw->mac.perm_addr;
917
918 PMD_INIT_FUNC_TRACE();
919
920 eth_dev->dev_ops = &igbvf_eth_dev_ops;
921 eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
922 eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}
935
936 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
937 rte_eth_copy_pci_info(eth_dev, pci_dev);
938
939 hw->device_id = pci_dev->id.device_id;
940 hw->vendor_id = pci_dev->id.vendor_id;
941 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
942 adapter->stopped = 0;
943
944
945 diag = e1000_setup_init_funcs(hw, TRUE);
946 if (diag != 0) {
947 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
948 diag);
949 return -EIO;
950 }
951
952
953 hw->mbx.ops.init_params(hw);
954
955
956 igbvf_intr_disable(hw);
957
958 diag = hw->mac.ops.reset_hw(hw);
959
960
961 eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
962 hw->mac.rar_entry_count, 0);
963 if (eth_dev->data->mac_addrs == NULL) {
964 PMD_INIT_LOG(ERR,
965 "Failed to allocate %d bytes needed to store MAC "
966 "addresses",
967 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
968 return -ENOMEM;
969 }
970
971
972 if (rte_is_zero_ether_addr(perm_addr)) {
973 rte_eth_random_addr(perm_addr->addr_bytes);
974 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
975 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
976 RTE_ETHER_ADDR_PRT_FMT,
977 RTE_ETHER_ADDR_BYTES(perm_addr));
978 }
979
980 diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
981 if (diag) {
982 rte_free(eth_dev->data->mac_addrs);
983 eth_dev->data->mac_addrs = NULL;
984 return diag;
985 }
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);
989
990 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
991 "mac.type=%s",
992 eth_dev->data->port_id, pci_dev->id.vendor_id,
993 pci_dev->id.device_id, "igb_mac_82576_vf");
994
995 intr_handle = pci_dev->intr_handle;
996 rte_intr_callback_register(intr_handle,
997 eth_igbvf_interrupt_handler, eth_dev);
998
999 return 0;
1000}
1001
1002static int
1003eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
1004{
1005 PMD_INIT_FUNC_TRACE();
1006
1007 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1008 return 0;
1009
1010 igbvf_dev_close(eth_dev);
1011
1012 return 0;
1013}
1014
1015static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1016 struct rte_pci_device *pci_dev)
1017{
1018 return rte_eth_dev_pci_generic_probe(pci_dev,
1019 sizeof(struct e1000_adapter), eth_igb_dev_init);
1020}
1021
1022static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
1023{
1024 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
1025}
1026
1027static struct rte_pci_driver rte_igb_pmd = {
1028 .id_table = pci_id_igb_map,
1029 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1030 .probe = eth_igb_pci_probe,
1031 .remove = eth_igb_pci_remove,
1032};
1033
1034
1035static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1036 struct rte_pci_device *pci_dev)
1037{
1038 return rte_eth_dev_pci_generic_probe(pci_dev,
1039 sizeof(struct e1000_adapter), eth_igbvf_dev_init);
1040}
1041
1042static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
1043{
1044 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
1045}
1046
1047
1048
1049
1050static struct rte_pci_driver rte_igbvf_pmd = {
1051 .id_table = pci_id_igbvf_map,
1052 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1053 .probe = eth_igbvf_pci_probe,
1054 .remove = eth_igbvf_pci_remove,
1055};
1056
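/* VMDq pools rely on hardware VLAN filtering: set RCTL.VFE. */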
1057static void
1058igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1059{
1060 struct e1000_hw *hw =
1061 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1062
1063 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
1064 rctl |= E1000_RCTL_VFE;
1065 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1066}
1067
1068static int
1069igb_check_mq_mode(struct rte_eth_dev *dev)
1070{
1071 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1072 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1073 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1074 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1075
1076 if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
1077 tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
1078 tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
1079 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
1080 return -EINVAL;
1081 }
1082 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1083
1084
1085
1086
1087
1088 if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
1089 rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
1090 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
1091 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1092 } else {
1093
1094
1095
1096 PMD_INIT_LOG(ERR, "SRIOV is active,"
1097 " wrong mq_mode rx %d.",
1098 rx_mq_mode);
1099 return -EINVAL;
1100 }
1101
1102 if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
1103
1104 PMD_INIT_LOG(WARNING, "SRIOV is active,"
1105 " TX mode %d is not supported. "
1106 " Driver will behave as %d mode.",
1107 tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
1108 }
1109
1110
1111 if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
1112 PMD_INIT_LOG(ERR, "SRIOV is active,"
1113 " only support one queue on VFs.");
1114 return -EINVAL;
1115 }
1116 } else {
1117
1118
1119
1120 if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
1121 rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
1122 rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
1123
1124 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
1125 rx_mq_mode);
1126 return -EINVAL;
1127 }
1128
1129 if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
1130 tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
		PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
				" Since txmode is meaningless in this"
				" driver, it is ignored.",
				tx_mq_mode);
1135 }
1136 }
1137 return 0;
1138}
1139
1140static int
1141eth_igb_configure(struct rte_eth_dev *dev)
1142{
1143 struct e1000_interrupt *intr =
1144 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1145 int ret;
1146
1147 PMD_INIT_FUNC_TRACE();
1148
1149 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1150 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1151
1152
1153 ret = igb_check_mq_mode(dev);
1154 if (ret != 0) {
1155 PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
1156 ret);
1157 return ret;
1158 }
1159
1160 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1161 PMD_INIT_FUNC_TRACE();
1162
1163 return 0;
1164}
1165
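/* Enable or disable the RX (RCTL.EN) and TX (TCTL.EN) units together
 * when the port is started or stopped.
 */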
1166static void
1167eth_igb_rxtx_control(struct rte_eth_dev *dev,
1168 bool enable)
1169{
1170 struct e1000_hw *hw =
1171 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1172 uint32_t tctl, rctl;
1173
1174 tctl = E1000_READ_REG(hw, E1000_TCTL);
1175 rctl = E1000_READ_REG(hw, E1000_RCTL);
1176
1177 if (enable) {
1178
1179 tctl |= E1000_TCTL_EN;
1180 rctl |= E1000_RCTL_EN;
1181 } else {
1182
1183 tctl &= ~E1000_TCTL_EN;
1184 rctl &= ~E1000_RCTL_EN;
1185 }
1186 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1187 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1188 E1000_WRITE_FLUSH(hw);
1189}
1190
1191static int
1192eth_igb_start(struct rte_eth_dev *dev)
1193{
1194 struct e1000_hw *hw =
1195 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1196 struct e1000_adapter *adapter =
1197 E1000_DEV_PRIVATE(dev->data->dev_private);
1198 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1199 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1200 int ret, mask;
1201 uint32_t intr_vector = 0;
1202 uint32_t ctrl_ext;
1203 uint32_t *speeds;
1204 int num_speeds;
1205 bool autoneg;
1206
1207 PMD_INIT_FUNC_TRACE();
1208
1209
1210 rte_intr_disable(intr_handle);
1211
1212
1213 eth_igb_dev_set_link_up(dev);
1214
1215
1216
1217
1218
1219
1220 if (hw->mac.type == e1000_82575) {
1221 uint32_t pba;
1222
1223 pba = E1000_PBA_32K;
1224 E1000_WRITE_REG(hw, E1000_PBA, pba);
1225 }
1226
1227
1228 e1000_rar_set(hw, hw->mac.addr, 0);
1229
1230
1231 if (igb_hardware_init(hw)) {
1232 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
1233 return -EIO;
1234 }
1235 adapter->stopped = 0;
1236
1237 E1000_WRITE_REG(hw, E1000_VET,
1238 RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
1239
1240 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1241
1242 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
1243 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1244 E1000_WRITE_FLUSH(hw);
1245
1246
1247 igb_pf_host_configure(dev);
1248
1249
1250 if ((rte_intr_cap_multiple(intr_handle) ||
1251 !RTE_ETH_DEV_SRIOV(dev).active) &&
1252 dev->data->dev_conf.intr_conf.rxq != 0) {
1253 intr_vector = dev->data->nb_rx_queues;
1254 if (rte_intr_efd_enable(intr_handle, intr_vector))
1255 return -1;
1256 }
1257
1258
1259 if (rte_intr_dp_is_en(intr_handle)) {
1260 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1261 dev->data->nb_rx_queues)) {
1262 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1263 " intr_vec", dev->data->nb_rx_queues);
1264 return -ENOMEM;
1265 }
1266 }
1267
1268
1269 eth_igb_configure_msix_intr(dev);
1270
1271
1272 igb_init_manageability(hw);
1273
1274 eth_igb_tx_init(dev);
1275
1276
1277 ret = eth_igb_rx_init(dev);
1278 if (ret) {
1279 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1280 igb_dev_clear_queues(dev);
1281 return ret;
1282 }
1283
1284 e1000_clear_hw_cntrs_base_generic(hw);
1285
1286
1287
1288
1289 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1290 RTE_ETH_VLAN_EXTEND_MASK;
1291 ret = eth_igb_vlan_offload_set(dev, mask);
1292 if (ret) {
1293 PMD_INIT_LOG(ERR, "Unable to set vlan offload");
1294 igb_dev_clear_queues(dev);
1295 return ret;
1296 }
1297
1298 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
1299
1300 igb_vmdq_vlan_hw_filter_enable(dev);
1301 }
1302
1303 if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
1304 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
1305 (hw->mac.type == e1000_i211)) {
1306
1307 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
1308 }
1309
1310
1311 speeds = &dev->data->dev_conf.link_speeds;
1312 if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1313 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
1314 hw->mac.autoneg = 1;
1315 } else {
1316 num_speeds = 0;
1317 autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;
1318
1319
1320 hw->phy.autoneg_advertised = 0;
1321
1322 if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
1323 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
1324 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
1325 num_speeds = -1;
1326 goto error_invalid_config;
1327 }
1328 if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
1329 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
1330 num_speeds++;
1331 }
1332 if (*speeds & RTE_ETH_LINK_SPEED_10M) {
1333 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
1334 num_speeds++;
1335 }
1336 if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
1337 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
1338 num_speeds++;
1339 }
1340 if (*speeds & RTE_ETH_LINK_SPEED_100M) {
1341 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
1342 num_speeds++;
1343 }
1344 if (*speeds & RTE_ETH_LINK_SPEED_1G) {
1345 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
1346 num_speeds++;
1347 }
1348 if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
1349 goto error_invalid_config;
1350
1351
1352
1353
1354 if (!autoneg) {
1355 hw->mac.autoneg = 0;
1356 hw->mac.forced_speed_duplex =
1357 hw->phy.autoneg_advertised;
1358 } else {
1359 hw->mac.autoneg = 1;
1360 }
1361 }
1362
1363 e1000_setup_link(hw);
1364
1365 if (rte_intr_allow_others(intr_handle)) {
1366
1367 if (dev->data->dev_conf.intr_conf.lsc != 0)
1368 eth_igb_lsc_interrupt_setup(dev, TRUE);
1369 else
1370 eth_igb_lsc_interrupt_setup(dev, FALSE);
1371 } else {
1372 rte_intr_callback_unregister(intr_handle,
1373 eth_igb_interrupt_handler,
1374 (void *)dev);
1375 if (dev->data->dev_conf.intr_conf.lsc != 0)
1376 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1377 " no intr multiplex");
1378 }
1379
1380
1381 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1382 rte_intr_dp_is_en(intr_handle))
1383 eth_igb_rxq_interrupt_setup(dev);
1384
1385
1386 rte_intr_enable(intr_handle);
1387
1388
1389 igb_intr_enable(dev);
1390
1391
1392 igb_filter_restore(dev);
1393
1394 eth_igb_rxtx_control(dev, true);
1395 eth_igb_link_update(dev, 0);
1396
1397 PMD_INIT_LOG(DEBUG, "<<");
1398
1399 return 0;
1400
1401error_invalid_config:
1402 PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1403 dev->data->dev_conf.link_speeds, dev->data->port_id);
1404 igb_dev_clear_queues(dev);
1405 return -EINVAL;
1406}
1407
1408
1409
1410
1411
1412
1413
1414static int
1415eth_igb_stop(struct rte_eth_dev *dev)
1416{
1417 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1418 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1419 struct rte_eth_link link;
1420 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1421 struct e1000_adapter *adapter =
1422 E1000_DEV_PRIVATE(dev->data->dev_private);
1423
1424 if (adapter->stopped)
1425 return 0;
1426
1427 eth_igb_rxtx_control(dev, false);
1428
1429 igb_intr_disable(dev);
1430
1431
1432 rte_intr_disable(intr_handle);
1433
1434 igb_pf_reset_hw(hw);
1435 E1000_WRITE_REG(hw, E1000_WUC, 0);
1436
1437
1438 if (hw->mac.type >= e1000_82580 &&
1439 (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1440 uint32_t phpm_reg;
1441
1442 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1443 phpm_reg |= E1000_82580_PM_GO_LINKD;
1444 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1445 }
1446
1447
1448 eth_igb_dev_set_link_down(dev);
1449
1450 igb_dev_clear_queues(dev);
1451
1452
1453 memset(&link, 0, sizeof(link));
1454 rte_eth_linkstatus_set(dev, &link);
1455
1456 if (!rte_intr_allow_others(intr_handle))
1457
1458 rte_intr_callback_register(intr_handle,
1459 eth_igb_interrupt_handler,
1460 (void *)dev);
1461
1462
1463 rte_intr_efd_disable(intr_handle);
1464 rte_intr_vec_list_free(intr_handle);
1465
1466 adapter->stopped = true;
1467 dev->data->dev_started = 0;
1468
1469 return 0;
1470}
1471
1472static int
1473eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
1474{
1475 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1476
1477 if (hw->phy.media_type == e1000_media_type_copper)
1478 e1000_power_up_phy(hw);
1479 else
1480 e1000_power_up_fiber_serdes_link(hw);
1481
1482 return 0;
1483}
1484
1485static int
1486eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
1487{
1488 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1489
1490 if (hw->phy.media_type == e1000_media_type_copper)
1491 e1000_power_down_phy(hw);
1492 else
1493 e1000_shutdown_fiber_serdes_link(hw);
1494
1495 return 0;
1496}
1497
1498static int
1499eth_igb_close(struct rte_eth_dev *dev)
1500{
1501 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1502 struct rte_eth_link link;
1503 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1504 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1505 struct e1000_filter_info *filter_info =
1506 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1507 int ret;
1508
1509 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1510 return 0;
1511
1512 ret = eth_igb_stop(dev);
1513
1514 e1000_phy_hw_reset(hw);
1515 igb_release_manageability(hw);
1516 igb_hw_control_release(hw);
1517
1518
1519 if (hw->mac.type >= e1000_82580 &&
1520 (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1521 uint32_t phpm_reg;
1522
1523 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1524 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1525 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1526 }
1527
1528 igb_dev_free_queues(dev);
1529
1530
1531 rte_intr_vec_list_free(intr_handle);
1532
1533 memset(&link, 0, sizeof(link));
1534 rte_eth_linkstatus_set(dev, &link);
1535
1536
1537 igb_reset_swfw_lock(hw);
1538
1539
1540 igb_pf_host_uninit(dev);
1541
1542 rte_intr_callback_unregister(intr_handle,
1543 eth_igb_interrupt_handler, dev);
1544
1545
1546 filter_info->syn_info = 0;
1547
1548
1549 filter_info->ethertype_mask = 0;
1550 memset(filter_info->ethertype_filters, 0,
1551 E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
1552
1553
1554 memset(&filter_info->rss_info, 0,
1555 sizeof(struct igb_rte_flow_rss_conf));
1556
1557
1558 igb_ntuple_filter_uninit(dev);
1559
1560
1561 igb_flex_filter_uninit(dev);
1562
1563
1564 igb_filterlist_flush(dev);
1565
1566 return ret;
1567}
1568
1569
1570
1571
1572static int
1573eth_igb_reset(struct rte_eth_dev *dev)
1574{
1575 int ret;
1576
1577
1578
1579
1580
1581
1582
1583 if (dev->data->sriov.active)
1584 return -ENOTSUP;
1585
1586 ret = eth_igb_dev_uninit(dev);
1587 if (ret)
1588 return ret;
1589
1590 ret = eth_igb_dev_init(dev);
1591
1592 return ret;
1593}
1594
1595
1596static int
1597igb_get_rx_buffer_size(struct e1000_hw *hw)
1598{
1599 uint32_t rx_buf_size;
1600 if (hw->mac.type == e1000_82576) {
1601 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1602 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1603
1604 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1605 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1606 rx_buf_size = (rx_buf_size << 10);
1607 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1608 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1609 } else {
1610 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1611 }
1612
1613 return rx_buf_size;
1614}
1615
1616
1617
1618
1619
1620
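/*
 * One-time hardware init: take over control from firmware, derive the
 * flow-control thresholds from the RX packet buffer size (high water =
 * buffer size minus two maximum-size frames, low water = high water -
 * 1500 bytes; e.g. a 32 KB buffer gives 32768 - 2 * 1518 = 29732 and
 * 28232 bytes), issue a global reset, initialize the MAC and check the
 * link.
 */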
1621static int
1622igb_hardware_init(struct e1000_hw *hw)
1623{
1624 uint32_t rx_buf_size;
1625 int diag;
1626
1627
1628 igb_hw_control_acquire(hw);
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644 rx_buf_size = igb_get_rx_buffer_size(hw);
1645
1646 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
1647 hw->fc.low_water = hw->fc.high_water - 1500;
1648 hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1649 hw->fc.send_xon = 1;
1650
1651
1652 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1653 hw->fc.requested_mode = igb_fc_setting;
1654 else
1655 hw->fc.requested_mode = e1000_fc_none;
1656
1657
1658 igb_pf_reset_hw(hw);
1659 E1000_WRITE_REG(hw, E1000_WUC, 0);
1660
1661 diag = e1000_init_hw(hw);
1662 if (diag < 0)
1663 return diag;
1664
1665 E1000_WRITE_REG(hw, E1000_VET,
1666 RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
1667 e1000_get_phy_info(hw);
1668 e1000_check_for_link(hw);
1669
1670 return 0;
1671}
1672
1673
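/*
 * Accumulate the hardware statistics registers into the software copy.
 * The GORC/GOTC, TOR/TOT and HGORC/HGOTC byte counters include CRC
 * bytes, so the CRC length is subtracted once per newly counted packet
 * to keep byte counts consistent with what the application sees.
 */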
1674static void
1675igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
1676{
1677 int pause_frames;
1678
1679 uint64_t old_gprc = stats->gprc;
1680 uint64_t old_gptc = stats->gptc;
1681 uint64_t old_tpr = stats->tpr;
1682 uint64_t old_tpt = stats->tpt;
1683 uint64_t old_rpthc = stats->rpthc;
1684 uint64_t old_hgptc = stats->hgptc;
1685
	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}
1692
1693 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1694 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1695 stats->scc += E1000_READ_REG(hw, E1000_SCC);
1696 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1697
1698 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1699 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1700 stats->colc += E1000_READ_REG(hw, E1000_COLC);
1701 stats->dc += E1000_READ_REG(hw, E1000_DC);
1702 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1703 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1704 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1705
1706
1707
1708
1709 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1710 stats->xoffrxc += pause_frames;
1711 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1712 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1713 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1714 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1715 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1716 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1717 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1718 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1719 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1720 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1721 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1722 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1723
1724
1725
1726
1727
1728 stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1729 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1730 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1731 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1732 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1733 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
1734
1735 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1736 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1737 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1738 stats->roc += E1000_READ_REG(hw, E1000_ROC);
1739 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1740
1741 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1742 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1743
1744 stats->tor += E1000_READ_REG(hw, E1000_TORL);
1745 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
1746 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1747 stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1748 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
1749 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1750
1751 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1752 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1753 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1754 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1755 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1756 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1757 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1758 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1759
1760
1761
1762 stats->iac += E1000_READ_REG(hw, E1000_IAC);
1763 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1764 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1765 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1766 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1767 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1768 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1769 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1770 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1771
1772
1773
1774 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1775 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1776 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1777 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1778 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1779 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1780 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1781 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1782 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1783 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1784 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1785 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1786 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1787 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1788 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1789 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1790
1791 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1792 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1793 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1794 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1795 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1796 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1797}
1798
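/* Report basic port statistics (packets, bytes and error counts) built from the accumulated HW counters. */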
1799static int
1800eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1801{
1802 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1803 struct e1000_hw_stats *stats =
1804 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1805
1806 igb_read_stats_registers(hw, stats);
1807
1808 if (rte_stats == NULL)
1809 return -EINVAL;
1810
	/* Rx Errors */
1812 rte_stats->imissed = stats->mpc;
1813 rte_stats->ierrors = stats->crcerrs + stats->rlec +
1814 stats->rxerrc + stats->algnerrc + stats->cexterr;
1815
	/* Tx Errors */
1817 rte_stats->oerrors = stats->ecol + stats->latecol;
1818
1819 rte_stats->ipackets = stats->gprc;
1820 rte_stats->opackets = stats->gptc;
1821 rte_stats->ibytes = stats->gorc;
1822 rte_stats->obytes = stats->gotc;
1823 return 0;
1824}
1825
1826static int
1827eth_igb_stats_reset(struct rte_eth_dev *dev)
1828{
1829 struct e1000_hw_stats *hw_stats =
1830 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1831
	/* HW registers are cleared on read */
1833 eth_igb_stats_get(dev, NULL);
1834
	/* Reset software totals */
1836 memset(hw_stats, 0, sizeof(*hw_stats));
1837
1838 return 0;
1839}
1840
1841static int
1842eth_igb_xstats_reset(struct rte_eth_dev *dev)
1843{
1844 struct e1000_hw_stats *stats =
1845 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1846
	/* HW registers are cleared on read */
1848 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1849
	/* Reset software totals */
1851 memset(stats, 0, sizeof(*stats));
1852
1853 return 0;
1854}
1855
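/* Return the number of extended statistics and, when a buffer is provided, copy out their names. */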
1856static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1857 struct rte_eth_xstat_name *xstats_names,
1858 __rte_unused unsigned int size)
1859{
1860 unsigned i;
1861
1862 if (xstats_names == NULL)
1863 return IGB_NB_XSTATS;
1864
1865
1866
1867 for (i = 0; i < IGB_NB_XSTATS; i++) {
1868 strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name,
1869 sizeof(xstats_names[i].name));
1870 }
1871
1872 return IGB_NB_XSTATS;
1873}
1874
1875static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
1876 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
1877 unsigned int limit)
1878{
1879 unsigned int i;
1880
1881 if (!ids) {
1882 if (xstats_names == NULL)
1883 return IGB_NB_XSTATS;
1884
1885 for (i = 0; i < IGB_NB_XSTATS; i++)
1886 strlcpy(xstats_names[i].name,
1887 rte_igb_stats_strings[i].name,
1888 sizeof(xstats_names[i].name));
1889
1890 return IGB_NB_XSTATS;
1891
1892 } else {
1893 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
1894
1895 eth_igb_xstats_get_names_by_id(dev, NULL, xstats_names_copy,
1896 IGB_NB_XSTATS);
1897
1898 for (i = 0; i < limit; i++) {
1899 if (ids[i] >= IGB_NB_XSTATS) {
1900 PMD_INIT_LOG(ERR, "id value isn't valid");
1901 return -1;
1902 }
1903 strcpy(xstats_names[i].name,
1904 xstats_names_copy[ids[i]].name);
1905 }
1906 return limit;
1907 }
1908}
1909
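/* Retrieve all extended statistics; returns the required array size when the supplied one is too small. */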
1910static int
1911eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1912 unsigned n)
1913{
1914 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 struct e1000_hw_stats *hw_stats =
1916 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1917 unsigned i;
1918
1919 if (n < IGB_NB_XSTATS)
1920 return IGB_NB_XSTATS;
1921
1922 igb_read_stats_registers(hw, hw_stats);
1923
	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
1927 if (!xstats)
1928 return 0;
1929
	/* Extended stats */
1931 for (i = 0; i < IGB_NB_XSTATS; i++) {
1932 xstats[i].id = i;
1933 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1934 rte_igb_stats_strings[i].offset);
1935 }
1936
1937 return IGB_NB_XSTATS;
1938}
1939
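/* Retrieve extended statistics by ID, or all of them when no ID list is given. */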
1940static int
1941eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1942 uint64_t *values, unsigned int n)
1943{
1944 unsigned int i;
1945
1946 if (!ids) {
1947 struct e1000_hw *hw =
1948 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1949 struct e1000_hw_stats *hw_stats =
1950 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1951
1952 if (n < IGB_NB_XSTATS)
1953 return IGB_NB_XSTATS;
1954
1955 igb_read_stats_registers(hw, hw_stats);
1956
	/* If this is a reset values is NULL, and we have cleared the
	 * registers by reading them.
	 */
1960 if (!values)
1961 return 0;
1962
1963
1964 for (i = 0; i < IGB_NB_XSTATS; i++)
1965 values[i] = *(uint64_t *)(((char *)hw_stats) +
1966 rte_igb_stats_strings[i].offset);
1967
1968 return IGB_NB_XSTATS;
1969
1970 } else {
1971 uint64_t values_copy[IGB_NB_XSTATS];
1972
1973 eth_igb_xstats_get_by_id(dev, NULL, values_copy,
1974 IGB_NB_XSTATS);
1975
1976 for (i = 0; i < n; i++) {
1977 if (ids[i] >= IGB_NB_XSTATS) {
1978 PMD_INIT_LOG(ERR, "id value isn't valid");
1979 return -1;
1980 }
1981 values[i] = values_copy[ids[i]];
1982 }
1983 return n;
1984 }
1985}
1986
1987static void
1988igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
1989{
	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);
2025}
2026
2027static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2028 struct rte_eth_xstat_name *xstats_names,
2029 __rte_unused unsigned limit)
2030{
2031 unsigned i;
2032
2033 if (xstats_names != NULL)
2034 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2035 strlcpy(xstats_names[i].name,
2036 rte_igbvf_stats_strings[i].name,
2037 sizeof(xstats_names[i].name));
2038 }
2039 return IGBVF_NB_XSTATS;
2040}
2041
2042static int
2043eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2044 unsigned n)
2045{
2046 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2047 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2048 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2049 unsigned i;
2050
2051 if (n < IGBVF_NB_XSTATS)
2052 return IGBVF_NB_XSTATS;
2053
2054 igbvf_read_stats_registers(hw, hw_stats);
2055
2056 if (!xstats)
2057 return 0;
2058
2059 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2060 xstats[i].id = i;
2061 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2062 rte_igbvf_stats_strings[i].offset);
2063 }
2064
2065 return IGBVF_NB_XSTATS;
2066}
2067
2068static int
2069eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
2070{
2071 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2073 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2074
2075 igbvf_read_stats_registers(hw, hw_stats);
2076
2077 if (rte_stats == NULL)
2078 return -EINVAL;
2079
2080 rte_stats->ipackets = hw_stats->gprc;
2081 rte_stats->ibytes = hw_stats->gorc;
2082 rte_stats->opackets = hw_stats->gptc;
2083 rte_stats->obytes = hw_stats->gotc;
2084 return 0;
2085}
2086
2087static int
2088eth_igbvf_stats_reset(struct rte_eth_dev *dev)
2089{
2090 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
2091 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2092
	/* Sync HW registers to the last software totals */
2094 eth_igbvf_stats_get(dev, NULL);
2095
	/* reset HW current stats */
2097 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
2098 offsetof(struct e1000_vf_stats, gprc));
2099
2100 return 0;
2101}
2102
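/* Build the firmware version string from the NVM / option ROM information. */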
2103static int
2104eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
2105 size_t fw_size)
2106{
2107 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2108 struct e1000_fw_version fw;
2109 int ret;
2110
2111 e1000_get_fw_version(hw, &fw);
2112
2113 switch (hw->mac.type) {
2114 case e1000_i210:
2115 case e1000_i211:
2116 if (!(e1000_get_flash_presence_i210(hw))) {
2117 ret = snprintf(fw_version, fw_size,
2118 "%2d.%2d-%d",
2119 fw.invm_major, fw.invm_minor,
2120 fw.invm_img_type);
2121 break;
2122 }
		/* fall through */
2124 default:
		/* if option rom is valid, display its version too */
2126 if (fw.or_valid) {
2127 ret = snprintf(fw_version, fw_size,
2128 "%d.%d, 0x%08x, %d.%d.%d",
2129 fw.eep_major, fw.eep_minor, fw.etrack_id,
2130 fw.or_major, fw.or_build, fw.or_patch);
2131
2132 } else {
2133 if (fw.etrack_id != 0X0000) {
2134 ret = snprintf(fw_version, fw_size,
2135 "%d.%d, 0x%08x",
2136 fw.eep_major, fw.eep_minor,
2137 fw.etrack_id);
2138 } else {
2139 ret = snprintf(fw_version, fw_size,
2140 "%d.%d.%d",
2141 fw.eep_major, fw.eep_minor,
2142 fw.eep_build);
2143 }
2144 }
2145 break;
2146 }
2147 if (ret < 0)
2148 return -EINVAL;
2149
	ret += 1; /* add the size of '\0' */
2151 if (fw_size < (size_t)ret)
2152 return ret;
2153 else
2154 return 0;
2155}
2156
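/* Report device capabilities: queue limits, offloads, RSS features and default ring configuration. */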
2157static int
2158eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2159{
2160 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2161
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2164 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2165 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2166 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2167 dev_info->rx_queue_offload_capa;
2168 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2169 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2170 dev_info->tx_queue_offload_capa;
2171 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
2172
2173 switch (hw->mac.type) {
2174 case e1000_82575:
2175 dev_info->max_rx_queues = 4;
2176 dev_info->max_tx_queues = 4;
2177 dev_info->max_vmdq_pools = 0;
2178 break;
2179
2180 case e1000_82576:
2181 dev_info->max_rx_queues = 16;
2182 dev_info->max_tx_queues = 16;
2183 dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2184 dev_info->vmdq_queue_num = 16;
2185 break;
2186
2187 case e1000_82580:
2188 dev_info->max_rx_queues = 8;
2189 dev_info->max_tx_queues = 8;
2190 dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2191 dev_info->vmdq_queue_num = 8;
2192 break;
2193
2194 case e1000_i350:
2195 dev_info->max_rx_queues = 8;
2196 dev_info->max_tx_queues = 8;
2197 dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2198 dev_info->vmdq_queue_num = 8;
2199 break;
2200
2201 case e1000_i354:
2202 dev_info->max_rx_queues = 8;
2203 dev_info->max_tx_queues = 8;
2204 break;
2205
2206 case e1000_i210:
2207 dev_info->max_rx_queues = 4;
2208 dev_info->max_tx_queues = 4;
2209 dev_info->max_vmdq_pools = 0;
2210 break;
2211
2212 case e1000_i211:
2213 dev_info->max_rx_queues = 2;
2214 dev_info->max_tx_queues = 2;
2215 dev_info->max_vmdq_pools = 0;
2216 break;
2217
2218 default:
		/* Should not happen */
2220 return -EINVAL;
2221 }
2222 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
2223 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
2224 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
2225
2226 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2227 .rx_thresh = {
2228 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2229 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2230 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2231 },
2232 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2233 .rx_drop_en = 0,
2234 .offloads = 0,
2235 };
2236
2237 dev_info->default_txconf = (struct rte_eth_txconf) {
2238 .tx_thresh = {
2239 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2240 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2241 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2242 },
2243 .offloads = 0,
2244 };
2245
2246 dev_info->rx_desc_lim = rx_desc_lim;
2247 dev_info->tx_desc_lim = tx_desc_lim;
2248
2249 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
2250 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
2251 RTE_ETH_LINK_SPEED_1G;
2252
2253 dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
2254 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2255
2256 return 0;
2257}
2258
2259static const uint32_t *
2260eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2261{
2262 static const uint32_t ptypes[] = {
2263
2264 RTE_PTYPE_L2_ETHER,
2265 RTE_PTYPE_L3_IPV4,
2266 RTE_PTYPE_L3_IPV4_EXT,
2267 RTE_PTYPE_L3_IPV6,
2268 RTE_PTYPE_L3_IPV6_EXT,
2269 RTE_PTYPE_L4_TCP,
2270 RTE_PTYPE_L4_UDP,
2271 RTE_PTYPE_L4_SCTP,
2272 RTE_PTYPE_TUNNEL_IP,
2273 RTE_PTYPE_INNER_L3_IPV6,
2274 RTE_PTYPE_INNER_L3_IPV6_EXT,
2275 RTE_PTYPE_INNER_L4_TCP,
2276 RTE_PTYPE_INNER_L4_UDP,
2277 RTE_PTYPE_UNKNOWN
2278 };
2279
2280 if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2281 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2282 return ptypes;
2283 return NULL;
2284}
2285
2286static int
2287eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2288{
2289 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2290
2291 dev_info->min_rx_bufsize = 256;
2292 dev_info->max_rx_pktlen = 0x3FFF;
2293 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2294 dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
2295 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
2296 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2297 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
2298 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
2299 RTE_ETH_TX_OFFLOAD_TCP_TSO;
2300 switch (hw->mac.type) {
2301 case e1000_vfadapt:
2302 dev_info->max_rx_queues = 2;
2303 dev_info->max_tx_queues = 2;
2304 break;
2305 case e1000_vfadapt_i350:
2306 dev_info->max_rx_queues = 1;
2307 dev_info->max_tx_queues = 1;
2308 break;
2309 default:
		/* Should not happen */
2311 return -EINVAL;
2312 }
2313
2314 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2315 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2316 dev_info->rx_queue_offload_capa;
2317 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2318 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2319 dev_info->tx_queue_offload_capa;
2320
2321 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2322 .rx_thresh = {
2323 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2324 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2325 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2326 },
2327 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2328 .rx_drop_en = 0,
2329 .offloads = 0,
2330 };
2331
2332 dev_info->default_txconf = (struct rte_eth_txconf) {
2333 .tx_thresh = {
2334 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2335 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2336 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2337 },
2338 .offloads = 0,
2339 };
2340
2341 dev_info->rx_desc_lim = rx_desc_lim;
2342 dev_info->tx_desc_lim = tx_desc_lim;
2343
2344 return 0;
2345}
2346
2347
2348static int
2349eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2350{
2351 struct e1000_hw *hw =
2352 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2353 struct rte_eth_link link;
2354 int link_check, count;
2355
2356 link_check = 0;
2357 hw->mac.get_link_status = 1;
2358
	/* possible wait-to-complete in up to 9 seconds */
2360 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
2361
2362 switch (hw->phy.media_type) {
2363 case e1000_media_type_copper:
2364
2365 e1000_check_for_link(hw);
2366 link_check = !hw->mac.get_link_status;
2367 break;
2368
2369 case e1000_media_type_fiber:
2370 e1000_check_for_link(hw);
2371 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2372 E1000_STATUS_LU);
2373 break;
2374
2375 case e1000_media_type_internal_serdes:
2376 e1000_check_for_link(hw);
2377 link_check = hw->mac.serdes_has_link;
2378 break;
2379
		/* VF device is type_unknown */
2381 case e1000_media_type_unknown:
2382 eth_igbvf_link_update(hw);
2383 link_check = !hw->mac.get_link_status;
2384 break;
2385
2386 default:
2387 break;
2388 }
2389 if (link_check || wait_to_complete == 0)
2390 break;
2391 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2392 }
2393 memset(&link, 0, sizeof(link));
2394
	/* Now we check if a transition has happened */
2396 if (link_check) {
2397 uint16_t duplex, speed;
2398 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2399 link.link_duplex = (duplex == FULL_DUPLEX) ?
2400 RTE_ETH_LINK_FULL_DUPLEX :
2401 RTE_ETH_LINK_HALF_DUPLEX;
2402 link.link_speed = speed;
2403 link.link_status = RTE_ETH_LINK_UP;
2404 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2405 RTE_ETH_LINK_SPEED_FIXED);
2406 } else if (!link_check) {
2407 link.link_speed = 0;
2408 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
2409 link.link_status = RTE_ETH_LINK_DOWN;
2410 link.link_autoneg = RTE_ETH_LINK_FIXED;
2411 }
2412
2413 return rte_eth_linkstatus_set(dev, &link);
2414}
2415
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
2421static void
2422igb_hw_control_acquire(struct e1000_hw *hw)
2423{
2424 uint32_t ctrl_ext;
2425
	/* Let firmware know the driver has taken over */
2427 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2428 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2429}
2430
/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
2436static void
2437igb_hw_control_release(struct e1000_hw *hw)
2438{
2439 uint32_t ctrl_ext;
2440
	/* Let firmware take over control of the hardware */
2442 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2443 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2444 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2445}
2446
/*
 * When management pass-through is enabled, stop intercepting ARP in
 * firmware and forward management packets (ports 623/664) to the host.
 */
2452static void
2453igb_init_manageability(struct e1000_hw *hw)
2454{
2455 if (e1000_enable_mng_pass_thru(hw)) {
2456 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2457 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2458
		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;	/* Mng Port 623 */
		manc2h |= 1 << 6;	/* Mng Port 664 */
2466 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2467 E1000_WRITE_REG(hw, E1000_MANC, manc);
2468 }
2469}
2470
2471static void
2472igb_release_manageability(struct e1000_hw *hw)
2473{
2474 if (e1000_enable_mng_pass_thru(hw)) {
2475 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2476
2477 manc |= E1000_MANC_ARP_EN;
2478 manc &= ~E1000_MANC_EN_MNG2HOST;
2479
2480 E1000_WRITE_REG(hw, E1000_MANC, manc);
2481 }
2482}
2483
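/* Enable promiscuous mode by setting the UPE and MPE bits in RCTL. */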
2484static int
2485eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2486{
2487 struct e1000_hw *hw =
2488 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489 uint32_t rctl;
2490
2491 rctl = E1000_READ_REG(hw, E1000_RCTL);
2492 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2493 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2494
2495 return 0;
2496}
2497
2498static int
2499eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2500{
2501 struct e1000_hw *hw =
2502 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2503 uint32_t rctl;
2504
2505 rctl = E1000_READ_REG(hw, E1000_RCTL);
2506 rctl &= (~E1000_RCTL_UPE);
2507 if (dev->data->all_multicast == 1)
2508 rctl |= E1000_RCTL_MPE;
2509 else
2510 rctl &= (~E1000_RCTL_MPE);
2511 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2512
2513 return 0;
2514}
2515
2516static int
2517eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2518{
2519 struct e1000_hw *hw =
2520 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2521 uint32_t rctl;
2522
2523 rctl = E1000_READ_REG(hw, E1000_RCTL);
2524 rctl |= E1000_RCTL_MPE;
2525 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2526
2527 return 0;
2528}
2529
2530static int
2531eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2532{
2533 struct e1000_hw *hw =
2534 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2535 uint32_t rctl;
2536
2537 if (dev->data->promiscuous == 1)
2538 return 0;
2539 rctl = E1000_READ_REG(hw, E1000_RCTL);
2540 rctl &= (~E1000_RCTL_MPE);
2541 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2542
2543 return 0;
2544}
2545
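/* Add or remove a VLAN ID in the HW VLAN filter table and mirror the change in the shadow VFTA. */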
2546static int
2547eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2548{
2549 struct e1000_hw *hw =
2550 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2551 struct e1000_vfta * shadow_vfta =
2552 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2553 uint32_t vfta;
2554 uint32_t vid_idx;
2555 uint32_t vid_bit;
2556
2557 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2558 E1000_VFTA_ENTRY_MASK);
2559 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
2560 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2561 if (on)
2562 vfta |= vid_bit;
2563 else
2564 vfta &= ~vid_bit;
2565 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2566
	/* update local VFTA copy */
2568 shadow_vfta->vfta[vid_idx] = vfta;
2569
2570 return 0;
2571}
2572
2573static int
2574eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2575 enum rte_vlan_type vlan_type,
2576 uint16_t tpid)
2577{
2578 struct e1000_hw *hw =
2579 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2580 uint32_t reg, qinq;
2581
2582 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2583 qinq &= E1000_CTRL_EXT_EXT_VLAN;
2584
	/* only the outer TPID of a double VLAN (QinQ) can be configured */
2586 if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
2587 reg = E1000_READ_REG(hw, E1000_VET);
2588 reg = (reg & (~E1000_VET_VET_EXT)) |
2589 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2590 E1000_WRITE_REG(hw, E1000_VET, reg);
2591
2592 return 0;
2593 }
2594
	/* all other TPID values are not configurable */
2596 PMD_DRV_LOG(ERR, "Not supported");
2597
2598 return -ENOTSUP;
2599}
2600
2601static void
2602igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2603{
2604 struct e1000_hw *hw =
2605 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2606 uint32_t reg;
2607
	/* Filter Table Disable */
2609 reg = E1000_READ_REG(hw, E1000_RCTL);
2610 reg &= ~E1000_RCTL_CFIEN;
2611 reg &= ~E1000_RCTL_VFE;
2612 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2613}
2614
2615static void
2616igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2617{
2618 struct e1000_hw *hw =
2619 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2620 struct e1000_vfta * shadow_vfta =
2621 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2622 uint32_t reg;
2623 int i;
2624
	/* Filter Table Enable */
2626 reg = E1000_READ_REG(hw, E1000_RCTL);
2627 reg &= ~E1000_RCTL_CFIEN;
2628 reg |= E1000_RCTL_VFE;
2629 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2630
	/* restore VFTA table */
2632 for (i = 0; i < IGB_VFTA_SIZE; i++)
2633 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2634}
2635
2636static void
2637igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2638{
2639 struct e1000_hw *hw =
2640 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2641 uint32_t reg;
2642
	/* VLAN Mode Disable */
2644 reg = E1000_READ_REG(hw, E1000_CTRL);
2645 reg &= ~E1000_CTRL_VME;
2646 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2647}
2648
2649static void
2650igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2651{
2652 struct e1000_hw *hw =
2653 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2654 uint32_t reg;
2655
	/* VLAN Mode Enable */
2657 reg = E1000_READ_REG(hw, E1000_CTRL);
2658 reg |= E1000_CTRL_VME;
2659 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2660}
2661
2662static void
2663igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2664{
2665 struct e1000_hw *hw =
2666 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2667 uint32_t reg;
2668
	/* CTRL_EXT: Extend VLAN */
2670 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2671 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2672 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2673
	/* Update maximum packet length */
2675 E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD);
2676}
2677
2678static void
2679igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2680{
2681 struct e1000_hw *hw =
2682 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2683 uint32_t reg;
2684
2685
	/* CTRL_EXT: Extend VLAN */
2687 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2688 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2689
	/* Update maximum packet length */
2691 E1000_WRITE_REG(hw, E1000_RLPML,
2692 dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE);
2693}
2694
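/* Apply VLAN strip, filter and extend (QinQ) settings according to the offload mask. */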
2695static int
2696eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2697{
2698 struct rte_eth_rxmode *rxmode;
2699
2700 rxmode = &dev->data->dev_conf.rxmode;
2701 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
2702 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2703 igb_vlan_hw_strip_enable(dev);
2704 else
2705 igb_vlan_hw_strip_disable(dev);
2706 }
2707
2708 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2709 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
2710 igb_vlan_hw_filter_enable(dev);
2711 else
2712 igb_vlan_hw_filter_disable(dev);
2713 }
2714
2715 if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
2716 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
2717 igb_vlan_hw_extend_enable(dev);
2718 else
2719 igb_vlan_hw_extend_disable(dev);
2720 }
2721
2722 return 0;
2723}
2724
2725
/*
 * It enables or disables the Link Status Change interrupt by updating
 * the software interrupt mask.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
2738static int
2739eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2740{
2741 struct e1000_interrupt *intr =
2742 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2743
2744 if (on)
2745 intr->mask |= E1000_ICR_LSC;
2746 else
2747 intr->mask &= ~E1000_ICR_LSC;
2748
2749 return 0;
2750}
2751
2752
/* It enables the Rx queue interrupts by setting the corresponding bits
 * in the EIMS register. It is called once during device start.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
2762static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2763{
2764 uint32_t mask, regval;
2765 int ret;
2766 struct e1000_hw *hw =
2767 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2768 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2769 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2770 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
2771 struct rte_eth_dev_info dev_info;
2772
2773 memset(&dev_info, 0, sizeof(dev_info));
2774 ret = eth_igb_infos_get(dev, &dev_info);
2775 if (ret != 0)
2776 return ret;
2777
2778 mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
2779 regval = E1000_READ_REG(hw, E1000_EIMS);
2780 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2781
2782 return 0;
2783}
2784
2785
/*
 * It reads ICR to get the interrupt causes, checks them and sets bit
 * flags for the link status update and PF mailbox events.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
2796static int
2797eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2798{
2799 uint32_t icr;
2800 struct e1000_hw *hw =
2801 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2802 struct e1000_interrupt *intr =
2803 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2804
2805 igb_intr_disable(dev);
2806
	/* read-on-clear nic registers here */
2808 icr = E1000_READ_REG(hw, E1000_ICR);
2809
2810 intr->flags = 0;
2811 if (icr & E1000_ICR_LSC) {
2812 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2813 }
2814
2815 if (icr & E1000_ICR_VMMB)
2816 intr->flags |= E1000_FLAG_MAILBOX;
2817
2818 return 0;
2819}
2820
2821
/*
 * It handles the PF mailbox event if any and executes link_update when a
 * link status change has been signalled.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param intr_handle
 *  Pointer to the interrupt handle.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
2831static int
2832eth_igb_interrupt_action(struct rte_eth_dev *dev,
2833 struct rte_intr_handle *intr_handle)
2834{
2835 struct e1000_hw *hw =
2836 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2837 struct e1000_interrupt *intr =
2838 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2839 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2840 struct rte_eth_link link;
2841 int ret;
2842
2843 if (intr->flags & E1000_FLAG_MAILBOX) {
2844 igb_pf_mbx_process(dev);
2845 intr->flags &= ~E1000_FLAG_MAILBOX;
2846 }
2847
2848 igb_intr_enable(dev);
2849 rte_intr_ack(intr_handle);
2850
2851 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2852 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2853
		/* set get_link_status to check register later */
2855 hw->mac.get_link_status = 1;
2856 ret = eth_igb_link_update(dev, 0);
2857
		/* check if link has changed */
2859 if (ret < 0)
2860 return 0;
2861
2862 rte_eth_linkstatus_get(dev, &link);
2863 if (link.link_status) {
2864 PMD_INIT_LOG(INFO,
2865 " Port %d: Link Up - speed %u Mbps - %s",
2866 dev->data->port_id,
2867 (unsigned)link.link_speed,
2868 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2869 "full-duplex" : "half-duplex");
2870 } else {
2871 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2872 dev->data->port_id);
2873 }
2874
2875 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2876 pci_dev->addr.domain,
2877 pci_dev->addr.bus,
2878 pci_dev->addr.devid,
2879 pci_dev->addr.function);
2880 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2881 }
2882
2883 return 0;
2884}
2885
2886
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
2897static void
2898eth_igb_interrupt_handler(void *param)
2899{
2900 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2901
2902 eth_igb_interrupt_get_status(dev);
2903 eth_igb_interrupt_action(dev, dev->intr_handle);
2904}
2905
2906static int
2907eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2908{
2909 uint32_t eicr;
2910 struct e1000_hw *hw =
2911 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2912 struct e1000_interrupt *intr =
2913 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2914
2915 igbvf_intr_disable(hw);
2916
	/* read-on-clear nic registers here */
2918 eicr = E1000_READ_REG(hw, E1000_EICR);
2919 intr->flags = 0;
2920
2921 if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2922 intr->flags |= E1000_FLAG_MAILBOX;
2923
2924 return 0;
2925}
2926
2927void igbvf_mbx_process(struct rte_eth_dev *dev)
2928{
2929 struct e1000_hw *hw =
2930 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2931 struct e1000_mbx_info *mbx = &hw->mbx;
2932 u32 in_msg = 0;
2933
	/* peek the message first */
2935 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
2936
	/* PF reset VF event */
2938 if (in_msg == E1000_PF_CONTROL_MSG) {
		/* dummy mailbox read to ack the PF */
2940 if (mbx->ops.read(hw, &in_msg, 1, 0))
2941 return;
2942 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
2943 NULL);
2944 }
2945}
2946
2947static int
2948eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
2949{
2950 struct e1000_interrupt *intr =
2951 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2952
2953 if (intr->flags & E1000_FLAG_MAILBOX) {
2954 igbvf_mbx_process(dev);
2955 intr->flags &= ~E1000_FLAG_MAILBOX;
2956 }
2957
2958 igbvf_intr_enable(dev);
2959 rte_intr_ack(intr_handle);
2960
2961 return 0;
2962}
2963
2964static void
2965eth_igbvf_interrupt_handler(void *param)
2966{
2967 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2968
2969 eth_igbvf_interrupt_get_status(dev);
2970 eth_igbvf_interrupt_action(dev, dev->intr_handle);
2971}
2972
2973static int
2974eth_igb_led_on(struct rte_eth_dev *dev)
2975{
2976 struct e1000_hw *hw;
2977
2978 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2979 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2980}
2981
2982static int
2983eth_igb_led_off(struct rte_eth_dev *dev)
2984{
2985 struct e1000_hw *hw;
2986
2987 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2988 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2989}
2990
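/* Read back the currently programmed flow control configuration from the MAC. */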
2991static int
2992eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2993{
2994 struct e1000_hw *hw;
2995 uint32_t ctrl;
2996 int tx_pause;
2997 int rx_pause;
2998
2999 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3000 fc_conf->pause_time = hw->fc.pause_time;
3001 fc_conf->high_water = hw->fc.high_water;
3002 fc_conf->low_water = hw->fc.low_water;
3003 fc_conf->send_xon = hw->fc.send_xon;
3004 fc_conf->autoneg = hw->mac.autoneg;
3005
	/*
	 * Return rx_pause and tx_pause status according to the actual
	 * setting of the TFCE and RFCE bits in the CTRL register.
	 */
3010 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3011 if (ctrl & E1000_CTRL_TFCE)
3012 tx_pause = 1;
3013 else
3014 tx_pause = 0;
3015
3016 if (ctrl & E1000_CTRL_RFCE)
3017 rx_pause = 1;
3018 else
3019 rx_pause = 0;
3020
3021 if (rx_pause && tx_pause)
3022 fc_conf->mode = RTE_ETH_FC_FULL;
3023 else if (rx_pause)
3024 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
3025 else if (tx_pause)
3026 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
3027 else
3028 fc_conf->mode = RTE_ETH_FC_NONE;
3029
3030 return 0;
3031}
3032
3033static int
3034eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3035{
3036 struct e1000_hw *hw;
3037 int err;
3038 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
3039 e1000_fc_none,
3040 e1000_fc_rx_pause,
3041 e1000_fc_tx_pause,
3042 e1000_fc_full
3043 };
3044 uint32_t rx_buf_size;
3045 uint32_t max_high_water;
3046 uint32_t rctl;
3047 uint32_t ctrl;
3048
3049 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3050 if (fc_conf->autoneg != hw->mac.autoneg)
3051 return -ENOTSUP;
3052 rx_buf_size = igb_get_rx_buffer_size(hw);
3053 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3054
	/* At least reserve one Ethernet frame for watermark */
3056 max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
3057 if ((fc_conf->high_water > max_high_water) ||
3058 (fc_conf->high_water < fc_conf->low_water)) {
3059 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
3060 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
3061 return -EINVAL;
3062 }
3063
3064 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
3065 hw->fc.pause_time = fc_conf->pause_time;
3066 hw->fc.high_water = fc_conf->high_water;
3067 hw->fc.low_water = fc_conf->low_water;
3068 hw->fc.send_xon = fc_conf->send_xon;
3069
3070 err = e1000_setup_link_generic(hw);
3071 if (err == E1000_SUCCESS) {
		/* check if we want to forward MAC frames - driver doesn't
		 * have native capability to do that,
		 * so we'll write the registers ourselves
		 */
3076 rctl = E1000_READ_REG(hw, E1000_RCTL);
3077
		/* set or clear the PMCF bit depending on mac_ctrl_frame_fwd */
3079 if (fc_conf->mac_ctrl_frame_fwd != 0)
3080 rctl |= E1000_RCTL_PMCF;
3081 else
3082 rctl &= ~E1000_RCTL_PMCF;
3083
3084 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3085
		/*
		 * check if we want to change flow control mode - driver
		 * doesn't have native capability to do that,
		 * so we'll write the registers ourselves
		 */
3090 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3091
		/*
		 * set or clear E1000_CTRL_RFCE and E1000_CTRL_TFCE bits
		 * depending on the requested flow control mode.
		 */
3096 switch (fc_conf->mode) {
3097 case RTE_ETH_FC_NONE:
3098 ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
3099 break;
3100 case RTE_ETH_FC_RX_PAUSE:
3101 ctrl |= E1000_CTRL_RFCE;
3102 ctrl &= ~E1000_CTRL_TFCE;
3103 break;
3104 case RTE_ETH_FC_TX_PAUSE:
3105 ctrl |= E1000_CTRL_TFCE;
3106 ctrl &= ~E1000_CTRL_RFCE;
3107 break;
3108 case RTE_ETH_FC_FULL:
3109 ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
3110 break;
3111 default:
3112 PMD_INIT_LOG(ERR, "invalid flow control mode");
3113 return -EINVAL;
3114 }
3115
3116 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3117
3118 E1000_WRITE_FLUSH(hw);
3119
3120 return 0;
3121 }
3122
3123 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
3124 return -EIO;
3125}
3126
3127#define E1000_RAH_POOLSEL_SHIFT (18)
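/* Program a MAC address into a receive address register and bind it to the given VMDq pool. */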
3128static int
3129eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3130 uint32_t index, uint32_t pool)
3131{
3132 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3133 uint32_t rah;
3134
3135 e1000_rar_set(hw, mac_addr->addr_bytes, index);
3136 rah = E1000_READ_REG(hw, E1000_RAH(index));
3137 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
3138 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
3139 return 0;
3140}
3141
3142static void
3143eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
3144{
3145 uint8_t addr[RTE_ETHER_ADDR_LEN];
3146 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3147
3148 memset(addr, 0, sizeof(addr));
3149
3150 e1000_rar_set(hw, addr, index);
3151}
3152
3153static int
3154eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
3155 struct rte_ether_addr *addr)
3156{
3157 eth_igb_rar_clear(dev, 0);
3158 eth_igb_rar_set(dev, (void *)addr, 0, 0);
3159
3160 return 0;
3161}
3162
3163
3164
3165static void
3166igbvf_intr_disable(struct e1000_hw *hw)
3167{
3168 PMD_INIT_FUNC_TRACE();
3169
3170
3171 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
3172
3173 E1000_WRITE_FLUSH(hw);
3174}
3175
3176static void
3177igbvf_stop_adapter(struct rte_eth_dev *dev)
3178{
3179 u32 reg_val;
3180 u16 i;
3181 struct rte_eth_dev_info dev_info;
3182 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3183 int ret;
3184
3185 memset(&dev_info, 0, sizeof(dev_info));
3186 ret = eth_igbvf_infos_get(dev, &dev_info);
3187 if (ret != 0)
3188 return;
3189
	/* Clear interrupt mask to stop interrupts from being generated */
3191 igbvf_intr_disable(hw);
3192
	/* Clear any pending interrupts, flush previous writes */
3194 E1000_READ_REG(hw, E1000_EICR);
3195
	/* Disable the transmit unit.  Each queue must be disabled. */
3197 for (i = 0; i < dev_info.max_tx_queues; i++)
3198 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
3199
	/* Disable the receive unit by stopping each queue */
3201 for (i = 0; i < dev_info.max_rx_queues; i++) {
3202 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
3203 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
3204 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
3205 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
3206 ;
3207 }
3208
	/* flush all queue disables */
3210 E1000_WRITE_FLUSH(hw);
3211 msec_delay(2);
3212}
3213
3214static int eth_igbvf_link_update(struct e1000_hw *hw)
3215{
3216 struct e1000_mbx_info *mbx = &hw->mbx;
3217 struct e1000_mac_info *mac = &hw->mac;
3218 int ret_val = E1000_SUCCESS;
3219
3220 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
3221
	/*
	 * We only want to run this if there has been a reset asserted;
	 * in that case it could mean a link change, a device reset,
	 * or a virtual function reset.
	 */

	/* If we were hit with a reset or timeout drop the link */
3229 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
3230 mac->get_link_status = TRUE;
3231
3232 if (!mac->get_link_status)
3233 goto out;
3234
	/* if link status is down no point in checking to see if pf is up */
3236 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
3237 goto out;
3238
	/* if we passed all the tests above then the link is up and we
	 * no longer need to check for link */
3241 mac->get_link_status = FALSE;
3242
3243out:
3244 return ret_val;
3245}
3246
3247
3248static int
3249igbvf_dev_configure(struct rte_eth_dev *dev)
3250{
3251 struct rte_eth_conf* conf = &dev->data->dev_conf;
3252
3253 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3254 dev->data->port_id);
3255
3256 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
3257 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
3258
	/*
	 * VF has no ability to enable/disable HW CRC stripping,
	 * so keep the persistent behavior the same as the Host PF.
	 */
3263#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3264 if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
3265 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3266 conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
3267 }
3268#else
3269 if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
3270 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3271 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
3272 }
3273#endif
3274
3275 return 0;
3276}
3277
3278static int
3279igbvf_dev_start(struct rte_eth_dev *dev)
3280{
3281 struct e1000_hw *hw =
3282 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3283 struct e1000_adapter *adapter =
3284 E1000_DEV_PRIVATE(dev->data->dev_private);
3285 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3286 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3287 int ret;
3288 uint32_t intr_vector = 0;
3289
3290 PMD_INIT_FUNC_TRACE();
3291
3292 hw->mac.ops.reset_hw(hw);
3293 adapter->stopped = 0;
3294
	/* Set all vfta */
	igbvf_set_vfta_all(dev, 1);
3297
3298 eth_igbvf_tx_init(dev);
3299
	/* This can fail when allocating mbufs for descriptor rings */
3301 ret = eth_igbvf_rx_init(dev);
3302 if (ret) {
3303 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
3304 igb_dev_clear_queues(dev);
3305 return ret;
3306 }
3307
	/* check and configure queue intr-vector mapping */
3309 if (rte_intr_cap_multiple(intr_handle) &&
3310 dev->data->dev_conf.intr_conf.rxq) {
3311 intr_vector = dev->data->nb_rx_queues;
3312 ret = rte_intr_efd_enable(intr_handle, intr_vector);
3313 if (ret)
3314 return ret;
3315 }
3316
	/* Allocate the vector list */
3318 if (rte_intr_dp_is_en(intr_handle)) {
3319 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3320 dev->data->nb_rx_queues)) {
3321 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3322 " intr_vec", dev->data->nb_rx_queues);
3323 return -ENOMEM;
3324 }
3325 }
3326
3327 eth_igbvf_configure_msix_intr(dev);
3328
	/* enable uio/vfio intr/eventfd mapping */
3330 rte_intr_enable(intr_handle);
3331
	/* resume enabled intr since hw reset */
3333 igbvf_intr_enable(dev);
3334
3335 return 0;
3336}
3337
3338static int
3339igbvf_dev_stop(struct rte_eth_dev *dev)
3340{
3341 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3342 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3343 struct e1000_adapter *adapter =
3344 E1000_DEV_PRIVATE(dev->data->dev_private);
3345
3346 if (adapter->stopped)
3347 return 0;
3348
3349 PMD_INIT_FUNC_TRACE();
3350
3351 igbvf_stop_adapter(dev);
3352
	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts.
	 */
	igbvf_set_vfta_all(dev, 0);
3358
3359 igb_dev_clear_queues(dev);
3360
3361
3362 rte_intr_disable(intr_handle);
3363
	/* Clean datapath event and queue/vector mapping */
3365 rte_intr_efd_disable(intr_handle);
3366
3367
3368 rte_intr_vec_list_free(intr_handle);
3369
3370 adapter->stopped = true;
3371 dev->data->dev_started = 0;
3372
3373 return 0;
3374}
3375
3376static int
3377igbvf_dev_close(struct rte_eth_dev *dev)
3378{
3379 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3380 struct rte_ether_addr addr;
3381 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3382 int ret;
3383
3384 PMD_INIT_FUNC_TRACE();
3385
3386 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3387 return 0;
3388
3389 e1000_reset_hw(hw);
3390
3391 ret = igbvf_dev_stop(dev);
3392 if (ret != 0)
3393 return ret;
3394
3395 igb_dev_free_queues(dev);
3396
3397
	/*
	 * Reprogram the RAR with a zero MAC address, to ensure that the VF
	 * traffic goes to the PF after stop, close and detach of the VF.
	 */
3403 memset(&addr, 0, sizeof(addr));
3404 igbvf_default_mac_addr_set(dev, &addr);
3405
3406 rte_intr_callback_unregister(pci_dev->intr_handle,
3407 eth_igbvf_interrupt_handler,
3408 (void *)dev);
3409
3410 return 0;
3411}
3412
3413static int
3414igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3415{
3416 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3417
	/* Set both unicast and multicast promisc */
3419 e1000_promisc_set_vf(hw, e1000_promisc_enabled);
3420
3421 return 0;
3422}
3423
3424static int
3425igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3426{
3427 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3428
	/* If in allmulticast mode leave multicast promisc */
3430 if (dev->data->all_multicast == 1)
3431 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3432 else
3433 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3434
3435 return 0;
3436}
3437
3438static int
3439igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3440{
3441 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3442
	/* In promiscuous mode multicast promisc is already set */
3444 if (dev->data->promiscuous == 0)
3445 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3446
3447 return 0;
3448}
3449
3450static int
3451igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3452{
3453 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3454
	/* In promiscuous mode leave multicast promisc enabled */
3456 if (dev->data->promiscuous == 0)
3457 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3458
3459 return 0;
3460}
3461
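/* Ask the PF through the mailbox to add or remove a VLAN for this VF. */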
3462static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
3463{
3464 struct e1000_mbx_info *mbx = &hw->mbx;
3465 uint32_t msgbuf[2];
3466 s32 err;
3467
3468
3469 msgbuf[0] = E1000_VF_SET_VLAN;
3470 msgbuf[1] = vid;
3471
3472 if (on)
3473 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
3474
3475 err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
3476 if (err)
3477 goto mbx_err;
3478
3479 err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
3480 if (err)
3481 goto mbx_err;
3482
3483 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
3484 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
3485 err = -EINVAL;
3486
3487mbx_err:
3488 return err;
3489}
3490
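/* Replay the whole shadow VFTA to the PF, adding or removing every configured VLAN. */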
3491static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3492{
3493 struct e1000_hw *hw =
3494 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3495 struct e1000_vfta * shadow_vfta =
3496 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3497 int i = 0, j = 0, vfta = 0, mask = 1;
3498
3499 for (i = 0; i < IGB_VFTA_SIZE; i++){
3500 vfta = shadow_vfta->vfta[i];
3501 if(vfta){
3502 mask = 1;
3503 for (j = 0; j < 32; j++){
3504 if(vfta & mask)
3505 igbvf_set_vfta(hw,
3506 (uint16_t)((i<<5)+j), on);
3507 mask<<=1;
3508 }
3509 }
3510 }
3511
3512}
3513
3514static int
3515igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3516{
3517 struct e1000_hw *hw =
3518 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3519 struct e1000_vfta * shadow_vfta =
3520 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3521 uint32_t vid_idx = 0;
3522 uint32_t vid_bit = 0;
3523 int ret = 0;
3524
3525 PMD_INIT_FUNC_TRACE();
3526
	/* Ask the PF through the mailbox to add or remove the VLAN id */
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if (ret) {
3530 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3531 return ret;
3532 }
3533 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3534 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3535
	/* Save what we set so it can be restored after a device reset */
3537 if (on)
3538 shadow_vfta->vfta[vid_idx] |= vid_bit;
3539 else
3540 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3541
3542 return 0;
3543}
3544
3545static int
3546igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3547{
3548 struct e1000_hw *hw =
3549 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3550
3551
3552 hw->mac.ops.rar_set(hw, (void *)addr, 0);
3553 return 0;
3554}
3555
3556
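/* Update the RSS redirection table; igb supports exactly 128 entries. */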
3557static int
3558eth_igb_rss_reta_update(struct rte_eth_dev *dev,
3559 struct rte_eth_rss_reta_entry64 *reta_conf,
3560 uint16_t reta_size)
3561{
3562 uint8_t i, j, mask;
3563 uint32_t reta, r;
3564 uint16_t idx, shift;
3565 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3566
3567 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3571 return -EINVAL;
3572 }
3573
3574 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3575 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3576 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3577 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3578 IGB_4_BIT_MASK);
3579 if (!mask)
3580 continue;
3581 if (mask == IGB_4_BIT_MASK)
3582 r = 0;
3583 else
3584 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3585 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
3586 if (mask & (0x1 << j))
3587 reta |= reta_conf[idx].reta[shift + j] <<
3588 (CHAR_BIT * j);
3589 else
3590 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
3591 }
3592 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
3593 }
3594
3595 return 0;
3596}
3597
3598static int
3599eth_igb_rss_reta_query(struct rte_eth_dev *dev,
3600 struct rte_eth_rss_reta_entry64 *reta_conf,
3601 uint16_t reta_size)
3602{
3603 uint8_t i, j, mask;
3604 uint32_t reta;
3605 uint16_t idx, shift;
3606 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3607
3608 if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3612 return -EINVAL;
3613 }
3614
3615 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3616 idx = i / RTE_ETH_RETA_GROUP_SIZE;
3617 shift = i % RTE_ETH_RETA_GROUP_SIZE;
3618 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3619 IGB_4_BIT_MASK);
3620 if (!mask)
3621 continue;
3622 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3623 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3624 if (mask & (0x1 << j))
3625 reta_conf[idx].reta[shift + j] =
3626 ((reta >> (CHAR_BIT * j)) &
3627 IGB_8_BIT_MASK);
3628 }
3629 }
3630
3631 return 0;
3632}
3633
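/* Add or delete the single TCP SYN filter and steer matching packets to the given queue. */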
3634int
3635eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3636 struct rte_eth_syn_filter *filter,
3637 bool add)
3638{
3639 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3640 struct e1000_filter_info *filter_info =
3641 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3642 uint32_t synqf, rfctl;
3643
3644 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3645 return -EINVAL;
3646
3647 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3648
3649 if (add) {
3650 if (synqf & E1000_SYN_FILTER_ENABLE)
3651 return -EINVAL;
3652
3653 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3654 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
3655
3656 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3657 if (filter->hig_pri)
3658 rfctl |= E1000_RFCTL_SYNQFP;
3659 else
3660 rfctl &= ~E1000_RFCTL_SYNQFP;
3661
3662 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3663 } else {
3664 if (!(synqf & E1000_SYN_FILTER_ENABLE))
3665 return -ENOENT;
3666 synqf = 0;
3667 }
3668
3669 filter_info->syn_info = synqf;
3670 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3671 E1000_WRITE_FLUSH(hw);
3672 return 0;
3673}
3674
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
3676static inline int
3677ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3678 struct e1000_2tuple_filter_info *filter_info)
3679{
3680 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3681 return -EINVAL;
3682 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3683 return -EINVAL;
3684 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
3685 return -EINVAL;
3686
3687 switch (filter->dst_port_mask) {
3688 case UINT16_MAX:
3689 filter_info->dst_port_mask = 0;
3690 filter_info->dst_port = filter->dst_port;
3691 break;
3692 case 0:
3693 filter_info->dst_port_mask = 1;
3694 break;
3695 default:
3696 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3697 return -EINVAL;
3698 }
3699
3700 switch (filter->proto_mask) {
3701 case UINT8_MAX:
3702 filter_info->proto_mask = 0;
3703 filter_info->proto = filter->proto;
3704 break;
3705 case 0:
3706 filter_info->proto_mask = 1;
3707 break;
3708 default:
3709 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3710 return -EINVAL;
3711 }
3712
3713 filter_info->priority = (uint8_t)filter->priority;
3714 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3715 filter_info->tcp_flags = filter->tcp_flags;
3716 else
3717 filter_info->tcp_flags = 0;
3718
3719 return 0;
3720}
3721
3722static inline struct e1000_2tuple_filter *
3723igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3724 struct e1000_2tuple_filter_info *key)
3725{
3726 struct e1000_2tuple_filter *it;
3727
3728 TAILQ_FOREACH(it, filter_list, entries) {
3729 if (memcmp(key, &it->filter_info,
3730 sizeof(struct e1000_2tuple_filter_info)) == 0) {
3731 return it;
3732 }
3733 }
3734 return NULL;
3735}
3736
/* inject an igb 2tuple filter into the HW registers */
3738static inline void
3739igb_inject_2uple_filter(struct rte_eth_dev *dev,
3740 struct e1000_2tuple_filter *filter)
3741{
3742 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3743 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3744 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3745 int i;
3746
3747 i = filter->index;
3748 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3749 if (filter->filter_info.dst_port_mask == 1)
3750 imir |= E1000_IMIR_PORT_BP;
3751 else
3752 imir &= ~E1000_IMIR_PORT_BP;
3753
3754 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3755
3756 ttqf |= E1000_TTQF_QUEUE_ENABLE;
3757 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3758 ttqf |= (uint32_t)(filter->filter_info.proto &
3759 E1000_TTQF_PROTOCOL_MASK);
3760 if (filter->filter_info.proto_mask == 0)
3761 ttqf &= ~E1000_TTQF_MASK_ENABLE;
3762
	/* TCP flags bits setting */
3764 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
3765 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
3766 imir_ext |= E1000_IMIREXT_CTRL_URG;
3767 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
3768 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3769 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
3770 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3771 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
3772 imir_ext |= E1000_IMIREXT_CTRL_RST;
3773 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
3774 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3775 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
3776 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3777 } else {
3778 imir_ext |= E1000_IMIREXT_CTRL_BP;
3779 }
3780 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3781 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3782 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3783}
3784
3785
/*
 * igb_add_2tuple_filter - add a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
3796static int
3797igb_add_2tuple_filter(struct rte_eth_dev *dev,
3798 struct rte_eth_ntuple_filter *ntuple_filter)
3799{
3800 struct e1000_filter_info *filter_info =
3801 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3802 struct e1000_2tuple_filter *filter;
3803 int i, ret;
3804
3805 filter = rte_zmalloc("e1000_2tuple_filter",
3806 sizeof(struct e1000_2tuple_filter), 0);
3807 if (filter == NULL)
3808 return -ENOMEM;
3809
3810 ret = ntuple_filter_to_2tuple(ntuple_filter,
3811 &filter->filter_info);
3812 if (ret < 0) {
3813 rte_free(filter);
3814 return ret;
3815 }
3816 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3817 &filter->filter_info) != NULL) {
3818 PMD_DRV_LOG(ERR, "filter exists.");
3819 rte_free(filter);
3820 return -EEXIST;
3821 }
3822 filter->queue = ntuple_filter->queue;
3823
	/*
	 * look for an unused 2tuple filter index,
	 * and insert the filter into the list.
	 */
3828 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3829 if (!(filter_info->twotuple_mask & (1 << i))) {
3830 filter_info->twotuple_mask |= 1 << i;
3831 filter->index = i;
3832 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3833 filter,
3834 entries);
3835 break;
3836 }
3837 }
3838 if (i >= E1000_MAX_TTQF_FILTERS) {
3839 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3840 rte_free(filter);
3841 return -ENOSYS;
3842 }
3843
3844 igb_inject_2uple_filter(dev, filter);
3845 return 0;
3846}
3847
3848int
3849igb_delete_2tuple_filter(struct rte_eth_dev *dev,
3850 struct e1000_2tuple_filter *filter)
3851{
3852 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3853 struct e1000_filter_info *filter_info =
3854 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3855
3856 filter_info->twotuple_mask &= ~(1 << filter->index);
3857 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3858 rte_free(filter);
3859
3860 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3861 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3862 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3863 return 0;
3864}
3865
/*
 * igb_remove_2tuple_filter - remove a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
3877static int
3878igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3879 struct rte_eth_ntuple_filter *ntuple_filter)
3880{
3881 struct e1000_filter_info *filter_info =
3882 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3883 struct e1000_2tuple_filter_info filter_2tuple;
3884 struct e1000_2tuple_filter *filter;
3885 int ret;
3886
3887 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3888 ret = ntuple_filter_to_2tuple(ntuple_filter,
3889 &filter_2tuple);
3890 if (ret < 0)
3891 return ret;
3892
3893 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3894 &filter_2tuple);
3895 if (filter == NULL) {
3896 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3897 return -ENOENT;
3898 }
3899
3900 igb_delete_2tuple_filter(dev, filter);
3901
3902 return 0;
3903}
3904
/* inject an igb flex byte filter into the HW registers */
3906static inline void
3907igb_inject_flex_filter(struct rte_eth_dev *dev,
3908 struct e1000_flex_filter *filter)
3909{
3910 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3911 uint32_t wufc, queueing;
3912 uint32_t reg_off;
3913 uint8_t i, j = 0;
3914
3915 wufc = E1000_READ_REG(hw, E1000_WUFC);
3916 if (filter->index < E1000_MAX_FHFT)
3917 reg_off = E1000_FHFT(filter->index);
3918 else
3919 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3920
3921 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3922 (E1000_WUFC_FLX0 << filter->index));
3923 queueing = filter->filter_info.len |
3924 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3925 (filter->filter_info.priority <<
3926 E1000_FHFT_QUEUEING_PRIO_SHIFT);
3927 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3928 queueing);
3929
3930 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3931 E1000_WRITE_REG(hw, reg_off,
3932 filter->filter_info.dwords[j]);
3933 reg_off += sizeof(uint32_t);
3934 E1000_WRITE_REG(hw, reg_off,
3935 filter->filter_info.dwords[++j]);
3936 reg_off += sizeof(uint32_t);
3937 E1000_WRITE_REG(hw, reg_off,
3938 (uint32_t)filter->filter_info.mask[i]);
3939 reg_off += sizeof(uint32_t) * 2;
3940 ++j;
3941 }
3942}
3943
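/* Look up a flex filter with identical filter info in the per-port flex filter list. */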
3944static inline struct e1000_flex_filter *
3945eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3946 struct e1000_flex_filter_info *key)
3947{
3948 struct e1000_flex_filter *it;
3949
3950 TAILQ_FOREACH(it, filter_list, entries) {
3951 if (memcmp(key, &it->filter_info,
3952 sizeof(struct e1000_flex_filter_info)) == 0)
3953 return it;
3954 }
3955
3956 return NULL;
3957}
3958
/* remove a flex byte filter
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: the pointer of the filter that will be removed.
 */
3964void
3965igb_remove_flex_filter(struct rte_eth_dev *dev,
3966 struct e1000_flex_filter *filter)
3967{
3968 struct e1000_filter_info *filter_info =
3969 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3970 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3971 uint32_t wufc, i;
3972 uint32_t reg_off;
3973
3974 wufc = E1000_READ_REG(hw, E1000_WUFC);
3975 if (filter->index < E1000_MAX_FHFT)
3976 reg_off = E1000_FHFT(filter->index);
3977 else
3978 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3979
3980 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3981 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3982
3983 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3984 (~(E1000_WUFC_FLX0 << filter->index)));
3985
3986 filter_info->flex_mask &= ~(1 << filter->index);
3987 TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
3988 rte_free(filter);
3989}
3990
3991int
3992eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3993 struct igb_flex_filter *filter,
3994 bool add)
3995{
3996 struct e1000_filter_info *filter_info =
3997 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3998 struct e1000_flex_filter *flex_filter, *it;
3999 uint32_t mask;
4000 uint8_t shift, i;
4001
4002 flex_filter = rte_zmalloc("e1000_flex_filter",
4003 sizeof(struct e1000_flex_filter), 0);
4004 if (flex_filter == NULL)
4005 return -ENOMEM;
4006
4007 flex_filter->filter_info.len = filter->len;
4008 flex_filter->filter_info.priority = filter->priority;
4009 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
4010 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
4011 mask = 0;
4012
4013 for (shift = 0; shift < CHAR_BIT; shift++) {
4014 if (filter->mask[i] & (0x01 << shift))
4015 mask |= (0x80 >> shift);
4016 }
4017 flex_filter->filter_info.mask[i] = mask;
4018 }
4019
4020 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
4021 &flex_filter->filter_info);
4022 if (it == NULL && !add) {
4023 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4024 rte_free(flex_filter);
4025 return -ENOENT;
4026 }
4027 if (it != NULL && add) {
4028 PMD_DRV_LOG(ERR, "filter exists.");
4029 rte_free(flex_filter);
4030 return -EEXIST;
4031 }
4032
4033 if (add) {
4034 flex_filter->queue = filter->queue;
4035
4036
4037
4038
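		/* find a free flex filter index, mark it used and link the filter into the list */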
4039 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
4040 if (!(filter_info->flex_mask & (1 << i))) {
4041 filter_info->flex_mask |= 1 << i;
4042 flex_filter->index = i;
4043 TAILQ_INSERT_TAIL(&filter_info->flex_list,
4044 flex_filter,
4045 entries);
4046 break;
4047 }
4048 }
4049 if (i >= E1000_MAX_FLEX_FILTERS) {
4050 PMD_DRV_LOG(ERR, "flex filters are full.");
4051 rte_free(flex_filter);
4052 return -ENOSYS;
4053 }
4054
4055 igb_inject_flex_filter(dev, flex_filter);
4056
4057 } else {
4058 igb_remove_flex_filter(dev, it);
4059 rte_free(flex_filter);
4060 }
4061
4062 return 0;
4063}
4064
4065
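/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */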
4066static inline int
4067ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
4068 struct e1000_5tuple_filter_info *filter_info)
4069{
4070 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
4071 return -EINVAL;
4072 if (filter->priority > E1000_2TUPLE_MAX_PRI)
4073 return -EINVAL;
4074 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
4075 return -EINVAL;
4076
4077 switch (filter->dst_ip_mask) {
4078 case UINT32_MAX:
4079 filter_info->dst_ip_mask = 0;
4080 filter_info->dst_ip = filter->dst_ip;
4081 break;
4082 case 0:
4083 filter_info->dst_ip_mask = 1;
4084 break;
4085 default:
4086 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4087 return -EINVAL;
4088 }
4089
4090 switch (filter->src_ip_mask) {
4091 case UINT32_MAX:
4092 filter_info->src_ip_mask = 0;
4093 filter_info->src_ip = filter->src_ip;
4094 break;
4095 case 0:
4096 filter_info->src_ip_mask = 1;
4097 break;
4098 default:
4099 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4100 return -EINVAL;
4101 }
4102
4103 switch (filter->dst_port_mask) {
4104 case UINT16_MAX:
4105 filter_info->dst_port_mask = 0;
4106 filter_info->dst_port = filter->dst_port;
4107 break;
4108 case 0:
4109 filter_info->dst_port_mask = 1;
4110 break;
4111 default:
4112 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4113 return -EINVAL;
4114 }
4115
4116 switch (filter->src_port_mask) {
4117 case UINT16_MAX:
4118 filter_info->src_port_mask = 0;
4119 filter_info->src_port = filter->src_port;
4120 break;
4121 case 0:
4122 filter_info->src_port_mask = 1;
4123 break;
4124 default:
4125 PMD_DRV_LOG(ERR, "invalid src_port mask.");
4126 return -EINVAL;
4127 }
4128
4129 switch (filter->proto_mask) {
4130 case UINT8_MAX:
4131 filter_info->proto_mask = 0;
4132 filter_info->proto = filter->proto;
4133 break;
4134 case 0:
4135 filter_info->proto_mask = 1;
4136 break;
4137 default:
4138 PMD_DRV_LOG(ERR, "invalid protocol mask.");
4139 return -EINVAL;
4140 }
4141
4142 filter_info->priority = (uint8_t)filter->priority;
4143 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
4144 filter_info->tcp_flags = filter->tcp_flags;
4145 else
4146 filter_info->tcp_flags = 0;
4147
4148 return 0;
4149}
4150
4151static inline struct e1000_5tuple_filter *
4152igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
4153 struct e1000_5tuple_filter_info *key)
4154{
4155 struct e1000_5tuple_filter *it;
4156
4157 TAILQ_FOREACH(it, filter_list, entries) {
4158 if (memcmp(key, &it->filter_info,
4159 sizeof(struct e1000_5tuple_filter_info)) == 0) {
4160 return it;
4161 }
4162 }
4163 return NULL;
4164}
4165
4166
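/* write a 5-tuple filter into the 82576 hardware registers */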
4167static inline void
4168igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
4169 struct e1000_5tuple_filter *filter)
4170{
4171 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4172 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
4173 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
4174 uint8_t i;
4175
4176 i = filter->index;
4177 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
4178 if (filter->filter_info.src_ip_mask == 0)
4179 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
4180 if (filter->filter_info.dst_ip_mask == 0)
4181 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
4182 if (filter->filter_info.src_port_mask == 0)
4183 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
4184 if (filter->filter_info.proto_mask == 0)
4185 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
4186 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
4187 E1000_FTQF_QUEUE_MASK;
4188 ftqf |= E1000_FTQF_QUEUE_ENABLE;
4189 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
4190 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
4191 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
4192
4193 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
4194 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
4195
4196 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
4197 if (filter->filter_info.dst_port_mask == 1)
4198 imir |= E1000_IMIR_PORT_BP;
4199 else
4200 imir &= ~E1000_IMIR_PORT_BP;
4201 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
4202
4203
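	/* tcp flags bits setting */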
4204 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
4205 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
4206 imir_ext |= E1000_IMIREXT_CTRL_URG;
4207 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
4208 imir_ext |= E1000_IMIREXT_CTRL_ACK;
4209 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
4210 imir_ext |= E1000_IMIREXT_CTRL_PSH;
4211 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
4212 imir_ext |= E1000_IMIREXT_CTRL_RST;
4213 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
4214 imir_ext |= E1000_IMIREXT_CTRL_SYN;
4215 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
4216 imir_ext |= E1000_IMIREXT_CTRL_FIN;
4217 } else {
4218 imir_ext |= E1000_IMIREXT_CTRL_BP;
4219 }
4220 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
4221 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
4222}
4223
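/* igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */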
4235static int
4236igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
4237 struct rte_eth_ntuple_filter *ntuple_filter)
4238{
4239 struct e1000_filter_info *filter_info =
4240 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4241 struct e1000_5tuple_filter *filter;
4242 uint8_t i;
4243 int ret;
4244
4245 filter = rte_zmalloc("e1000_5tuple_filter",
4246 sizeof(struct e1000_5tuple_filter), 0);
4247 if (filter == NULL)
4248 return -ENOMEM;
4249
4250 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4251 &filter->filter_info);
4252 if (ret < 0) {
4253 rte_free(filter);
4254 return ret;
4255 }
4256
4257 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4258 &filter->filter_info) != NULL) {
4259 PMD_DRV_LOG(ERR, "filter exists.");
4260 rte_free(filter);
4261 return -EEXIST;
4262 }
4263 filter->queue = ntuple_filter->queue;
4264
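	/* look for a free 5-tuple filter index and link the filter into the list */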
4269 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4270 if (!(filter_info->fivetuple_mask & (1 << i))) {
4271 filter_info->fivetuple_mask |= 1 << i;
4272 filter->index = i;
4273 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4274 filter,
4275 entries);
4276 break;
4277 }
4278 }
4279 if (i >= E1000_MAX_FTQF_FILTERS) {
4280 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4281 rte_free(filter);
4282 return -ENOSYS;
4283 }
4284
4285 igb_inject_5tuple_filter_82576(dev, filter);
4286 return 0;
4287}
4288
4289int
4290igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
4291 struct e1000_5tuple_filter *filter)
4292{
4293 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4294 struct e1000_filter_info *filter_info =
4295 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4296
	filter_info->fivetuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);

	/* free the filter only after its index is no longer needed */
	rte_free(filter);
4308 return 0;
4309}
4310
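/* igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */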
4322static int
4323igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4324 struct rte_eth_ntuple_filter *ntuple_filter)
4325{
4326 struct e1000_filter_info *filter_info =
4327 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4328 struct e1000_5tuple_filter_info filter_5tuple;
4329 struct e1000_5tuple_filter *filter;
4330 int ret;
4331
4332 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4333 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4334 &filter_5tuple);
4335 if (ret < 0)
4336 return ret;
4337
4338 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4339 &filter_5tuple);
4340 if (filter == NULL) {
4341 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4342 return -ENOENT;
4343 }
4344
4345 igb_delete_5tuple_filter_82576(dev, filter);
4346
4347 return 0;
4348}
4349
4350static int
4351eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4352{
4353 uint32_t rctl;
4354 struct e1000_hw *hw;
4355 uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
4356
4357 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4358
4359#ifdef RTE_LIBRTE_82571_SUPPORT
4360
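	/* changing the MTU is not supported on 82571 */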
4361 if (hw->mac.type == e1000_82571)
4362 return -ENOTSUP;
4363#endif
4364
4365
4366
4367
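	/* if the device is started, refuse an MTU that would require scattered Rx
	 * while that feature is not already enabled
	 */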
4368 if (dev->data->dev_started && !dev->data->scattered_rx &&
4369 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
4370 PMD_INIT_LOG(ERR, "Stop port first.");
4371 return -EINVAL;
4372 }
4373
4374 rctl = E1000_READ_REG(hw, E1000_RCTL);
4375
4376
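	/* switch long-packet (jumbo) reception on or off as needed */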
4377 if (mtu > RTE_ETHER_MTU)
4378 rctl |= E1000_RCTL_LPE;
4379 else
4380 rctl &= ~E1000_RCTL_LPE;
4381 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4382
4383 E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
4384
4385 return 0;
4386}
4387
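/* igb_add_del_ntuple_filter - add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add the filter; if false, remove it.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */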
4400int
4401igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4402 struct rte_eth_ntuple_filter *ntuple_filter,
4403 bool add)
4404{
4405 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4406 int ret;
4407
4408 switch (ntuple_filter->flags) {
4409 case RTE_5TUPLE_FLAGS:
4410 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4411 if (hw->mac.type != e1000_82576)
4412 return -ENOTSUP;
4413 if (add)
4414 ret = igb_add_5tuple_filter_82576(dev,
4415 ntuple_filter);
4416 else
4417 ret = igb_remove_5tuple_filter_82576(dev,
4418 ntuple_filter);
4419 break;
4420 case RTE_2TUPLE_FLAGS:
4421 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4422 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
4423 hw->mac.type != e1000_i210 &&
4424 hw->mac.type != e1000_i211)
4425 return -ENOTSUP;
4426 if (add)
4427 ret = igb_add_2tuple_filter(dev, ntuple_filter);
4428 else
4429 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4430 break;
4431 default:
4432 ret = -EINVAL;
4433 break;
4434 }
4435
4436 return ret;
4437}
4438
4439static inline int
4440igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4441 uint16_t ethertype)
4442{
4443 int i;
4444
4445 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4446 if (filter_info->ethertype_filters[i].ethertype == ethertype &&
4447 (filter_info->ethertype_mask & (1 << i)))
4448 return i;
4449 }
4450 return -1;
4451}
4452
4453static inline int
4454igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4455 uint16_t ethertype, uint32_t etqf)
4456{
4457 int i;
4458
4459 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4460 if (!(filter_info->ethertype_mask & (1 << i))) {
4461 filter_info->ethertype_mask |= 1 << i;
4462 filter_info->ethertype_filters[i].ethertype = ethertype;
4463 filter_info->ethertype_filters[i].etqf = etqf;
4464 return i;
4465 }
4466 }
4467 return -1;
4468}
4469
4470int
4471igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4472 uint8_t idx)
4473{
4474 if (idx >= E1000_MAX_ETQF_FILTERS)
4475 return -1;
4476 filter_info->ethertype_mask &= ~(1 << idx);
4477 filter_info->ethertype_filters[idx].ethertype = 0;
4478 filter_info->ethertype_filters[idx].etqf = 0;
4479 return idx;
4480}
4481
4482
4483int
4484igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4485 struct rte_eth_ethertype_filter *filter,
4486 bool add)
4487{
4488 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4489 struct e1000_filter_info *filter_info =
4490 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4491 uint32_t etqf = 0;
4492 int ret;
4493
4494 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4495 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4496 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4497 " ethertype filter.", filter->ether_type);
4498 return -EINVAL;
4499 }
4500
4501 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4502 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4503 return -EINVAL;
4504 }
4505 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4506 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4507 return -EINVAL;
4508 }
4509
4510 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4511 if (ret >= 0 && add) {
4512 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4513 filter->ether_type);
4514 return -EEXIST;
4515 }
4516 if (ret < 0 && !add) {
4517 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4518 filter->ether_type);
4519 return -ENOENT;
4520 }
4521
4522 if (add) {
4523 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4524 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4525 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4526 ret = igb_ethertype_filter_insert(filter_info,
4527 filter->ether_type, etqf);
4528 if (ret < 0) {
4529 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4530 return -ENOSYS;
4531 }
4532 } else {
4533 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4534 if (ret < 0)
4535 return -ENOSYS;
4536 }
4537 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4538 E1000_WRITE_FLUSH(hw);
4539
4540 return 0;
4541}
4542
4543static int
4544eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
4545 const struct rte_flow_ops **ops)
4546{
4547 *ops = &igb_flow_ops;
4548 return 0;
4549}
4550
4551static int
4552eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4553 struct rte_ether_addr *mc_addr_set,
4554 uint32_t nb_mc_addr)
4555{
4556 struct e1000_hw *hw;
4557
4558 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4559 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4560 return 0;
4561}
4562
4563static uint64_t
4564igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4565{
4566 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4567 uint64_t systime_cycles;
4568
4569 switch (hw->mac.type) {
4570 case e1000_i210:
4571 case e1000_i211:
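		/* reading SYSTIMR latches the time registers;
		 * SYSTIML holds nanoseconds, SYSTIMH holds seconds
		 */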
4576 E1000_READ_REG(hw, E1000_SYSTIMR);
4577
4578 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4579 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4580 * NSEC_PER_SEC;
4581 break;
4582 case e1000_82580:
4583 case e1000_i350:
4584 case e1000_i354:
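		/* reading SYSTIMR latches the time registers;
		 * only the low 40 bits of SYSTIM are valid
		 */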
4589 E1000_READ_REG(hw, E1000_SYSTIMR);
4590 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4591
4592 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4593 & 0xff) << 32;
4594 break;
4595 default:
4596 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4597 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4598 << 32;
4599 break;
4600 }
4601
4602 return systime_cycles;
4603}
4604
4605static uint64_t
4606igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4607{
4608 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4609 uint64_t rx_tstamp_cycles;
4610
4611 switch (hw->mac.type) {
4612 case e1000_i210:
4613 case e1000_i211:
4614
4615 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4616 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4617 * NSEC_PER_SEC;
4618 break;
4619 case e1000_82580:
4620 case e1000_i350:
4621 case e1000_i354:
4622 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4623
4624 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4625 & 0xff) << 32;
4626 break;
4627 default:
4628 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4629 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4630 << 32;
4631 break;
4632 }
4633
4634 return rx_tstamp_cycles;
4635}
4636
4637static uint64_t
4638igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4639{
4640 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4641 uint64_t tx_tstamp_cycles;
4642
4643 switch (hw->mac.type) {
4644 case e1000_i210:
4645 case e1000_i211:
4646
4647 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4648 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4649 * NSEC_PER_SEC;
4650 break;
4651 case e1000_82580:
4652 case e1000_i350:
4653 case e1000_i354:
4654 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4655
4656 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
4657 & 0xff) << 32;
4658 break;
4659 default:
4660 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4661 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4662 << 32;
4663 break;
4664 }
4665
4666 return tx_tstamp_cycles;
4667}
4668
4669static void
4670igb_start_timecounters(struct rte_eth_dev *dev)
4671{
4672 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4673 struct e1000_adapter *adapter = dev->data->dev_private;
4674 uint32_t incval = 1;
4675 uint32_t shift = 0;
4676 uint64_t mask = E1000_CYCLECOUNTER_MASK;
4677
4678 switch (hw->mac.type) {
4679 case e1000_82580:
4680 case e1000_i350:
4681 case e1000_i354:
4682
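		/* SYSTIM is only 40 bits wide on these devices */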
4683 mask = (1ULL << 40) - 1;
4684
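		/* fall through */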
4685 case e1000_i210:
4686 case e1000_i211:
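		/* start incrementing the SYSTIM register used to timestamp PTP packets */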
4691 E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
4692 break;
4693 case e1000_82576:
4694 incval = E1000_INCVALUE_82576;
4695 shift = IGB_82576_TSYNC_SHIFT;
4696 E1000_WRITE_REG(hw, E1000_TIMINCA,
4697 E1000_INCPERIOD_82576 | incval);
4698 break;
4699 default:
4700
4701 return;
4702 }
4703
4704 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4705 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4706 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4707
4708 adapter->systime_tc.cc_mask = mask;
4709 adapter->systime_tc.cc_shift = shift;
4710 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4711
4712 adapter->rx_tstamp_tc.cc_mask = mask;
4713 adapter->rx_tstamp_tc.cc_shift = shift;
4714 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4715
4716 adapter->tx_tstamp_tc.cc_mask = mask;
4717 adapter->tx_tstamp_tc.cc_shift = shift;
4718 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4719}
4720
4721static int
4722igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4723{
4724 struct e1000_adapter *adapter = dev->data->dev_private;
4725
4726 adapter->systime_tc.nsec += delta;
4727 adapter->rx_tstamp_tc.nsec += delta;
4728 adapter->tx_tstamp_tc.nsec += delta;
4729
4730 return 0;
4731}
4732
4733static int
4734igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4735{
4736 uint64_t ns;
4737 struct e1000_adapter *adapter = dev->data->dev_private;
4738
4739 ns = rte_timespec_to_ns(ts);
4740
4741
4742 adapter->systime_tc.nsec = ns;
4743 adapter->rx_tstamp_tc.nsec = ns;
4744 adapter->tx_tstamp_tc.nsec = ns;
4745
4746 return 0;
4747}
4748
4749static int
4750igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4751{
4752 uint64_t ns, systime_cycles;
4753 struct e1000_adapter *adapter = dev->data->dev_private;
4754
4755 systime_cycles = igb_read_systime_cyclecounter(dev);
4756 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4757 *ts = rte_ns_to_timespec(ns);
4758
4759 return 0;
4760}
4761
4762static int
4763igb_timesync_enable(struct rte_eth_dev *dev)
4764{
4765 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4766 uint32_t tsync_ctl;
4767 uint32_t tsauxc;
4768
4769
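	/* stop the SYSTIM counter while timesync is being (re)configured */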
4770 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
4771
4772 switch (hw->mac.type) {
4773 case e1000_82580:
4774 case e1000_i350:
4775 case e1000_i354:
4776 case e1000_i210:
4777 case e1000_i211:
4778 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
4779
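		/* fall through */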
4780 case e1000_82576:
4781 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
4782 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
4783 break;
4784 default:
4785
4786 return -ENOTSUP;
4787 }
4788
4789
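	/* re-enable the SYSTIM counter by clearing the disable bit in TSAUXC */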
4790 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
4791 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
4792 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
4793
4794 igb_start_timecounters(dev);
4795
4796
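	/* enable L2 filtering of IEEE 1588 Ethernet frames */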
4797 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
4798 (RTE_ETHER_TYPE_1588 |
4799 E1000_ETQF_FILTER_ENABLE |
4800 E1000_ETQF_1588));
4801
4802
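	/* enable timestamping of received PTP packets */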
4803 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4804 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
4805 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4806
4807
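	/* enable timestamping of transmitted PTP packets */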
4808 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4809 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
4810 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4811
4812 return 0;
4813}
4814
4815static int
4816igb_timesync_disable(struct rte_eth_dev *dev)
4817{
4818 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4819 uint32_t tsync_ctl;
4820
4821
4822 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4823 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
4824 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4825
4826
4827 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4828 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
4829 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4830
4831
4832 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
4833
4834
4835 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
4836
4837 return 0;
4838}
4839
4840static int
4841igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4842 struct timespec *timestamp,
4843 uint32_t flags __rte_unused)
4844{
4845 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4846 struct e1000_adapter *adapter = dev->data->dev_private;
4847 uint32_t tsync_rxctl;
4848 uint64_t rx_tstamp_cycles;
4849 uint64_t ns;
4850
4851 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4852 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
4853 return -EINVAL;
4854
4855 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
4856 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4857 *timestamp = rte_ns_to_timespec(ns);
4858
4859 return 0;
4860}
4861
4862static int
4863igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4864 struct timespec *timestamp)
4865{
4866 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4867 struct e1000_adapter *adapter = dev->data->dev_private;
4868 uint32_t tsync_txctl;
4869 uint64_t tx_tstamp_cycles;
4870 uint64_t ns;
4871
4872 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4873 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
4874 return -EINVAL;
4875
4876 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
4877 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4878 *timestamp = rte_ns_to_timespec(ns);
4879
4880 return 0;
4881}
4882
4883static int
4884eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4885{
4886 int count = 0;
4887 int g_ind = 0;
4888 const struct reg_info *reg_group;
4889
4890 while ((reg_group = igb_regs[g_ind++]))
4891 count += igb_reg_group_count(reg_group);
4892
4893 return count;
4894}
4895
4896static int
4897igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4898{
4899 int count = 0;
4900 int g_ind = 0;
4901 const struct reg_info *reg_group;
4902
4903 while ((reg_group = igbvf_regs[g_ind++]))
4904 count += igb_reg_group_count(reg_group);
4905
4906 return count;
4907}
4908
4909static int
4910eth_igb_get_regs(struct rte_eth_dev *dev,
4911 struct rte_dev_reg_info *regs)
4912{
4913 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4914 uint32_t *data = regs->data;
4915 int g_ind = 0;
4916 int count = 0;
4917 const struct reg_info *reg_group;
4918
4919 if (data == NULL) {
4920 regs->length = eth_igb_get_reg_length(dev);
4921 regs->width = sizeof(uint32_t);
4922 return 0;
4923 }
4924
4925
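	/* support only a full register dump */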
4926 if ((regs->length == 0) ||
4927 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4928 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4929 hw->device_id;
4930 while ((reg_group = igb_regs[g_ind++]))
4931 count += igb_read_regs_group(dev, &data[count],
4932 reg_group);
4933 return 0;
4934 }
4935
4936 return -ENOTSUP;
4937}
4938
4939static int
4940igbvf_get_regs(struct rte_eth_dev *dev,
4941 struct rte_dev_reg_info *regs)
4942{
4943 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4944 uint32_t *data = regs->data;
4945 int g_ind = 0;
4946 int count = 0;
4947 const struct reg_info *reg_group;
4948
4949 if (data == NULL) {
4950 regs->length = igbvf_get_reg_length(dev);
4951 regs->width = sizeof(uint32_t);
4952 return 0;
4953 }
4954
4955
4956 if ((regs->length == 0) ||
4957 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4958 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4959 hw->device_id;
4960 while ((reg_group = igbvf_regs[g_ind++]))
4961 count += igb_read_regs_group(dev, &data[count],
4962 reg_group);
4963 return 0;
4964 }
4965
4966 return -ENOTSUP;
4967}
4968
4969static int
4970eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4971{
4972 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4973
4974
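	/* EEPROM size is reported in bytes: NVM word count times two */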
4975 return hw->nvm.word_size * 2;
4976}
4977
4978static int
4979eth_igb_get_eeprom(struct rte_eth_dev *dev,
4980 struct rte_dev_eeprom_info *in_eeprom)
4981{
4982 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4983 struct e1000_nvm_info *nvm = &hw->nvm;
4984 uint16_t *data = in_eeprom->data;
4985 int first, length;
4986
4987 first = in_eeprom->offset >> 1;
4988 length = in_eeprom->length >> 1;
4989 if ((first >= hw->nvm.word_size) ||
4990 ((first + length) >= hw->nvm.word_size))
4991 return -EINVAL;
4992
4993 in_eeprom->magic = hw->vendor_id |
4994 ((uint32_t)hw->device_id << 16);
4995
4996 if ((nvm->ops.read) == NULL)
4997 return -ENOTSUP;
4998
4999 return nvm->ops.read(hw, first, length, data);
5000}
5001
5002static int
5003eth_igb_set_eeprom(struct rte_eth_dev *dev,
5004 struct rte_dev_eeprom_info *in_eeprom)
5005{
5006 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5007 struct e1000_nvm_info *nvm = &hw->nvm;
5008 uint16_t *data = in_eeprom->data;
5009 int first, length;
5010
5011 first = in_eeprom->offset >> 1;
5012 length = in_eeprom->length >> 1;
5013 if ((first >= hw->nvm.word_size) ||
5014 ((first + length) >= hw->nvm.word_size))
5015 return -EINVAL;
5016
5017 in_eeprom->magic = (uint32_t)hw->vendor_id |
5018 ((uint32_t)hw->device_id << 16);
5019
5020 if ((nvm->ops.write) == NULL)
5021 return -ENOTSUP;
5022 return nvm->ops.write(hw, first, length, data);
5023}
5024
5025static int
5026eth_igb_get_module_info(struct rte_eth_dev *dev,
5027 struct rte_eth_dev_module_info *modinfo)
5028{
5029 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5030
5031 uint32_t status = 0;
5032 uint16_t sff8472_rev, addr_mode;
5033 bool page_swap = false;
5034
5035 if (hw->phy.media_type == e1000_media_type_copper ||
5036 hw->phy.media_type == e1000_media_type_unknown)
5037 return -EOPNOTSUPP;
5038
5039
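	/* check whether the module supports SFF-8472 */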
5040 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
5041 if (status)
5042 return -EIO;
5043
5044
5045 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
5046 if (status)
5047 return -EIO;
5048
5049
5050 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
5051 PMD_DRV_LOG(ERR,
5052 "Address change required to access page 0xA2, "
5053 "but not supported. Please report the module "
5054 "type to the driver maintainers.\n");
5055 page_swap = true;
5056 }
5057
5058 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
5059
5060 modinfo->type = RTE_ETH_MODULE_SFF_8079;
5061 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
5062 } else {
5063
5064 modinfo->type = RTE_ETH_MODULE_SFF_8472;
5065 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
5066 }
5067
5068 return 0;
5069}
5070
5071static int
5072eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
5073 struct rte_dev_eeprom_info *info)
5074{
5075 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5076
5077 uint32_t status = 0;
5078 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
5079 u16 first_word, last_word;
5080 int i = 0;
5081
5082 first_word = info->offset >> 1;
5083 last_word = (info->offset + info->length - 1) >> 1;
5084
5085
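	/* read the module EEPROM one 16-bit word at a time over I2C */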
5086 for (i = 0; i < last_word - first_word + 1; i++) {
5087 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
5088 &dataword[i]);
5089 if (status) {
5090
5091 return -EIO;
5092 }
5093
5094 dataword[i] = rte_be_to_cpu_16(dataword[i]);
5095 }
5096
5097 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
5098
5099 return 0;
5100}
5101
5102static int
5103eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5104{
5105 struct e1000_hw *hw =
5106 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5107 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5108 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5109 uint32_t vec = E1000_MISC_VEC_ID;
5110
5111 if (rte_intr_allow_others(intr_handle))
5112 vec = E1000_RX_VEC_START;
5113
5114 uint32_t mask = 1 << (queue_id + vec);
5115
5116 E1000_WRITE_REG(hw, E1000_EIMC, mask);
5117 E1000_WRITE_FLUSH(hw);
5118
5119 return 0;
5120}
5121
5122static int
5123eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5124{
5125 struct e1000_hw *hw =
5126 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5127 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5128 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5129 uint32_t vec = E1000_MISC_VEC_ID;
5130
5131 if (rte_intr_allow_others(intr_handle))
5132 vec = E1000_RX_VEC_START;
5133
5134 uint32_t mask = 1 << (queue_id + vec);
5135 uint32_t regval;
5136
5137 regval = E1000_READ_REG(hw, E1000_EIMS);
5138 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5139 E1000_WRITE_FLUSH(hw);
5140
5141 rte_intr_ack(intr_handle);
5142
5143 return 0;
5144}
5145
5146static void
5147eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
5148 uint8_t index, uint8_t offset)
5149{
5150 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5151
5152
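	/* clear any previous vector assignment at this offset */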
5153 val &= ~((uint32_t)0xFF << offset);
5154
5155
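	/* write the new vector number and set the valid bit */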
5156 val |= (msix_vector | E1000_IVAR_VALID) << offset;
5157
5158 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
5159}
5160
5161static void
5162eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5163 uint8_t queue, uint8_t msix_vector)
5164{
5165 uint32_t tmp = 0;
5166
5167 if (hw->mac.type == e1000_82575) {
5168 if (direction == 0)
5169 tmp = E1000_EICR_RX_QUEUE0 << queue;
5170 else if (direction == 1)
5171 tmp = E1000_EICR_TX_QUEUE0 << queue;
5172 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5173 } else if (hw->mac.type == e1000_82576) {
5174 if ((direction == 0) || (direction == 1))
5175 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5176 ((queue & 0x8) << 1) +
5177 8 * direction);
5178 } else if ((hw->mac.type == e1000_82580) ||
5179 (hw->mac.type == e1000_i350) ||
5180 (hw->mac.type == e1000_i354) ||
5181 (hw->mac.type == e1000_i210) ||
5182 (hw->mac.type == e1000_i211)) {
5183 if ((direction == 0) || (direction == 1))
5184 eth_igb_write_ivar(hw, msix_vector,
5185 queue >> 1,
5186 ((queue & 0x1) << 4) +
5187 8 * direction);
5188 }
5189}
5190
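/* Sets up the hardware to properly generate MSI-X interrupts
 * for the Rx queues and the misc (e.g. link status change) vector.
 */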
5195static void
5196eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
5197{
5198 int queue_id, nb_efd;
5199 uint32_t tmpval, regval, intr_mask;
5200 struct e1000_hw *hw =
5201 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5202 uint32_t vec = E1000_MISC_VEC_ID;
5203 uint32_t base = E1000_MISC_VEC_ID;
5204 uint32_t misc_shift = 0;
5205 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5206 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5207
5208
5209
5210
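	/* nothing to do if no mapping between interrupt vectors and event fds is in use */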
5211 if (!rte_intr_dp_is_en(intr_handle))
5212 return;
5213
5214 if (rte_intr_allow_others(intr_handle)) {
5215 vec = base = E1000_RX_VEC_START;
5216 misc_shift = 1;
5217 }
5218
5219
5220 if (hw->mac.type == e1000_82575) {
5221 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
5222
5223 tmpval |= E1000_CTRL_EXT_PBA_CLR;
5224
5225
5226 tmpval |= E1000_CTRL_EXT_EIAME;
5227 tmpval |= E1000_CTRL_EXT_IRCA;
5228
5229 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
5230
5231
5232 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
5233 regval = E1000_READ_REG(hw, E1000_EIAC);
5234 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
5235 regval = E1000_READ_REG(hw, E1000_EIAM);
5236 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
5237 } else if ((hw->mac.type == e1000_82576) ||
5238 (hw->mac.type == e1000_82580) ||
5239 (hw->mac.type == e1000_i350) ||
5240 (hw->mac.type == e1000_i354) ||
5241 (hw->mac.type == e1000_i210) ||
5242 (hw->mac.type == e1000_i211)) {
5243
5244 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
5245 E1000_GPIE_PBA | E1000_GPIE_EIAME |
5246 E1000_GPIE_NSICR);
5247 nb_efd = rte_intr_nb_efd_get(intr_handle);
5248 if (nb_efd < 0)
5249 return;
5250
5251 intr_mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
5252
5253 if (dev->data->dev_conf.intr_conf.lsc != 0)
5254 intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
5255
5256 regval = E1000_READ_REG(hw, E1000_EIAC);
5257 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
5258
5259
5260 regval = E1000_READ_REG(hw, E1000_EIMS);
5261 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
5262 tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
5263 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
5264 }
5265
5266
5267
5268
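	/* use EIAM to auto-mask when an MSI-X interrupt is asserted;
	 * this saves a register write for every interrupt
	 */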
5269 nb_efd = rte_intr_nb_efd_get(intr_handle);
5270 if (nb_efd < 0)
5271 return;
5272
5273 intr_mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
5274
5275 if (dev->data->dev_conf.intr_conf.lsc != 0)
5276 intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
5277
5278 regval = E1000_READ_REG(hw, E1000_EIAM);
5279 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
5280
5281 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
5282 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
5283 rte_intr_vec_list_index_set(intr_handle, queue_id, vec);
5284 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
5285 vec++;
5286 }
5287
5288 E1000_WRITE_FLUSH(hw);
5289}
5290
5291
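/* restore n-tuple filters */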
5292static inline void
5293igb_ntuple_filter_restore(struct rte_eth_dev *dev)
5294{
5295 struct e1000_filter_info *filter_info =
5296 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5297 struct e1000_5tuple_filter *p_5tuple;
5298 struct e1000_2tuple_filter *p_2tuple;
5299
5300 TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
5301 igb_inject_5tuple_filter_82576(dev, p_5tuple);
5302 }
5303
5304 TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
5305 igb_inject_2uple_filter(dev, p_2tuple);
5306 }
5307}
5308
5309
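/* restore SYN filter */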
5310static inline void
5311igb_syn_filter_restore(struct rte_eth_dev *dev)
5312{
5313 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5314 struct e1000_filter_info *filter_info =
5315 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5316 uint32_t synqf;
5317
5318 synqf = filter_info->syn_info;
5319
5320 if (synqf & E1000_SYN_FILTER_ENABLE) {
5321 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
5322 E1000_WRITE_FLUSH(hw);
5323 }
5324}
5325
5326
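/* restore ethertype filters */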
5327static inline void
5328igb_ethertype_filter_restore(struct rte_eth_dev *dev)
5329{
5330 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5331 struct e1000_filter_info *filter_info =
5332 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5333 int i;
5334
5335 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
5336 if (filter_info->ethertype_mask & (1 << i)) {
5337 E1000_WRITE_REG(hw, E1000_ETQF(i),
5338 filter_info->ethertype_filters[i].etqf);
5339 E1000_WRITE_FLUSH(hw);
5340 }
5341 }
5342}
5343
5344
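/* restore flex byte filters */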
5345static inline void
5346igb_flex_filter_restore(struct rte_eth_dev *dev)
5347{
5348 struct e1000_filter_info *filter_info =
5349 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5350 struct e1000_flex_filter *flex_filter;
5351
5352 TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
5353 igb_inject_flex_filter(dev, flex_filter);
5354 }
5355}
5356
5357
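/* restore RSS filter */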
5358static inline void
5359igb_rss_filter_restore(struct rte_eth_dev *dev)
5360{
5361 struct e1000_filter_info *filter_info =
5362 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5363
5364 if (filter_info->rss_info.conf.queue_num)
5365 igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
5366}
5367
5368
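/* restore all types of filters */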
5369static int
5370igb_filter_restore(struct rte_eth_dev *dev)
5371{
5372 igb_ntuple_filter_restore(dev);
5373 igb_ethertype_filter_restore(dev);
5374 igb_syn_filter_restore(dev);
5375 igb_flex_filter_restore(dev);
5376 igb_rss_filter_restore(dev);
5377
5378 return 0;
5379}
5380
5381RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
5382RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
5383RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
5384RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
5385RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
5386RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
5387