1
2
3
4
5#include <stdio.h>
6#include <errno.h>
7#include <stdint.h>
8#include <string.h>
9#include <unistd.h>
10#include <stdarg.h>
11#include <inttypes.h>
12#include <assert.h>
13
14#include <rte_common.h>
15#include <rte_eal.h>
16#include <rte_string_fns.h>
17#include <rte_pci.h>
18#include <rte_bus_pci.h>
19#include <rte_ether.h>
20#include <ethdev_driver.h>
21#include <ethdev_pci.h>
22#include <rte_memzone.h>
23#include <rte_malloc.h>
24#include <rte_memcpy.h>
25#include <rte_alarm.h>
26#include <rte_dev.h>
27#include <rte_tailq.h>
28#include <rte_hash_crc.h>
29#include <rte_bitmap.h>
30#include <rte_os_shim.h>
31
32#include "i40e_logs.h"
33#include "base/i40e_prototype.h"
34#include "base/i40e_adminq_cmd.h"
35#include "base/i40e_type.h"
36#include "base/i40e_register.h"
37#include "base/i40e_dcb.h"
38#include "i40e_ethdev.h"
39#include "i40e_rxtx.h"
40#include "i40e_pf.h"
41#include "i40e_regs.h"
42#include "rte_pmd_i40e.h"
43#include "i40e_hash.h"
44
45#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
46#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
47#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
48#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
49#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg"
50
51#define I40E_CLEAR_PXE_WAIT_MS 200
52#define I40E_VSI_TSR_QINQ_STRIP 0x4010
53#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
54
55
56#define I40E_MAX_CAP_ELE_NUM 128
57
58
59#define I40E_CHK_Q_ENA_COUNT 1000
60#define I40E_CHK_Q_ENA_INTERVAL_US 1000
61
62
63#define I40E_MAX_NUM_VSIS (384UL)
64
65#define I40E_PRE_TX_Q_CFG_WAIT_US 10
66
67
68#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
69
70
71#define I40E_PRTMAC_FWD_CTRL 0x00000001
72
73
74#define I40E_RXPBSIZE (968 * 1024)
75
76
77#define I40E_KILOSHIFT 10
78
79
80#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
81
82
83#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT)
84
85
86#define I40E_PACKET_AVERAGE_SIZE 128
87
88
89#define I40E_PFINT_ICR0_ENA_MASK ( \
90 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
91 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
92 I40E_PFINT_ICR0_ENA_GRST_MASK | \
93 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
94 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
95 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
96 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
97 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
98 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
99
100#define I40E_FLOW_TYPES ( \
101 (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
102 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
103 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
104 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
105 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
106 (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
107 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
108 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
109 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
110 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
111 (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
112
113
114#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
115#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
116#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
117#define I40E_PRTTSYN_TSYNENA 0x80000000
118#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
119#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
120
121
122
123
124
125
126#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL
127
128#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
129
130#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL
131
132#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
133
134#define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL
135
136#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
137
138#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
139
140#define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL
141
142#define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL
143
144#define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL
145
146#define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL
147
148#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
149
150#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL
151
152#define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL
153
154#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL
155
156#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL
157
158#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL
159
160#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL
161
162#define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL
163
164#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL
165
166#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL
167
168#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
169
170#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL
171
172#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL
173
174#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL
175
176#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL
177
178#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL
179
180#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL
181
182#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL
183
184#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL
185
186#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL
187
188#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL
189
190#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL
191
192#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL
193
194#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL
195
196#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
197
198#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
199
200#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL
201#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
202
203#define I40E_TRANSLATE_INSET 0
204#define I40E_TRANSLATE_REG 1
205
206#define I40E_INSET_IPV4_TOS_MASK 0x0000FF00UL
207#define I40E_INSET_IPV4_TTL_MASK 0x000000FFUL
208#define I40E_INSET_IPV4_PROTO_MASK 0x0000FF00UL
209#define I40E_INSET_IPV6_TC_MASK 0x0000F00FUL
210#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x0000FF00UL
211#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000000FFUL
212
213
214#define PCI_DEV_CAP_REG 0xA4
215
216#define PCI_DEV_CTRL_REG 0xA8
217
218#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
219
220#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
221
222#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
223
224#define I40E_GLQF_PIT_IPV4_START 2
225#define I40E_GLQF_PIT_IPV4_COUNT 2
226#define I40E_GLQF_PIT_IPV6_START 4
227#define I40E_GLQF_PIT_IPV6_COUNT 2
228
229#define I40E_GLQF_PIT_SOURCE_OFF_GET(a) \
230 (((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
231 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
232
233#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
234 (((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
235 I40E_GLQF_PIT_DEST_OFF_SHIFT)
236
237#define I40E_GLQF_PIT_FSIZE_GET(a) (((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
238 I40E_GLQF_PIT_FSIZE_SHIFT)
239
240#define I40E_GLQF_PIT_BUILD(off, mask) (((off) << 16) | (mask))
241#define I40E_FDIR_FIELD_OFFSET(a) ((a) >> 1)
242
243static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
244static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
245static int i40e_dev_configure(struct rte_eth_dev *dev);
246static int i40e_dev_start(struct rte_eth_dev *dev);
247static int i40e_dev_stop(struct rte_eth_dev *dev);
248static int i40e_dev_close(struct rte_eth_dev *dev);
249static int i40e_dev_reset(struct rte_eth_dev *dev);
250static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
251static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
252static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
253static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
254static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
255static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
256static int i40e_dev_stats_get(struct rte_eth_dev *dev,
257 struct rte_eth_stats *stats);
258static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
259 struct rte_eth_xstat *xstats, unsigned n);
260static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
261 struct rte_eth_xstat_name *xstats_names,
262 unsigned limit);
263static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
264static int i40e_fw_version_get(struct rte_eth_dev *dev,
265 char *fw_version, size_t fw_size);
266static int i40e_dev_info_get(struct rte_eth_dev *dev,
267 struct rte_eth_dev_info *dev_info);
268static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
269 uint16_t vlan_id,
270 int on);
271static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
272 enum rte_vlan_type vlan_type,
273 uint16_t tpid);
274static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
275static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
276 uint16_t queue,
277 int on);
278static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
279static int i40e_dev_led_on(struct rte_eth_dev *dev);
280static int i40e_dev_led_off(struct rte_eth_dev *dev);
281static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
282 struct rte_eth_fc_conf *fc_conf);
283static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
284 struct rte_eth_fc_conf *fc_conf);
285static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
286 struct rte_eth_pfc_conf *pfc_conf);
287static int i40e_macaddr_add(struct rte_eth_dev *dev,
288 struct rte_ether_addr *mac_addr,
289 uint32_t index,
290 uint32_t pool);
291static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
292static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
293 struct rte_eth_rss_reta_entry64 *reta_conf,
294 uint16_t reta_size);
295static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
296 struct rte_eth_rss_reta_entry64 *reta_conf,
297 uint16_t reta_size);
298
299static int i40e_get_cap(struct i40e_hw *hw);
300static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
301static int i40e_pf_setup(struct i40e_pf *pf);
302static int i40e_dev_rxtx_init(struct i40e_pf *pf);
303static int i40e_vmdq_setup(struct rte_eth_dev *dev);
304static int i40e_dcb_setup(struct rte_eth_dev *dev);
305static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
306 bool offset_loaded, uint64_t *offset, uint64_t *stat);
307static void i40e_stat_update_48(struct i40e_hw *hw,
308 uint32_t hireg,
309 uint32_t loreg,
310 bool offset_loaded,
311 uint64_t *offset,
312 uint64_t *stat);
313static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
314static void i40e_dev_interrupt_handler(void *param);
315static void i40e_dev_alarm_handler(void *param);
316static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
317 uint32_t base, uint32_t num);
318static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
319static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
320 uint32_t base);
321static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
322 uint16_t num);
323static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
324static int i40e_veb_release(struct i40e_veb *veb);
325static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
326 struct i40e_vsi *vsi);
327static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
328static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
329 struct i40e_macvlan_filter *mv_f,
330 int num,
331 uint16_t vlan);
332static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
333static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
334 struct rte_eth_rss_conf *rss_conf);
335static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
336 struct rte_eth_rss_conf *rss_conf);
337static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
338 struct rte_eth_udp_tunnel *udp_tunnel);
339static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
340 struct rte_eth_udp_tunnel *udp_tunnel);
341static void i40e_filter_input_set_init(struct i40e_pf *pf);
342static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
343 const struct rte_flow_ops **ops);
344static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
345 struct rte_eth_dcb_info *dcb_info);
346static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
347static void i40e_configure_registers(struct i40e_hw *hw);
348static void i40e_hw_init(struct rte_eth_dev *dev);
349static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
350static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
351 uint16_t seid,
352 uint16_t rule_type,
353 uint16_t *entries,
354 uint16_t count,
355 uint16_t rule_id);
356static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
357 struct rte_eth_mirror_conf *mirror_conf,
358 uint8_t sw_id, uint8_t on);
359static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
360
361static int i40e_timesync_enable(struct rte_eth_dev *dev);
362static int i40e_timesync_disable(struct rte_eth_dev *dev);
363static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
364 struct timespec *timestamp,
365 uint32_t flags);
366static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
367 struct timespec *timestamp);
368static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
369
370static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
371
372static int i40e_timesync_read_time(struct rte_eth_dev *dev,
373 struct timespec *timestamp);
374static int i40e_timesync_write_time(struct rte_eth_dev *dev,
375 const struct timespec *timestamp);
376
377static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
378 uint16_t queue_id);
379static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
380 uint16_t queue_id);
381
382static int i40e_get_regs(struct rte_eth_dev *dev,
383 struct rte_dev_reg_info *regs);
384
385static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
386
387static int i40e_get_eeprom(struct rte_eth_dev *dev,
388 struct rte_dev_eeprom_info *eeprom);
389
390static int i40e_get_module_info(struct rte_eth_dev *dev,
391 struct rte_eth_dev_module_info *modinfo);
392static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
393 struct rte_dev_eeprom_info *info);
394
395static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
396 struct rte_ether_addr *mac_addr);
397
398static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
399
400static int i40e_ethertype_filter_convert(
401 const struct rte_eth_ethertype_filter *input,
402 struct i40e_ethertype_filter *filter);
403static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
404 struct i40e_ethertype_filter *filter);
405
406static int i40e_tunnel_filter_convert(
407 struct i40e_aqc_cloud_filters_element_bb *cld_filter,
408 struct i40e_tunnel_filter *tunnel_filter);
409static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
410 struct i40e_tunnel_filter *tunnel_filter);
411static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
412
413static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
414static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
415static void i40e_filter_restore(struct i40e_pf *pf);
416static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
417
418static const char *const valid_keys[] = {
419 ETH_I40E_FLOATING_VEB_ARG,
420 ETH_I40E_FLOATING_VEB_LIST_ARG,
421 ETH_I40E_SUPPORT_MULTI_DRIVER,
422 ETH_I40E_QUEUE_NUM_PER_VF_ARG,
423 ETH_I40E_VF_MSG_CFG,
424 NULL};
425
426static const struct rte_pci_id pci_id_i40e_map[] = {
427 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
428 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
429 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
430 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
431 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
432 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
433 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
434 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
435 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
436 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
437 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
438 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
439 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
440 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
441 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
442 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
443 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
444 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
445 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
446 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
447 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
448 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
449 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
450 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
451 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
452 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
453 { .vendor_id = 0, },
454};
455
456static const struct eth_dev_ops i40e_eth_dev_ops = {
457 .dev_configure = i40e_dev_configure,
458 .dev_start = i40e_dev_start,
459 .dev_stop = i40e_dev_stop,
460 .dev_close = i40e_dev_close,
461 .dev_reset = i40e_dev_reset,
462 .promiscuous_enable = i40e_dev_promiscuous_enable,
463 .promiscuous_disable = i40e_dev_promiscuous_disable,
464 .allmulticast_enable = i40e_dev_allmulticast_enable,
465 .allmulticast_disable = i40e_dev_allmulticast_disable,
466 .dev_set_link_up = i40e_dev_set_link_up,
467 .dev_set_link_down = i40e_dev_set_link_down,
468 .link_update = i40e_dev_link_update,
469 .stats_get = i40e_dev_stats_get,
470 .xstats_get = i40e_dev_xstats_get,
471 .xstats_get_names = i40e_dev_xstats_get_names,
472 .stats_reset = i40e_dev_stats_reset,
473 .xstats_reset = i40e_dev_stats_reset,
474 .fw_version_get = i40e_fw_version_get,
475 .dev_infos_get = i40e_dev_info_get,
476 .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
477 .vlan_filter_set = i40e_vlan_filter_set,
478 .vlan_tpid_set = i40e_vlan_tpid_set,
479 .vlan_offload_set = i40e_vlan_offload_set,
480 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
481 .vlan_pvid_set = i40e_vlan_pvid_set,
482 .rx_queue_start = i40e_dev_rx_queue_start,
483 .rx_queue_stop = i40e_dev_rx_queue_stop,
484 .tx_queue_start = i40e_dev_tx_queue_start,
485 .tx_queue_stop = i40e_dev_tx_queue_stop,
486 .rx_queue_setup = i40e_dev_rx_queue_setup,
487 .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
488 .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
489 .rx_queue_release = i40e_dev_rx_queue_release,
490 .tx_queue_setup = i40e_dev_tx_queue_setup,
491 .tx_queue_release = i40e_dev_tx_queue_release,
492 .dev_led_on = i40e_dev_led_on,
493 .dev_led_off = i40e_dev_led_off,
494 .flow_ctrl_get = i40e_flow_ctrl_get,
495 .flow_ctrl_set = i40e_flow_ctrl_set,
496 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
497 .mac_addr_add = i40e_macaddr_add,
498 .mac_addr_remove = i40e_macaddr_remove,
499 .reta_update = i40e_dev_rss_reta_update,
500 .reta_query = i40e_dev_rss_reta_query,
501 .rss_hash_update = i40e_dev_rss_hash_update,
502 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
503 .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
504 .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
505 .flow_ops_get = i40e_dev_flow_ops_get,
506 .rxq_info_get = i40e_rxq_info_get,
507 .txq_info_get = i40e_txq_info_get,
508 .rx_burst_mode_get = i40e_rx_burst_mode_get,
509 .tx_burst_mode_get = i40e_tx_burst_mode_get,
510 .mirror_rule_set = i40e_mirror_rule_set,
511 .mirror_rule_reset = i40e_mirror_rule_reset,
512 .timesync_enable = i40e_timesync_enable,
513 .timesync_disable = i40e_timesync_disable,
514 .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
515 .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
516 .get_dcb_info = i40e_dev_get_dcb_info,
517 .timesync_adjust_time = i40e_timesync_adjust_time,
518 .timesync_read_time = i40e_timesync_read_time,
519 .timesync_write_time = i40e_timesync_write_time,
520 .get_reg = i40e_get_regs,
521 .get_eeprom_length = i40e_get_eeprom_length,
522 .get_eeprom = i40e_get_eeprom,
523 .get_module_info = i40e_get_module_info,
524 .get_module_eeprom = i40e_get_module_eeprom,
525 .mac_addr_set = i40e_set_default_mac_addr,
526 .mtu_set = i40e_dev_mtu_set,
527 .tm_ops_get = i40e_tm_ops_get,
528 .tx_done_cleanup = i40e_tx_done_cleanup,
529 .get_monitor_addr = i40e_get_monitor_addr,
530};
531
532
533struct rte_i40e_xstats_name_off {
534 char name[RTE_ETH_XSTATS_NAME_SIZE];
535 unsigned offset;
536};
537
538static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
539 {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
540 {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
541 {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
542 {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
543 {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
544 rx_unknown_protocol)},
545 {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
546 {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
547 {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
548 {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
549};
550
551#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
552 sizeof(rte_i40e_stats_strings[0]))
553
554static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
555 {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
556 tx_dropped_link_down)},
557 {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
558 {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
559 illegal_bytes)},
560 {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
561 {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
562 mac_local_faults)},
563 {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
564 mac_remote_faults)},
565 {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
566 rx_length_errors)},
567 {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
568 {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
569 {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
570 {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
571 {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
572 {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
573 rx_size_127)},
574 {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
575 rx_size_255)},
576 {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
577 rx_size_511)},
578 {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
579 rx_size_1023)},
580 {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
581 rx_size_1522)},
582 {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
583 rx_size_big)},
584 {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
585 rx_undersize)},
586 {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
587 rx_oversize)},
588 {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
589 mac_short_packet_dropped)},
590 {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
591 rx_fragments)},
592 {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
593 {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
594 {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
595 tx_size_127)},
596 {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
597 tx_size_255)},
598 {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
599 tx_size_511)},
600 {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
601 tx_size_1023)},
602 {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
603 tx_size_1522)},
604 {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
605 tx_size_big)},
606 {"rx_flow_director_atr_match_packets",
607 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
608 {"rx_flow_director_sb_match_packets",
609 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
610 {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
611 tx_lpi_status)},
612 {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
613 rx_lpi_status)},
614 {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
615 tx_lpi_count)},
616 {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
617 rx_lpi_count)},
618};
619
620#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
621 sizeof(rte_i40e_hw_port_strings[0]))
622
623static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
624 {"xon_packets", offsetof(struct i40e_hw_port_stats,
625 priority_xon_rx)},
626 {"xoff_packets", offsetof(struct i40e_hw_port_stats,
627 priority_xoff_rx)},
628};
629
630#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
631 sizeof(rte_i40e_rxq_prio_strings[0]))
632
633static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
634 {"xon_packets", offsetof(struct i40e_hw_port_stats,
635 priority_xon_tx)},
636 {"xoff_packets", offsetof(struct i40e_hw_port_stats,
637 priority_xoff_tx)},
638 {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
639 priority_xon_2_xoff)},
640};
641
642#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
643 sizeof(rte_i40e_txq_prio_strings[0]))
644
645static int
646eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
647 struct rte_pci_device *pci_dev)
648{
649 char name[RTE_ETH_NAME_MAX_LEN];
650 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
651 int i, retval;
652
653 if (pci_dev->device.devargs) {
654 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
655 ð_da);
656 if (retval)
657 return retval;
658 }
659
660 if (eth_da.nb_representor_ports > 0 &&
661 eth_da.type != RTE_ETH_REPRESENTOR_VF) {
662 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
663 pci_dev->device.devargs->args);
664 return -ENOTSUP;
665 }
666
667 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
668 sizeof(struct i40e_adapter),
669 eth_dev_pci_specific_init, pci_dev,
670 eth_i40e_dev_init, NULL);
671
672 if (retval || eth_da.nb_representor_ports < 1)
673 return retval;
674
675
676 struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
677 pci_dev->device.name);
678
679 if (pf_ethdev == NULL)
680 return -ENODEV;
681
682 for (i = 0; i < eth_da.nb_representor_ports; i++) {
683 struct i40e_vf_representor representor = {
684 .vf_id = eth_da.representor_ports[i],
685 .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
686 pf_ethdev->data->dev_private)->switch_domain_id,
687 .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
688 pf_ethdev->data->dev_private)
689 };
690
691
692 snprintf(name, sizeof(name), "net_%s_representor_%d",
693 pci_dev->device.name, eth_da.representor_ports[i]);
694
695 retval = rte_eth_dev_create(&pci_dev->device, name,
696 sizeof(struct i40e_vf_representor), NULL, NULL,
697 i40e_vf_representor_init, &representor);
698
699 if (retval)
700 PMD_DRV_LOG(ERR, "failed to create i40e vf "
701 "representor %s.", name);
702 }
703
704 return 0;
705}
706
707static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
708{
709 struct rte_eth_dev *ethdev;
710
711 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
712 if (!ethdev)
713 return 0;
714
715 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
716 return rte_eth_dev_pci_generic_remove(pci_dev,
717 i40e_vf_representor_uninit);
718 else
719 return rte_eth_dev_pci_generic_remove(pci_dev,
720 eth_i40e_dev_uninit);
721}
722
723static struct rte_pci_driver rte_i40e_pmd = {
724 .id_table = pci_id_i40e_map,
725 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
726 .probe = eth_i40e_pci_probe,
727 .remove = eth_i40e_pci_remove,
728};
729
730static inline void
731i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
732 uint32_t reg_val)
733{
734 uint32_t ori_reg_val;
735 struct rte_eth_dev_data *dev_data =
736 ((struct i40e_adapter *)hw->back)->pf.dev_data;
737 struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
738
739 ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
740 i40e_write_rx_ctl(hw, reg_addr, reg_val);
741 if (ori_reg_val != reg_val)
742 PMD_DRV_LOG(WARNING,
743 "i40e device %s changed global register [0x%08x]."
744 " original: 0x%08x, new: 0x%08x",
745 dev->device->name, reg_addr, ori_reg_val, reg_val);
746}
747
748RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
749RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
750RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
751
752#ifndef I40E_GLQF_ORT
753#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
754#endif
755#ifndef I40E_GLQF_PIT
756#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
757#endif
758#ifndef I40E_GLQF_L3_MAP
759#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
760#endif
761
762static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
763{
764
765
766
767
768
769
770 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
771 I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
772}
773
774static inline void i40e_config_automask(struct i40e_pf *pf)
775{
776 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
777 uint32_t val;
778
779
780 val = I40E_READ_REG(hw, I40E_GLINT_CTL);
781 val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
782 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
783
784
785 if (!pf->support_multi_driver)
786 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
787
788 I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
789}
790
791static inline void i40e_clear_automask(struct i40e_pf *pf)
792{
793 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
794 uint32_t val;
795
796 val = I40E_READ_REG(hw, I40E_GLINT_CTL);
797 val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
798 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);
799
800 if (!pf->support_multi_driver)
801 val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
802
803 I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
804}
805
806#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
807
808
809
810
811
812static void
813i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
814{
815 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
816 uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
817 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
818 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
819 int ret;
820
821 ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
822 I40E_FLOW_CONTROL_ETHERTYPE, flags,
823 pf->main_vsi_seid, 0,
824 TRUE, NULL, NULL);
825 if (ret)
826 PMD_INIT_LOG(ERR,
827 "Failed to add filter to drop flow control frames from VSIs.");
828}
829
830static int
831floating_veb_list_handler(__rte_unused const char *key,
832 const char *floating_veb_value,
833 void *opaque)
834{
835 int idx = 0;
836 unsigned int count = 0;
837 char *end = NULL;
838 int min, max;
839 bool *vf_floating_veb = opaque;
840
841 while (isblank(*floating_veb_value))
842 floating_veb_value++;
843
844
845 for (idx = 0; idx < I40E_MAX_VF; idx++)
846 vf_floating_veb[idx] = false;
847
848 min = I40E_MAX_VF;
849 do {
850 while (isblank(*floating_veb_value))
851 floating_veb_value++;
852 if (*floating_veb_value == '\0')
853 return -1;
854 errno = 0;
855 idx = strtoul(floating_veb_value, &end, 10);
856 if (errno || end == NULL)
857 return -1;
858 if (idx < 0)
859 return -1;
860 while (isblank(*end))
861 end++;
862 if (*end == '-') {
863 min = idx;
864 } else if ((*end == ';') || (*end == '\0')) {
865 max = idx;
866 if (min == I40E_MAX_VF)
867 min = idx;
868 if (max >= I40E_MAX_VF)
869 max = I40E_MAX_VF - 1;
870 for (idx = min; idx <= max; idx++) {
871 vf_floating_veb[idx] = true;
872 count++;
873 }
874 min = I40E_MAX_VF;
875 } else {
876 return -1;
877 }
878 floating_veb_value = end + 1;
879 } while (*end != '\0');
880
881 if (count == 0)
882 return -1;
883
884 return 0;
885}
886
887static void
888config_vf_floating_veb(struct rte_devargs *devargs,
889 uint16_t floating_veb,
890 bool *vf_floating_veb)
891{
892 struct rte_kvargs *kvlist;
893 int i;
894 const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
895
896 if (!floating_veb)
897 return;
898
899
900
901 for (i = 0; i < I40E_MAX_VF; i++)
902 vf_floating_veb[i] = true;
903
904 if (devargs == NULL)
905 return;
906
907 kvlist = rte_kvargs_parse(devargs->args, valid_keys);
908 if (kvlist == NULL)
909 return;
910
911 if (!rte_kvargs_count(kvlist, floating_veb_list)) {
912 rte_kvargs_free(kvlist);
913 return;
914 }
915
916
917
918
919 if (rte_kvargs_process(kvlist, floating_veb_list,
920 floating_veb_list_handler,
921 vf_floating_veb) < 0) {
922 rte_kvargs_free(kvlist);
923 return;
924 }
925 rte_kvargs_free(kvlist);
926}
927
928static int
929i40e_check_floating_handler(__rte_unused const char *key,
930 const char *value,
931 __rte_unused void *opaque)
932{
933 if (strcmp(value, "1"))
934 return -1;
935
936 return 0;
937}
938
939static int
940is_floating_veb_supported(struct rte_devargs *devargs)
941{
942 struct rte_kvargs *kvlist;
943 const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
944
945 if (devargs == NULL)
946 return 0;
947
948 kvlist = rte_kvargs_parse(devargs->args, valid_keys);
949 if (kvlist == NULL)
950 return 0;
951
952 if (!rte_kvargs_count(kvlist, floating_veb_key)) {
953 rte_kvargs_free(kvlist);
954 return 0;
955 }
956
957
958
959 if (rte_kvargs_process(kvlist, floating_veb_key,
960 i40e_check_floating_handler, NULL) < 0) {
961 rte_kvargs_free(kvlist);
962 return 0;
963 }
964 rte_kvargs_free(kvlist);
965
966 return 1;
967}
968
969static void
970config_floating_veb(struct rte_eth_dev *dev)
971{
972 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
973 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
974 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
975
976 memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
977
978 if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
979 pf->floating_veb =
980 is_floating_veb_supported(pci_dev->device.devargs);
981 config_vf_floating_veb(pci_dev->device.devargs,
982 pf->floating_veb,
983 pf->floating_veb_list);
984 } else {
985 pf->floating_veb = false;
986 }
987}
988
989#define I40E_L2_TAGS_S_TAG_SHIFT 1
990#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
991
992static int
993i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
994{
995 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
996 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
997 char ethertype_hash_name[RTE_HASH_NAMESIZE];
998 int ret;
999
1000 struct rte_hash_parameters ethertype_hash_params = {
1001 .name = ethertype_hash_name,
1002 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
1003 .key_len = sizeof(struct i40e_ethertype_filter_input),
1004 .hash_func = rte_hash_crc,
1005 .hash_func_init_val = 0,
1006 .socket_id = rte_socket_id(),
1007 };
1008
1009
1010 TAILQ_INIT(ðertype_rule->ethertype_list);
1011 snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
1012 "ethertype_%s", dev->device->name);
1013 ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
1014 if (!ethertype_rule->hash_table) {
1015 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
1016 return -EINVAL;
1017 }
1018 ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
1019 sizeof(struct i40e_ethertype_filter *) *
1020 I40E_MAX_ETHERTYPE_FILTER_NUM,
1021 0);
1022 if (!ethertype_rule->hash_map) {
1023 PMD_INIT_LOG(ERR,
1024 "Failed to allocate memory for ethertype hash map!");
1025 ret = -ENOMEM;
1026 goto err_ethertype_hash_map_alloc;
1027 }
1028
1029 return 0;
1030
1031err_ethertype_hash_map_alloc:
1032 rte_hash_free(ethertype_rule->hash_table);
1033
1034 return ret;
1035}
1036
1037static int
1038i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
1039{
1040 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1041 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1042 char tunnel_hash_name[RTE_HASH_NAMESIZE];
1043 int ret;
1044
1045 struct rte_hash_parameters tunnel_hash_params = {
1046 .name = tunnel_hash_name,
1047 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
1048 .key_len = sizeof(struct i40e_tunnel_filter_input),
1049 .hash_func = rte_hash_crc,
1050 .hash_func_init_val = 0,
1051 .socket_id = rte_socket_id(),
1052 };
1053
1054
1055 TAILQ_INIT(&tunnel_rule->tunnel_list);
1056 snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1057 "tunnel_%s", dev->device->name);
1058 tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1059 if (!tunnel_rule->hash_table) {
1060 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1061 return -EINVAL;
1062 }
1063 tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1064 sizeof(struct i40e_tunnel_filter *) *
1065 I40E_MAX_TUNNEL_FILTER_NUM,
1066 0);
1067 if (!tunnel_rule->hash_map) {
1068 PMD_INIT_LOG(ERR,
1069 "Failed to allocate memory for tunnel hash map!");
1070 ret = -ENOMEM;
1071 goto err_tunnel_hash_map_alloc;
1072 }
1073
1074 return 0;
1075
1076err_tunnel_hash_map_alloc:
1077 rte_hash_free(tunnel_rule->hash_table);
1078
1079 return ret;
1080}
1081
1082static int
1083i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1084{
1085 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1086 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1087 struct i40e_fdir_info *fdir_info = &pf->fdir;
1088 char fdir_hash_name[RTE_HASH_NAMESIZE];
1089 uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
1090 uint32_t best = hw->func_caps.fd_filters_best_effort;
1091 enum i40e_filter_pctype pctype;
1092 struct rte_bitmap *bmp = NULL;
1093 uint32_t bmp_size;
1094 void *mem = NULL;
1095 uint32_t i = 0;
1096 int ret;
1097
1098 struct rte_hash_parameters fdir_hash_params = {
1099 .name = fdir_hash_name,
1100 .entries = I40E_MAX_FDIR_FILTER_NUM,
1101 .key_len = sizeof(struct i40e_fdir_input),
1102 .hash_func = rte_hash_crc,
1103 .hash_func_init_val = 0,
1104 .socket_id = rte_socket_id(),
1105 };
1106
1107
1108 TAILQ_INIT(&fdir_info->fdir_list);
1109 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1110 "fdir_%s", dev->device->name);
1111 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1112 if (!fdir_info->hash_table) {
1113 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1114 return -EINVAL;
1115 }
1116
1117 fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1118 sizeof(struct i40e_fdir_filter *) *
1119 I40E_MAX_FDIR_FILTER_NUM,
1120 0);
1121 if (!fdir_info->hash_map) {
1122 PMD_INIT_LOG(ERR,
1123 "Failed to allocate memory for fdir hash map!");
1124 ret = -ENOMEM;
1125 goto err_fdir_hash_map_alloc;
1126 }
1127
1128 fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
1129 sizeof(struct i40e_fdir_filter) *
1130 I40E_MAX_FDIR_FILTER_NUM,
1131 0);
1132
1133 if (!fdir_info->fdir_filter_array) {
1134 PMD_INIT_LOG(ERR,
1135 "Failed to allocate memory for fdir filter array!");
1136 ret = -ENOMEM;
1137 goto err_fdir_filter_array_alloc;
1138 }
1139
1140 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1141 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
1142 pf->fdir.flow_count[pctype] = 0;
1143
1144 fdir_info->fdir_space_size = alloc + best;
1145 fdir_info->fdir_actual_cnt = 0;
1146 fdir_info->fdir_guarantee_total_space = alloc;
1147 fdir_info->fdir_guarantee_free_space =
1148 fdir_info->fdir_guarantee_total_space;
1149
1150 PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);
1151
1152 fdir_info->fdir_flow_pool.pool =
1153 rte_zmalloc("i40e_fdir_entry",
1154 sizeof(struct i40e_fdir_entry) *
1155 fdir_info->fdir_space_size,
1156 0);
1157
1158 if (!fdir_info->fdir_flow_pool.pool) {
1159 PMD_INIT_LOG(ERR,
1160 "Failed to allocate memory for bitmap flow!");
1161 ret = -ENOMEM;
1162 goto err_fdir_bitmap_flow_alloc;
1163 }
1164
1165 for (i = 0; i < fdir_info->fdir_space_size; i++)
1166 fdir_info->fdir_flow_pool.pool[i].idx = i;
1167
1168 bmp_size =
1169 rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
1170 mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
1171 if (mem == NULL) {
1172 PMD_INIT_LOG(ERR,
1173 "Failed to allocate memory for fdir bitmap!");
1174 ret = -ENOMEM;
1175 goto err_fdir_mem_alloc;
1176 }
1177 bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
1178 if (bmp == NULL) {
1179 PMD_INIT_LOG(ERR,
1180 "Failed to initialization fdir bitmap!");
1181 ret = -ENOMEM;
1182 goto err_fdir_bmp_alloc;
1183 }
1184 for (i = 0; i < fdir_info->fdir_space_size; i++)
1185 rte_bitmap_set(bmp, i);
1186
1187 fdir_info->fdir_flow_pool.bitmap = bmp;
1188
1189 return 0;
1190
1191err_fdir_bmp_alloc:
1192 rte_free(mem);
1193err_fdir_mem_alloc:
1194 rte_free(fdir_info->fdir_flow_pool.pool);
1195err_fdir_bitmap_flow_alloc:
1196 rte_free(fdir_info->fdir_filter_array);
1197err_fdir_filter_array_alloc:
1198 rte_free(fdir_info->hash_map);
1199err_fdir_hash_map_alloc:
1200 rte_hash_free(fdir_info->hash_table);
1201
1202 return ret;
1203}
1204
1205static void
1206i40e_init_customized_info(struct i40e_pf *pf)
1207{
1208 int i;
1209
1210
1211 for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1212 pf->customized_pctype[i].index = i;
1213 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1214 pf->customized_pctype[i].valid = false;
1215 }
1216
1217 pf->gtp_support = false;
1218 pf->esp_support = false;
1219}
1220
1221static void
1222i40e_init_filter_invalidation(struct i40e_pf *pf)
1223{
1224 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1225 struct i40e_fdir_info *fdir_info = &pf->fdir;
1226 uint32_t glqf_ctl_reg = 0;
1227
1228 glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1229 if (!pf->support_multi_driver) {
1230 fdir_info->fdir_invalprio = 1;
1231 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1232 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1233 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1234 } else {
1235 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1236 fdir_info->fdir_invalprio = 1;
1237 PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1238 } else {
1239 fdir_info->fdir_invalprio = 0;
1240 PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1241 }
1242 }
1243}
1244
1245void
1246i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1247{
1248 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1249 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1250 struct i40e_queue_regions *info = &pf->queue_region;
1251 uint16_t i;
1252
1253 for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1254 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1255
1256 memset(info, 0, sizeof(struct i40e_queue_regions));
1257}
1258
1259static int
1260i40e_parse_multi_drv_handler(__rte_unused const char *key,
1261 const char *value,
1262 void *opaque)
1263{
1264 struct i40e_pf *pf;
1265 unsigned long support_multi_driver;
1266 char *end;
1267
1268 pf = (struct i40e_pf *)opaque;
1269
1270 errno = 0;
1271 support_multi_driver = strtoul(value, &end, 10);
1272 if (errno != 0 || end == value || *end != 0) {
1273 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1274 return -(EINVAL);
1275 }
1276
1277 if (support_multi_driver == 1 || support_multi_driver == 0)
1278 pf->support_multi_driver = (bool)support_multi_driver;
1279 else
1280 PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
1281 "enable global configuration by default."
1282 ETH_I40E_SUPPORT_MULTI_DRIVER);
1283 return 0;
1284}
1285
1286static int
1287i40e_support_multi_driver(struct rte_eth_dev *dev)
1288{
1289 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1290 struct rte_kvargs *kvlist;
1291 int kvargs_count;
1292
1293
1294 pf->support_multi_driver = false;
1295
1296 if (!dev->device->devargs)
1297 return 0;
1298
1299 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1300 if (!kvlist)
1301 return -EINVAL;
1302
1303 kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1304 if (!kvargs_count) {
1305 rte_kvargs_free(kvlist);
1306 return 0;
1307 }
1308
1309 if (kvargs_count > 1)
1310 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1311 "the first invalid or last valid one is used !",
1312 ETH_I40E_SUPPORT_MULTI_DRIVER);
1313
1314 if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1315 i40e_parse_multi_drv_handler, pf) < 0) {
1316 rte_kvargs_free(kvlist);
1317 return -EINVAL;
1318 }
1319
1320 rte_kvargs_free(kvlist);
1321 return 0;
1322}
1323
1324static int
1325i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1326 uint32_t reg_addr, uint64_t reg_val,
1327 struct i40e_asq_cmd_details *cmd_details)
1328{
1329 uint64_t ori_reg_val;
1330 struct rte_eth_dev_data *dev_data =
1331 ((struct i40e_adapter *)hw->back)->pf.dev_data;
1332 struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
1333 int ret;
1334
1335 ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1336 if (ret != I40E_SUCCESS) {
1337 PMD_DRV_LOG(ERR,
1338 "Fail to debug read from 0x%08x",
1339 reg_addr);
1340 return -EIO;
1341 }
1342
1343 if (ori_reg_val != reg_val)
1344 PMD_DRV_LOG(WARNING,
1345 "i40e device %s changed global register [0x%08x]."
1346 " original: 0x%"PRIx64", after: 0x%"PRIx64,
1347 dev->device->name, reg_addr, ori_reg_val, reg_val);
1348
1349 return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1350}
1351
1352static int
1353read_vf_msg_config(__rte_unused const char *key,
1354 const char *value,
1355 void *opaque)
1356{
1357 struct i40e_vf_msg_cfg *cfg = opaque;
1358
1359 if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1360 &cfg->ignore_second) != 3) {
1361 memset(cfg, 0, sizeof(*cfg));
1362 PMD_DRV_LOG(ERR, "format error! example: "
1363 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1364 return -EINVAL;
1365 }
1366
1367
1368
1369
1370
1371 if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1372 memset(cfg, 0, sizeof(*cfg));
1373 PMD_DRV_LOG(ERR, "%s error! the second and third"
1374 " number must be greater than 0!",
1375 ETH_I40E_VF_MSG_CFG);
1376 return -EINVAL;
1377 }
1378
1379 return 0;
1380}
1381
1382static int
1383i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1384 struct i40e_vf_msg_cfg *msg_cfg)
1385{
1386 struct rte_kvargs *kvlist;
1387 int kvargs_count;
1388 int ret = 0;
1389
1390 memset(msg_cfg, 0, sizeof(*msg_cfg));
1391
1392 if (!dev->device->devargs)
1393 return ret;
1394
1395 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1396 if (!kvlist)
1397 return -EINVAL;
1398
1399 kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1400 if (!kvargs_count)
1401 goto free_end;
1402
1403 if (kvargs_count > 1) {
1404 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1405 ETH_I40E_VF_MSG_CFG);
1406 ret = -EINVAL;
1407 goto free_end;
1408 }
1409
1410 if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1411 read_vf_msg_config, msg_cfg) < 0)
1412 ret = -EINVAL;
1413
1414free_end:
1415 rte_kvargs_free(kvlist);
1416 return ret;
1417}
1418
1419#define I40E_ALARM_INTERVAL 50000
1420
1421static int
1422eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1423{
1424 struct rte_pci_device *pci_dev;
1425 struct rte_intr_handle *intr_handle;
1426 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1427 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428 struct i40e_vsi *vsi;
1429 int ret;
1430 uint32_t len, val;
1431 uint8_t aq_fail = 0;
1432
1433 PMD_INIT_FUNC_TRACE();
1434
1435 dev->dev_ops = &i40e_eth_dev_ops;
1436 dev->rx_queue_count = i40e_dev_rx_queue_count;
1437 dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1438 dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1439 dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1440 dev->rx_pkt_burst = i40e_recv_pkts;
1441 dev->tx_pkt_burst = i40e_xmit_pkts;
1442 dev->tx_pkt_prepare = i40e_prep_pkts;
1443
1444
1445
1446
1447 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1448 i40e_set_rx_function(dev);
1449 i40e_set_tx_function(dev);
1450 return 0;
1451 }
1452 i40e_set_default_ptype_table(dev);
1453 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1454 intr_handle = &pci_dev->intr_handle;
1455
1456 rte_eth_copy_pci_info(dev, pci_dev);
1457 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1458
1459 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1460 pf->dev_data = dev->data;
1461
1462 hw->back = I40E_PF_TO_ADAPTER(pf);
1463 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1464 if (!hw->hw_addr) {
1465 PMD_INIT_LOG(ERR,
1466 "Hardware is not available, as address is NULL");
1467 return -ENODEV;
1468 }
1469
1470 hw->vendor_id = pci_dev->id.vendor_id;
1471 hw->device_id = pci_dev->id.device_id;
1472 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1473 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1474 hw->bus.device = pci_dev->addr.devid;
1475 hw->bus.func = pci_dev->addr.function;
1476 hw->adapter_stopped = 0;
1477 hw->adapter_closed = 0;
1478
1479
1480 hw->switch_dev = NULL;
1481
1482
1483
1484
1485
1486
1487 hw->switch_tag = 0xffff;
1488
1489 val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1490 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1491 PMD_INIT_LOG(ERR, "\nERROR: "
1492 "Firmware recovery mode detected. Limiting functionality.\n"
1493 "Refer to the Intel(R) Ethernet Adapters and Devices "
1494 "User Guide for details on firmware recovery mode.");
1495 return -EIO;
1496 }
1497
1498 i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1499
1500 i40e_support_multi_driver(dev);
1501
1502
1503 i40e_clear_hw(hw);
1504
1505
1506 ret = i40e_pf_reset(hw);
1507 if (ret) {
1508 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1509 return ret;
1510 }
1511
1512
1513 ret = i40e_init_shared_code(hw);
1514 if (ret) {
1515 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1516 return ret;
1517 }
1518
1519
1520 i40e_init_adminq_parameter(hw);
1521 ret = i40e_init_adminq(hw);
1522 if (ret != I40E_SUCCESS) {
1523 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1524 return -EIO;
1525 }
1526
1527 if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1528 hw->device_id == I40E_DEV_ID_SFP_I_X722)
1529 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1530
1531 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1532 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1533 hw->aq.api_maj_ver, hw->aq.api_min_ver,
1534 ((hw->nvm.version >> 12) & 0xf),
1535 ((hw->nvm.version >> 4) & 0xff),
1536 (hw->nvm.version & 0xf), hw->nvm.eetrack);
1537
1538
1539 i40e_hw_init(dev);
1540
1541 i40e_config_automask(pf);
1542
1543 i40e_set_default_pctype_table(dev);
1544
1545
1546
1547
1548
1549
1550 if (!pf->support_multi_driver)
1551 i40e_GLQF_reg_init(hw);
1552
1553
1554 i40e_filter_input_set_init(pf);
1555
1556
1557 if (!pf->support_multi_driver) {
1558 ret = i40e_aq_debug_write_global_register(hw,
1559 I40E_GLQF_L3_MAP(40),
1560 0x00000028, NULL);
1561 if (ret)
1562 PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1563 ret);
1564 PMD_INIT_LOG(DEBUG,
1565 "Global register 0x%08x is changed with 0x28",
1566 I40E_GLQF_L3_MAP(40));
1567 }
1568
1569
1570 config_floating_veb(dev);
1571
1572 i40e_clear_pxe_mode(hw);
1573 i40e_dev_sync_phy_type(hw);
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583 i40e_configure_registers(hw);
1584
1585
1586 ret = i40e_get_cap(hw);
1587 if (ret != I40E_SUCCESS) {
1588 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1589 goto err_get_capabilities;
1590 }
1591
1592
1593 ret = i40e_pf_parameter_init(dev);
1594 if (ret != 0) {
1595 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1596 goto err_parameter_init;
1597 }
1598
1599
1600 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1601 if (ret < 0) {
1602 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1603 goto err_qp_pool_init;
1604 }
1605 ret = i40e_res_pool_init(&pf->msix_pool, 1,
1606 hw->func_caps.num_msix_vectors - 1);
1607 if (ret < 0) {
1608 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1609 goto err_msix_pool_init;
1610 }
1611
1612
1613 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1614 hw->func_caps.num_rx_qp, 0, 0);
1615 if (ret != I40E_SUCCESS) {
1616 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1617 goto err_init_lan_hmc;
1618 }
1619
1620
1621 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1622 if (ret != I40E_SUCCESS) {
1623 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1624 goto err_configure_lan_hmc;
1625 }
1626
1627
1628 i40e_get_mac_addr(hw, hw->mac.addr);
1629 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1630 PMD_INIT_LOG(ERR, "mac address is not valid");
1631 ret = -EIO;
1632 goto err_get_mac_addr;
1633 }
1634
1635 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1636 (struct rte_ether_addr *)hw->mac.perm_addr);
1637
1638
1639 hw->fc.requested_mode = I40E_FC_NONE;
1640 i40e_set_fc(hw, &aq_fail, TRUE);
1641
1642
1643 if (!pf->support_multi_driver) {
1644 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1645 RTE_ETHER_TYPE_VLAN);
1646 if (ret != I40E_SUCCESS) {
1647 PMD_INIT_LOG(ERR,
1648 "Failed to set the default outer "
1649 "VLAN ether type");
1650 goto err_setup_pf_switch;
1651 }
1652 }
1653
1654
1655 ret = i40e_pf_setup(pf);
1656 if (ret) {
1657 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1658 goto err_setup_pf_switch;
1659 }
1660
1661 vsi = pf->main_vsi;
1662
1663
1664 i40e_vsi_config_double_vlan(vsi, FALSE);
1665
1666
1667 if (!pf->floating_veb) {
1668 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1669 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1670 ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1671 I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1672 }
1673 }
1674
1675 if (!vsi->max_macaddrs)
1676 len = RTE_ETHER_ADDR_LEN;
1677 else
1678 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1679
1680
1681 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1682 if (!dev->data->mac_addrs) {
1683 PMD_INIT_LOG(ERR,
1684 "Failed to allocated memory for storing mac address");
1685 goto err_mac_alloc;
1686 }
1687 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1688 &dev->data->mac_addrs[0]);
1689
1690
1691 ret = i40e_dcb_init_configure(dev, TRUE);
1692 if (ret != I40E_SUCCESS) {
1693 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1694 pf->flags &= ~I40E_FLAG_DCB;
1695 }
1696
1697 i40e_get_cap(hw);
1698
1699
1700 i40e_pf_host_init(dev);
1701
1702
1703 rte_intr_callback_register(intr_handle,
1704 i40e_dev_interrupt_handler, dev);
1705
1706
1707 i40e_pf_config_irq0(hw, TRUE);
1708 i40e_pf_enable_irq0(hw);
1709
1710
1711 rte_intr_enable(intr_handle);
1712
1713
1714 if (!pf->support_multi_driver)
1715 i40e_flex_payload_reg_set_default(hw);
1716
1717
1718
1719
1720
1721
1722 i40e_add_tx_flow_control_drop_filter(pf);
1723
1724
1725
1726
1727 i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1728
1729
1730 TAILQ_INIT(&pf->mirror_list);
1731
1732
1733 TAILQ_INIT(&pf->rss_config_list);
1734
1735
1736 i40e_tm_conf_init(dev);
1737
1738
1739 i40e_init_customized_info(pf);
1740
1741
1742 i40e_init_filter_invalidation(pf);
1743
1744 ret = i40e_init_ethtype_filter_list(dev);
1745 if (ret < 0)
1746 goto err_init_ethtype_filter_list;
1747 ret = i40e_init_tunnel_filter_list(dev);
1748 if (ret < 0)
1749 goto err_init_tunnel_filter_list;
1750 ret = i40e_init_fdir_filter_list(dev);
1751 if (ret < 0)
1752 goto err_init_fdir_filter_list;
1753
1754
1755 i40e_init_queue_region_conf(dev);
1756
1757
1758 i40e_dev_stats_reset(dev);
1759
1760 return 0;
1761
1762err_init_fdir_filter_list:
1763 rte_free(pf->tunnel.hash_table);
1764 rte_free(pf->tunnel.hash_map);
1765err_init_tunnel_filter_list:
1766 rte_free(pf->ethertype.hash_table);
1767 rte_free(pf->ethertype.hash_map);
1768err_init_ethtype_filter_list:
1769 rte_free(dev->data->mac_addrs);
1770 dev->data->mac_addrs = NULL;
1771err_mac_alloc:
1772 i40e_vsi_release(pf->main_vsi);
1773err_setup_pf_switch:
1774err_get_mac_addr:
1775err_configure_lan_hmc:
1776 (void)i40e_shutdown_lan_hmc(hw);
1777err_init_lan_hmc:
1778 i40e_res_pool_destroy(&pf->msix_pool);
1779err_msix_pool_init:
1780 i40e_res_pool_destroy(&pf->qp_pool);
1781err_qp_pool_init:
1782err_parameter_init:
1783err_get_capabilities:
1784 (void)i40e_shutdown_adminq(hw);
1785
1786 return ret;
1787}
1788
1789static void
1790i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1791{
1792 struct i40e_ethertype_filter *p_ethertype;
1793 struct i40e_ethertype_rule *ethertype_rule;
1794
1795 ethertype_rule = &pf->ethertype;
1796
1797 if (ethertype_rule->hash_map)
1798 rte_free(ethertype_rule->hash_map);
1799 if (ethertype_rule->hash_table)
1800 rte_hash_free(ethertype_rule->hash_table);
1801
1802 while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
1803 TAILQ_REMOVE(ðertype_rule->ethertype_list,
1804 p_ethertype, rules);
1805 rte_free(p_ethertype);
1806 }
1807}
1808
1809static void
1810i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1811{
1812 struct i40e_tunnel_filter *p_tunnel;
1813 struct i40e_tunnel_rule *tunnel_rule;
1814
1815 tunnel_rule = &pf->tunnel;
1816
1817 if (tunnel_rule->hash_map)
1818 rte_free(tunnel_rule->hash_map);
1819 if (tunnel_rule->hash_table)
1820 rte_hash_free(tunnel_rule->hash_table);
1821
1822 while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1823 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1824 rte_free(p_tunnel);
1825 }
1826}
1827
1828static void
1829i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1830{
1831 struct i40e_fdir_filter *p_fdir;
1832 struct i40e_fdir_info *fdir_info;
1833
1834 fdir_info = &pf->fdir;
1835
1836
1837 while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1838 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1839}
1840
1841static void
1842i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1843{
1844 struct i40e_fdir_info *fdir_info;
1845
1846 fdir_info = &pf->fdir;
1847
1848
1849 if (fdir_info->hash_map)
1850 rte_free(fdir_info->hash_map);
1851 if (fdir_info->hash_table)
1852 rte_hash_free(fdir_info->hash_table);
1853 if (fdir_info->fdir_flow_pool.bitmap)
1854 rte_free(fdir_info->fdir_flow_pool.bitmap);
1855 if (fdir_info->fdir_flow_pool.pool)
1856 rte_free(fdir_info->fdir_flow_pool.pool);
1857 if (fdir_info->fdir_filter_array)
1858 rte_free(fdir_info->fdir_filter_array);
1859}
1860
1861void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1862{
1863
1864
1865
1866
1867 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1868 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1869 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1870}
1871
1872static int
1873eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1874{
1875 struct i40e_hw *hw;
1876
1877 PMD_INIT_FUNC_TRACE();
1878
1879 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1880 return 0;
1881
1882 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1883
1884 if (hw->adapter_closed == 0)
1885 i40e_dev_close(dev);
1886
1887 return 0;
1888}
1889
1890static int
1891i40e_dev_configure(struct rte_eth_dev *dev)
1892{
1893 struct i40e_adapter *ad =
1894 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1895 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1896 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1897 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1898 int i, ret;
1899
1900 ret = i40e_dev_sync_phy_type(hw);
1901 if (ret)
1902 return ret;
1903
1904
1905
1906
1907 ad->rx_bulk_alloc_allowed = true;
1908 ad->rx_vec_allowed = true;
1909 ad->tx_simple_allowed = true;
1910 ad->tx_vec_allowed = true;
1911
1912 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1913 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1914
1915
1916
1917
1918
1919 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1920 ret = i40e_fdir_setup(pf);
1921 if (ret != I40E_SUCCESS) {
1922 PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1923 return -ENOTSUP;
1924 }
1925 ret = i40e_fdir_configure(dev);
1926 if (ret < 0) {
1927 PMD_DRV_LOG(ERR, "failed to configure fdir.");
1928 goto err;
1929 }
1930 } else
1931 i40e_fdir_teardown(pf);
1932
1933 ret = i40e_dev_init_vlan(dev);
1934 if (ret < 0)
1935 goto err;
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947 if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1948 ret = i40e_vmdq_setup(dev);
1949 if (ret)
1950 goto err;
1951 }
1952
1953 if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1954 ret = i40e_dcb_setup(dev);
1955 if (ret) {
1956 PMD_DRV_LOG(ERR, "failed to configure DCB.");
1957 goto err_dcb;
1958 }
1959 }
1960
1961 TAILQ_INIT(&pf->flow_list);
1962
1963 return 0;
1964
1965err_dcb:
1966
1967 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1968 i40e_vsi_release(pf->vmdq[i].vsi);
1969 pf->vmdq[i].vsi = NULL;
1970 }
1971 rte_free(pf->vmdq);
1972 pf->vmdq = NULL;
1973err:
1974
1975
1976
1977
1978
1979 i40e_fdir_teardown(pf);
1980 return ret;
1981}
1982
1983void
1984i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1985{
1986 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
1987 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1988 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1989 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1990 uint16_t msix_vect = vsi->msix_intr;
1991 uint16_t i;
1992
1993 for (i = 0; i < vsi->nb_qps; i++) {
1994 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1995 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1996 rte_wmb();
1997 }
1998
1999 if (vsi->type != I40E_VSI_SRIOV) {
2000 if (!rte_intr_allow_others(intr_handle)) {
2001 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2002 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2003 I40E_WRITE_REG(hw,
2004 I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2005 0);
2006 } else {
2007 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2008 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2009 I40E_WRITE_REG(hw,
2010 I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2011 msix_vect - 1), 0);
2012 }
2013 } else {
2014 uint32_t reg;
2015 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2016 vsi->user_param + (msix_vect - 1);
2017
2018 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2019 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2020 }
2021 I40E_WRITE_FLUSH(hw);
2022}
2023
2024static void
2025__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2026 int base_queue, int nb_queue,
2027 uint16_t itr_idx)
2028{
2029 int i;
2030 uint32_t val;
2031 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2032 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2033
2034
2035 for (i = 0; i < nb_queue; i++) {
2036 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2037 itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2038 ((base_queue + i + 1) <<
2039 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2040 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2041 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2042
2043 if (i == nb_queue - 1)
2044 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2045 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2046 }
2047
2048
2049 if (vsi->type != I40E_VSI_SRIOV) {
2050 uint16_t interval =
2051 i40e_calc_itr_interval(1, pf->support_multi_driver);
2052
2053 if (msix_vect == I40E_MISC_VEC_ID) {
2054 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2055 (base_queue <<
2056 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2057 (0x0 <<
2058 I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2059 I40E_WRITE_REG(hw,
2060 I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2061 interval);
2062 } else {
2063 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2064 (base_queue <<
2065 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2066 (0x0 <<
2067 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2068 I40E_WRITE_REG(hw,
2069 I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2070 msix_vect - 1),
2071 interval);
2072 }
2073 } else {
2074 uint32_t reg;
2075
2076 if (msix_vect == I40E_MISC_VEC_ID) {
2077 I40E_WRITE_REG(hw,
2078 I40E_VPINT_LNKLST0(vsi->user_param),
2079 (base_queue <<
2080 I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2081 (0x0 <<
2082 I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2083 } else {
2084
2085 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2086 vsi->user_param + (msix_vect - 1);
2087
2088 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2089 (base_queue <<
2090 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2091 (0x0 <<
2092 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2093 }
2094 }
2095
2096 I40E_WRITE_FLUSH(hw);
2097}
2098
2099int
2100i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2101{
2102 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2103 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2104 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2105 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2106 uint16_t msix_vect = vsi->msix_intr;
2107 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2108 uint16_t queue_idx = 0;
2109 int record = 0;
2110 int i;
2111
2112 for (i = 0; i < vsi->nb_qps; i++) {
2113 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2114 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2115 }
2116
2117
2118 if (vsi->type == I40E_VSI_SRIOV) {
2119 if (vsi->nb_msix == 0) {
2120 PMD_DRV_LOG(ERR, "No msix resource");
2121 return -EINVAL;
2122 }
2123 __vsi_queues_bind_intr(vsi, msix_vect,
2124 vsi->base_queue, vsi->nb_qps,
2125 itr_idx);
2126 return 0;
2127 }
2128
2129
2130 if (rte_intr_dp_is_en(intr_handle)) {
2131 if (vsi->type == I40E_VSI_MAIN) {
2132 queue_idx = 0;
2133 record = 1;
2134 } else if (vsi->type == I40E_VSI_VMDQ2) {
2135 struct i40e_vsi *main_vsi =
2136 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2137 queue_idx = vsi->base_queue - main_vsi->nb_qps;
2138 record = 1;
2139 }
2140 }
2141
2142 for (i = 0; i < vsi->nb_used_qps; i++) {
2143 if (vsi->nb_msix == 0) {
2144 PMD_DRV_LOG(ERR, "No msix resource");
2145 return -EINVAL;
2146 } else if (nb_msix <= 1) {
2147 if (!rte_intr_allow_others(intr_handle))
2148
2149 msix_vect = I40E_MISC_VEC_ID;
2150
2151
2152 __vsi_queues_bind_intr(vsi, msix_vect,
2153 vsi->base_queue + i,
2154 vsi->nb_used_qps - i,
2155 itr_idx);
2156 for (; !!record && i < vsi->nb_used_qps; i++)
2157 intr_handle->intr_vec[queue_idx + i] =
2158 msix_vect;
2159 break;
2160 }
2161
2162 __vsi_queues_bind_intr(vsi, msix_vect,
2163 vsi->base_queue + i, 1,
2164 itr_idx);
2165 if (!!record)
2166 intr_handle->intr_vec[queue_idx + i] = msix_vect;
2167
2168 msix_vect++;
2169 nb_msix--;
2170 }
2171
2172 return 0;
2173}
2174
2175void
2176i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2177{
2178 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2179 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2180 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2181 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2182 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2183 uint16_t msix_intr, i;
2184
2185 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2186 for (i = 0; i < vsi->nb_msix; i++) {
2187 msix_intr = vsi->msix_intr + i;
2188 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2189 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2190 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2191 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2192 }
2193 else
2194 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2195 I40E_PFINT_DYN_CTL0_INTENA_MASK |
2196 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2197 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2198
2199 I40E_WRITE_FLUSH(hw);
2200}
2201
2202void
2203i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2204{
2205 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2206 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2207 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2208 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2209 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2210 uint16_t msix_intr, i;
2211
2212 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2213 for (i = 0; i < vsi->nb_msix; i++) {
2214 msix_intr = vsi->msix_intr + i;
2215 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2216 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2217 }
2218 else
2219 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2220 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2221
2222 I40E_WRITE_FLUSH(hw);
2223}
2224
2225static inline uint8_t
2226i40e_parse_link_speeds(uint16_t link_speeds)
2227{
2228 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2229
2230 if (link_speeds & ETH_LINK_SPEED_40G)
2231 link_speed |= I40E_LINK_SPEED_40GB;
2232 if (link_speeds & ETH_LINK_SPEED_25G)
2233 link_speed |= I40E_LINK_SPEED_25GB;
2234 if (link_speeds & ETH_LINK_SPEED_20G)
2235 link_speed |= I40E_LINK_SPEED_20GB;
2236 if (link_speeds & ETH_LINK_SPEED_10G)
2237 link_speed |= I40E_LINK_SPEED_10GB;
2238 if (link_speeds & ETH_LINK_SPEED_1G)
2239 link_speed |= I40E_LINK_SPEED_1GB;
2240 if (link_speeds & ETH_LINK_SPEED_100M)
2241 link_speed |= I40E_LINK_SPEED_100MB;
2242
2243 return link_speed;
2244}
2245
2246static int
2247i40e_phy_conf_link(struct i40e_hw *hw,
2248 uint8_t abilities,
2249 uint8_t force_speed,
2250 bool is_up)
2251{
2252 enum i40e_status_code status;
2253 struct i40e_aq_get_phy_abilities_resp phy_ab;
2254 struct i40e_aq_set_phy_config phy_conf;
2255 enum i40e_aq_phy_type cnt;
2256 uint8_t avail_speed;
2257 uint32_t phy_type_mask = 0;
2258
2259 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2260 I40E_AQ_PHY_FLAG_PAUSE_RX |
2261 I40E_AQ_PHY_FLAG_PAUSE_RX |
2262 I40E_AQ_PHY_FLAG_LOW_POWER;
2263 int ret = -ENOTSUP;
2264
2265
2266 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2267 NULL);
2268 if (status) {
2269 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2270 status);
2271 return ret;
2272 }
2273 avail_speed = phy_ab.link_speed;
2274
2275
2276 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2277 NULL);
2278 if (status) {
2279 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2280 status);
2281 return ret;
2282 }
2283
2284
2285
2286
2287 if (is_up && phy_ab.phy_type != 0 &&
2288 abilities & I40E_AQ_PHY_AN_ENABLED &&
2289 phy_ab.link_speed != 0)
2290 return I40E_SUCCESS;
2291
2292 memset(&phy_conf, 0, sizeof(phy_conf));
2293
2294
2295 abilities &= ~mask;
2296 abilities |= phy_ab.abilities & mask;
2297
2298 phy_conf.abilities = abilities;
2299
2300
2301
2302
2303 if (is_up && !(force_speed & avail_speed)) {
2304 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2305 phy_conf.link_speed = avail_speed;
2306 } else {
2307 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2308 }
2309
2310
2311 for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2312 phy_type_mask |= 1 << cnt;
2313
2314
2315 phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2316 phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2317 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2318 I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2319 I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2320 phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2321 phy_conf.eee_capability = phy_ab.eee_capability;
2322 phy_conf.eeer = phy_ab.eeer_val;
2323 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2324
2325 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2326 phy_ab.abilities, phy_ab.link_speed);
2327 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
2328 phy_conf.abilities, phy_conf.link_speed);
2329
2330 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2331 if (status)
2332 return ret;
2333
2334 return I40E_SUCCESS;
2335}
2336
2337static int
2338i40e_apply_link_speed(struct rte_eth_dev *dev)
2339{
2340 uint8_t speed;
2341 uint8_t abilities = 0;
2342 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2343 struct rte_eth_conf *conf = &dev->data->dev_conf;
2344
2345 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2346 I40E_AQ_PHY_LINK_ENABLED;
2347
2348 if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2349 conf->link_speeds = ETH_LINK_SPEED_40G |
2350 ETH_LINK_SPEED_25G |
2351 ETH_LINK_SPEED_20G |
2352 ETH_LINK_SPEED_10G |
2353 ETH_LINK_SPEED_1G |
2354 ETH_LINK_SPEED_100M;
2355
2356 abilities |= I40E_AQ_PHY_AN_ENABLED;
2357 } else {
2358 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2359 }
2360 speed = i40e_parse_link_speeds(conf->link_speeds);
2361
2362 return i40e_phy_conf_link(hw, abilities, speed, true);
2363}
2364
2365static int
2366i40e_dev_start(struct rte_eth_dev *dev)
2367{
2368 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2369 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2370 struct i40e_vsi *main_vsi = pf->main_vsi;
2371 int ret, i;
2372 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2373 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2374 uint32_t intr_vector = 0;
2375 struct i40e_vsi *vsi;
2376 uint16_t nb_rxq, nb_txq;
2377
2378 hw->adapter_stopped = 0;
2379
2380 rte_intr_disable(intr_handle);
2381
2382 if ((rte_intr_cap_multiple(intr_handle) ||
2383 !RTE_ETH_DEV_SRIOV(dev).active) &&
2384 dev->data->dev_conf.intr_conf.rxq != 0) {
2385 intr_vector = dev->data->nb_rx_queues;
2386 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2387 if (ret)
2388 return ret;
2389 }
2390
2391 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2392 intr_handle->intr_vec =
2393 rte_zmalloc("intr_vec",
2394 dev->data->nb_rx_queues * sizeof(int),
2395 0);
2396 if (!intr_handle->intr_vec) {
2397 PMD_INIT_LOG(ERR,
2398 "Failed to allocate %d rx_queues intr_vec",
2399 dev->data->nb_rx_queues);
2400 return -ENOMEM;
2401 }
2402 }
2403
2404
2405 ret = i40e_dev_rxtx_init(pf);
2406 if (ret != I40E_SUCCESS) {
2407 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2408 return ret;
2409 }
2410
2411
2412 main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2413 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2414 ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2415 if (ret < 0)
2416 return ret;
2417 i40e_vsi_enable_queues_intr(main_vsi);
2418
2419
2420 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2421 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2422 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2423 I40E_ITR_INDEX_DEFAULT);
2424 if (ret < 0)
2425 return ret;
2426 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2427 }
2428
2429
2430 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2431 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2432 if (ret)
2433 goto rx_err;
2434 }
2435
2436 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2437 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2438 if (ret)
2439 goto tx_err;
2440 }
2441
2442
2443 ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2444 if (ret != I40E_SUCCESS)
2445 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2446
2447 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2448 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2449 true, NULL);
2450 if (ret != I40E_SUCCESS)
2451 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2452 }
2453
2454
2455 if (pf->vfs) {
2456 for (i = 0; i < pf->vf_num; i++) {
2457 vsi = pf->vfs[i].vsi;
2458 i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2459 true, NULL);
2460 }
2461 }
2462
2463
2464 if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2465 dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2466 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2467 if (ret != I40E_SUCCESS) {
2468 PMD_DRV_LOG(ERR, "fail to set loopback link");
2469 goto tx_err;
2470 }
2471 }
2472
2473
2474 ret = i40e_apply_link_speed(dev);
2475 if (I40E_SUCCESS != ret) {
2476 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2477 goto tx_err;
2478 }
2479
2480 if (!rte_intr_allow_others(intr_handle)) {
2481 rte_intr_callback_unregister(intr_handle,
2482 i40e_dev_interrupt_handler,
2483 (void *)dev);
2484
2485 i40e_pf_config_irq0(hw, FALSE);
2486 i40e_pf_enable_irq0(hw);
2487
2488 if (dev->data->dev_conf.intr_conf.lsc != 0)
2489 PMD_INIT_LOG(INFO,
2490 "lsc won't enable because of no intr multiplex");
2491 } else {
2492 ret = i40e_aq_set_phy_int_mask(hw,
2493 ~(I40E_AQ_EVENT_LINK_UPDOWN |
2494 I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2495 I40E_AQ_EVENT_MEDIA_NA), NULL);
2496 if (ret != I40E_SUCCESS)
2497 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2498
2499
2500 i40e_dev_link_update(dev, 0);
2501 }
2502
2503 if (dev->data->dev_conf.intr_conf.rxq == 0) {
2504 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2505 i40e_dev_alarm_handler, dev);
2506 } else {
2507
2508 rte_intr_enable(intr_handle);
2509 }
2510
2511 i40e_filter_restore(pf);
2512
2513 if (pf->tm_conf.root && !pf->tm_conf.committed)
2514 PMD_DRV_LOG(WARNING,
2515 "please call hierarchy_commit() "
2516 "before starting the port");
2517
2518 return I40E_SUCCESS;
2519
2520tx_err:
2521 for (i = 0; i < nb_txq; i++)
2522 i40e_dev_tx_queue_stop(dev, i);
2523rx_err:
2524 for (i = 0; i < nb_rxq; i++)
2525 i40e_dev_rx_queue_stop(dev, i);
2526
2527 return ret;
2528}
2529
2530static int
2531i40e_dev_stop(struct rte_eth_dev *dev)
2532{
2533 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2534 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2535 struct i40e_vsi *main_vsi = pf->main_vsi;
2536 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2537 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2538 int i;
2539
2540 if (hw->adapter_stopped == 1)
2541 return 0;
2542
2543 if (dev->data->dev_conf.intr_conf.rxq == 0) {
2544 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2545 rte_intr_enable(intr_handle);
2546 }
2547
2548
2549 for (i = 0; i < dev->data->nb_tx_queues; i++)
2550 i40e_dev_tx_queue_stop(dev, i);
2551
2552 for (i = 0; i < dev->data->nb_rx_queues; i++)
2553 i40e_dev_rx_queue_stop(dev, i);
2554
2555
2556 i40e_vsi_disable_queues_intr(main_vsi);
2557 i40e_vsi_queues_unbind_intr(main_vsi);
2558
2559 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2560 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2561 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2562 }
2563
2564
2565 i40e_dev_clear_queues(dev);
2566
2567
2568 i40e_dev_set_link_down(dev);
2569
2570 if (!rte_intr_allow_others(intr_handle))
2571
2572 rte_intr_callback_register(intr_handle,
2573 i40e_dev_interrupt_handler,
2574 (void *)dev);
2575
2576
2577 rte_intr_efd_disable(intr_handle);
2578 if (intr_handle->intr_vec) {
2579 rte_free(intr_handle->intr_vec);
2580 intr_handle->intr_vec = NULL;
2581 }
2582
2583
2584 pf->tm_conf.committed = false;
2585
2586 hw->adapter_stopped = 1;
2587 dev->data->dev_started = 0;
2588
2589 pf->adapter->rss_reta_updated = 0;
2590
2591 return 0;
2592}
2593
2594static int
2595i40e_dev_close(struct rte_eth_dev *dev)
2596{
2597 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2598 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2599 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2600 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2601 struct i40e_mirror_rule *p_mirror;
2602 struct i40e_filter_control_settings settings;
2603 struct rte_flow *p_flow;
2604 uint32_t reg;
2605 int i;
2606 int ret;
2607 uint8_t aq_fail = 0;
2608 int retries = 0;
2609
2610 PMD_INIT_FUNC_TRACE();
2611 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2612 return 0;
2613
2614 ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2615 if (ret)
2616 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2617
2618
2619 ret = i40e_dev_stop(dev);
2620
2621
2622 while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2623 ret = i40e_aq_del_mirror_rule(hw,
2624 pf->main_vsi->veb->seid,
2625 p_mirror->rule_type,
2626 p_mirror->entries,
2627 p_mirror->num_entries,
2628 p_mirror->id);
2629 if (ret < 0)
2630 PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2631 "status = %d, aq_err = %d.", ret,
2632 hw->aq.asq_last_status);
2633
2634
2635 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2636 rte_free(p_mirror);
2637 pf->nb_mirror_rule--;
2638 }
2639
2640 i40e_dev_free_queues(dev);
2641
2642
2643 i40e_pf_disable_irq0(hw);
2644 rte_intr_disable(intr_handle);
2645
2646
2647
2648
2649
2650
2651 i40e_fdir_teardown(pf);
2652
2653
2654 i40e_shutdown_lan_hmc(hw);
2655
2656 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2657 i40e_vsi_release(pf->vmdq[i].vsi);
2658 pf->vmdq[i].vsi = NULL;
2659 }
2660 rte_free(pf->vmdq);
2661 pf->vmdq = NULL;
2662
2663
2664 i40e_vsi_release(pf->main_vsi);
2665
2666
2667 i40e_aq_queue_shutdown(hw, true);
2668 i40e_shutdown_adminq(hw);
2669
2670 i40e_res_pool_destroy(&pf->qp_pool);
2671 i40e_res_pool_destroy(&pf->msix_pool);
2672
2673
2674 if (!pf->support_multi_driver)
2675 i40e_flex_payload_reg_set_default(hw);
2676
2677
2678 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2679 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2680 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2681 I40E_WRITE_FLUSH(hw);
2682
2683
2684 i40e_clear_pxe_mode(hw);
2685
2686
2687 memset(&settings, 0, sizeof(settings));
2688 ret = i40e_set_filter_control(hw, &settings);
2689 if (ret)
2690 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2691 ret);
2692
2693
2694 hw->fc.requested_mode = I40E_FC_NONE;
2695 i40e_set_fc(hw, &aq_fail, TRUE);
2696
2697
2698 i40e_pf_host_uninit(dev);
2699
2700 do {
2701 ret = rte_intr_callback_unregister(intr_handle,
2702 i40e_dev_interrupt_handler, dev);
2703 if (ret >= 0 || ret == -ENOENT) {
2704 break;
2705 } else if (ret != -EAGAIN) {
2706 PMD_INIT_LOG(ERR,
2707 "intr callback unregister failed: %d",
2708 ret);
2709 }
2710 i40e_msec_delay(500);
2711 } while (retries++ < 5);
2712
2713 i40e_rm_ethtype_filter_list(pf);
2714 i40e_rm_tunnel_filter_list(pf);
2715 i40e_rm_fdir_filter_list(pf);
2716
2717
2718 while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2719 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2720
2721 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2722 rte_free(p_flow);
2723 }
2724
2725
2726 i40e_fdir_memory_cleanup(pf);
2727
2728
2729 i40e_tm_conf_uninit(dev);
2730
2731 i40e_clear_automask(pf);
2732
2733 hw->adapter_closed = 1;
2734 return ret;
2735}
2736
2737
2738
2739
2740static int
2741i40e_dev_reset(struct rte_eth_dev *dev)
2742{
2743 int ret;
2744
2745
2746
2747
2748
2749
2750
2751 if (dev->data->sriov.active)
2752 return -ENOTSUP;
2753
2754 ret = eth_i40e_dev_uninit(dev);
2755 if (ret)
2756 return ret;
2757
2758 ret = eth_i40e_dev_init(dev, NULL);
2759
2760 return ret;
2761}
2762
2763static int
2764i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2765{
2766 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2767 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2768 struct i40e_vsi *vsi = pf->main_vsi;
2769 int status;
2770
2771 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2772 true, NULL, true);
2773 if (status != I40E_SUCCESS) {
2774 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2775 return -EAGAIN;
2776 }
2777
2778 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2779 TRUE, NULL);
2780 if (status != I40E_SUCCESS) {
2781 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2782
2783 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2784 false, NULL, true);
2785 return -EAGAIN;
2786 }
2787
2788 return 0;
2789}
2790
2791static int
2792i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2793{
2794 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2795 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2796 struct i40e_vsi *vsi = pf->main_vsi;
2797 int status;
2798
2799 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2800 false, NULL, true);
2801 if (status != I40E_SUCCESS) {
2802 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2803 return -EAGAIN;
2804 }
2805
2806
2807 if (dev->data->all_multicast == 1)
2808 return 0;
2809
2810 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2811 false, NULL);
2812 if (status != I40E_SUCCESS) {
2813 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2814
2815 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2816 true, NULL, true);
2817 return -EAGAIN;
2818 }
2819
2820 return 0;
2821}
2822
2823static int
2824i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2825{
2826 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2827 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2828 struct i40e_vsi *vsi = pf->main_vsi;
2829 int ret;
2830
2831 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2832 if (ret != I40E_SUCCESS) {
2833 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2834 return -EAGAIN;
2835 }
2836
2837 return 0;
2838}
2839
2840static int
2841i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2842{
2843 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2844 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2845 struct i40e_vsi *vsi = pf->main_vsi;
2846 int ret;
2847
2848 if (dev->data->promiscuous == 1)
2849 return 0;
2850
2851 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2852 vsi->seid, FALSE, NULL);
2853 if (ret != I40E_SUCCESS) {
2854 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2855 return -EAGAIN;
2856 }
2857
2858 return 0;
2859}
2860
2861
2862
2863
2864static int
2865i40e_dev_set_link_up(struct rte_eth_dev *dev)
2866{
2867
2868 return i40e_apply_link_speed(dev);
2869}
2870
2871
2872
2873
2874static int
2875i40e_dev_set_link_down(struct rte_eth_dev *dev)
2876{
2877 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2878 uint8_t abilities = 0;
2879 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2880
2881 abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2882 return i40e_phy_conf_link(hw, abilities, speed, false);
2883}
2884
2885static __rte_always_inline void
2886update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2887{
2888
2889#define I40E_PRTMAC_LINKSTA 0x001E2420
2890#define I40E_REG_LINK_UP 0x40000080
2891#define I40E_PRTMAC_MACC 0x001E24E0
2892#define I40E_REG_MACC_25GB 0x00020000
2893#define I40E_REG_SPEED_MASK 0x38000000
2894#define I40E_REG_SPEED_0 0x00000000
2895#define I40E_REG_SPEED_1 0x08000000
2896#define I40E_REG_SPEED_2 0x10000000
2897#define I40E_REG_SPEED_3 0x18000000
2898#define I40E_REG_SPEED_4 0x20000000
2899 uint32_t link_speed;
2900 uint32_t reg_val;
2901
2902 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2903 link_speed = reg_val & I40E_REG_SPEED_MASK;
2904 reg_val &= I40E_REG_LINK_UP;
2905 link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2906
2907 if (unlikely(link->link_status == 0))
2908 return;
2909
2910
2911 switch (link_speed) {
2912 case I40E_REG_SPEED_0:
2913 link->link_speed = ETH_SPEED_NUM_100M;
2914 break;
2915 case I40E_REG_SPEED_1:
2916 link->link_speed = ETH_SPEED_NUM_1G;
2917 break;
2918 case I40E_REG_SPEED_2:
2919 if (hw->mac.type == I40E_MAC_X722)
2920 link->link_speed = ETH_SPEED_NUM_2_5G;
2921 else
2922 link->link_speed = ETH_SPEED_NUM_10G;
2923 break;
2924 case I40E_REG_SPEED_3:
2925 if (hw->mac.type == I40E_MAC_X722) {
2926 link->link_speed = ETH_SPEED_NUM_5G;
2927 } else {
2928 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2929
2930 if (reg_val & I40E_REG_MACC_25GB)
2931 link->link_speed = ETH_SPEED_NUM_25G;
2932 else
2933 link->link_speed = ETH_SPEED_NUM_40G;
2934 }
2935 break;
2936 case I40E_REG_SPEED_4:
2937 if (hw->mac.type == I40E_MAC_X722)
2938 link->link_speed = ETH_SPEED_NUM_10G;
2939 else
2940 link->link_speed = ETH_SPEED_NUM_20G;
2941 break;
2942 default:
2943 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2944 break;
2945 }
2946}
2947
2948static __rte_always_inline void
2949update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2950 bool enable_lse, int wait_to_complete)
2951{
2952#define CHECK_INTERVAL 100
2953#define MAX_REPEAT_TIME 10
2954 uint32_t rep_cnt = MAX_REPEAT_TIME;
2955 struct i40e_link_status link_status;
2956 int status;
2957
2958 memset(&link_status, 0, sizeof(link_status));
2959
2960 do {
2961 memset(&link_status, 0, sizeof(link_status));
2962
2963
2964 status = i40e_aq_get_link_info(hw, enable_lse,
2965 &link_status, NULL);
2966 if (unlikely(status != I40E_SUCCESS)) {
2967 link->link_speed = ETH_SPEED_NUM_NONE;
2968 link->link_duplex = ETH_LINK_FULL_DUPLEX;
2969 PMD_DRV_LOG(ERR, "Failed to get link info");
2970 return;
2971 }
2972
2973 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2974 if (!wait_to_complete || link->link_status)
2975 break;
2976
2977 rte_delay_ms(CHECK_INTERVAL);
2978 } while (--rep_cnt);
2979
2980
2981 switch (link_status.link_speed) {
2982 case I40E_LINK_SPEED_100MB:
2983 link->link_speed = ETH_SPEED_NUM_100M;
2984 break;
2985 case I40E_LINK_SPEED_1GB:
2986 link->link_speed = ETH_SPEED_NUM_1G;
2987 break;
2988 case I40E_LINK_SPEED_10GB:
2989 link->link_speed = ETH_SPEED_NUM_10G;
2990 break;
2991 case I40E_LINK_SPEED_20GB:
2992 link->link_speed = ETH_SPEED_NUM_20G;
2993 break;
2994 case I40E_LINK_SPEED_25GB:
2995 link->link_speed = ETH_SPEED_NUM_25G;
2996 break;
2997 case I40E_LINK_SPEED_40GB:
2998 link->link_speed = ETH_SPEED_NUM_40G;
2999 break;
3000 default:
3001 if (link->link_status)
3002 link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3003 else
3004 link->link_speed = ETH_SPEED_NUM_NONE;
3005 break;
3006 }
3007}
3008
3009int
3010i40e_dev_link_update(struct rte_eth_dev *dev,
3011 int wait_to_complete)
3012{
3013 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3014 struct rte_eth_link link;
3015 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3016 int ret;
3017
3018 memset(&link, 0, sizeof(link));
3019
3020
3021 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3022 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3023 ETH_LINK_SPEED_FIXED);
3024
3025 if (!wait_to_complete && !enable_lse)
3026 update_link_reg(hw, &link);
3027 else
3028 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3029
3030 if (hw->switch_dev)
3031 rte_eth_linkstatus_get(hw->switch_dev, &link);
3032
3033 ret = rte_eth_linkstatus_set(dev, &link);
3034 i40e_notify_all_vfs_link_status(dev);
3035
3036 return ret;
3037}
3038
3039static void
3040i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3041 uint32_t loreg, bool offset_loaded, uint64_t *offset,
3042 uint64_t *stat, uint64_t *prev_stat)
3043{
3044 i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3045
3046 if (offset_loaded) {
3047 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3048 *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3049 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3050 }
3051 *prev_stat = *stat;
3052}
3053
3054
3055void
3056i40e_update_vsi_stats(struct i40e_vsi *vsi)
3057{
3058 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3059 struct i40e_eth_stats *nes = &vsi->eth_stats;
3060 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3061 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3062
3063 i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3064 vsi->offset_loaded, &oes->rx_bytes,
3065 &nes->rx_bytes, &vsi->prev_rx_bytes);
3066 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3067 vsi->offset_loaded, &oes->rx_unicast,
3068 &nes->rx_unicast);
3069 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3070 vsi->offset_loaded, &oes->rx_multicast,
3071 &nes->rx_multicast);
3072 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3073 vsi->offset_loaded, &oes->rx_broadcast,
3074 &nes->rx_broadcast);
3075
3076 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3077 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3078
3079 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3080 &oes->rx_discards, &nes->rx_discards);
3081
3082
3083 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3084 &oes->rx_unknown_protocol,
3085 &nes->rx_unknown_protocol);
3086 i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3087 vsi->offset_loaded, &oes->tx_bytes,
3088 &nes->tx_bytes, &vsi->prev_tx_bytes);
3089 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3090 vsi->offset_loaded, &oes->tx_unicast,
3091 &nes->tx_unicast);
3092 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3093 vsi->offset_loaded, &oes->tx_multicast,
3094 &nes->tx_multicast);
3095 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3096 vsi->offset_loaded, &oes->tx_broadcast,
3097 &nes->tx_broadcast);
3098
3099 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3100 &oes->tx_errors, &nes->tx_errors);
3101 vsi->offset_loaded = true;
3102
3103 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3104 vsi->vsi_id);
3105 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
3106 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
3107 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
3108 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
3109 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
3110 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3111 nes->rx_unknown_protocol);
3112 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
3113 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
3114 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
3115 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
3116 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
3117 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
3118 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3119 vsi->vsi_id);
3120}
3121
3122static void
3123i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3124{
3125 unsigned int i;
3126 struct i40e_hw_port_stats *ns = &pf->stats;
3127 struct i40e_hw_port_stats *os = &pf->stats_offset;
3128
3129
3130 i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3131 I40E_GLV_GORCL(hw->port),
3132 pf->offset_loaded,
3133 &pf->internal_stats_offset.rx_bytes,
3134 &pf->internal_stats.rx_bytes,
3135 &pf->internal_prev_rx_bytes);
3136 i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3137 I40E_GLV_GOTCL(hw->port),
3138 pf->offset_loaded,
3139 &pf->internal_stats_offset.tx_bytes,
3140 &pf->internal_stats.tx_bytes,
3141 &pf->internal_prev_tx_bytes);
3142
3143 i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3144 I40E_GLV_UPRCL(hw->port),
3145 pf->offset_loaded,
3146 &pf->internal_stats_offset.rx_unicast,
3147 &pf->internal_stats.rx_unicast);
3148 i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3149 I40E_GLV_MPRCL(hw->port),
3150 pf->offset_loaded,
3151 &pf->internal_stats_offset.rx_multicast,
3152 &pf->internal_stats.rx_multicast);
3153 i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3154 I40E_GLV_BPRCL(hw->port),
3155 pf->offset_loaded,
3156 &pf->internal_stats_offset.rx_broadcast,
3157 &pf->internal_stats.rx_broadcast);
3158
3159 i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3160 I40E_GLV_UPTCL(hw->port),
3161 pf->offset_loaded,
3162 &pf->internal_stats_offset.tx_unicast,
3163 &pf->internal_stats.tx_unicast);
3164 i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3165 I40E_GLV_MPTCL(hw->port),
3166 pf->offset_loaded,
3167 &pf->internal_stats_offset.tx_multicast,
3168 &pf->internal_stats.tx_multicast);
3169 i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3170 I40E_GLV_BPTCL(hw->port),
3171 pf->offset_loaded,
3172 &pf->internal_stats_offset.tx_broadcast,
3173 &pf->internal_stats.tx_broadcast);
3174
3175
3176 pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3177 pf->internal_stats.rx_multicast +
3178 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3179
3180
3181 i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3182 I40E_GLPRT_GORCL(hw->port),
3183 pf->offset_loaded, &os->eth.rx_bytes,
3184 &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3185 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3186 I40E_GLPRT_UPRCL(hw->port),
3187 pf->offset_loaded, &os->eth.rx_unicast,
3188 &ns->eth.rx_unicast);
3189 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3190 I40E_GLPRT_MPRCL(hw->port),
3191 pf->offset_loaded, &os->eth.rx_multicast,
3192 &ns->eth.rx_multicast);
3193 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3194 I40E_GLPRT_BPRCL(hw->port),
3195 pf->offset_loaded, &os->eth.rx_broadcast,
3196 &ns->eth.rx_broadcast);
3197
3198
3199
3200
3201 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3202 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3203
3204
3205
3206
3207
3208
3209
3210 if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3211 ns->eth.rx_bytes = 0;
3212 else
3213 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3214
3215 if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3216 ns->eth.rx_unicast = 0;
3217 else
3218 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3219
3220 if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3221 ns->eth.rx_multicast = 0;
3222 else
3223 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3224
3225 if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3226 ns->eth.rx_broadcast = 0;
3227 else
3228 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3229
3230 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3231 pf->offset_loaded, &os->eth.rx_discards,
3232 &ns->eth.rx_discards);
3233
3234
3235 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3236 pf->offset_loaded,
3237 &os->eth.rx_unknown_protocol,
3238 &ns->eth.rx_unknown_protocol);
3239 i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3240 I40E_GLPRT_GOTCL(hw->port),
3241 pf->offset_loaded, &os->eth.tx_bytes,
3242 &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3243 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3244 I40E_GLPRT_UPTCL(hw->port),
3245 pf->offset_loaded, &os->eth.tx_unicast,
3246 &ns->eth.tx_unicast);
3247 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3248 I40E_GLPRT_MPTCL(hw->port),
3249 pf->offset_loaded, &os->eth.tx_multicast,
3250 &ns->eth.tx_multicast);
3251 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3252 I40E_GLPRT_BPTCL(hw->port),
3253 pf->offset_loaded, &os->eth.tx_broadcast,
3254 &ns->eth.tx_broadcast);
3255 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3256 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3257
3258
3259
3260
3261
3262
3263
3264 if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3265 ns->eth.tx_bytes = 0;
3266 else
3267 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3268
3269 if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3270 ns->eth.tx_unicast = 0;
3271 else
3272 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3273
3274 if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3275 ns->eth.tx_multicast = 0;
3276 else
3277 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3278
3279 if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3280 ns->eth.tx_broadcast = 0;
3281 else
3282 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3283
3284
3285
3286
3287 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3288 pf->offset_loaded, &os->tx_dropped_link_down,
3289 &ns->tx_dropped_link_down);
3290 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3291 pf->offset_loaded, &os->crc_errors,
3292 &ns->crc_errors);
3293 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3294 pf->offset_loaded, &os->illegal_bytes,
3295 &ns->illegal_bytes);
3296
3297 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3298 pf->offset_loaded, &os->mac_local_faults,
3299 &ns->mac_local_faults);
3300 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3301 pf->offset_loaded, &os->mac_remote_faults,
3302 &ns->mac_remote_faults);
3303 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3304 pf->offset_loaded, &os->rx_length_errors,
3305 &ns->rx_length_errors);
3306 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3307 pf->offset_loaded, &os->link_xon_rx,
3308 &ns->link_xon_rx);
3309 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3310 pf->offset_loaded, &os->link_xoff_rx,
3311 &ns->link_xoff_rx);
3312 for (i = 0; i < 8; i++) {
3313 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3314 pf->offset_loaded,
3315 &os->priority_xon_rx[i],
3316 &ns->priority_xon_rx[i]);
3317 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3318 pf->offset_loaded,
3319 &os->priority_xoff_rx[i],
3320 &ns->priority_xoff_rx[i]);
3321 }
3322 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3323 pf->offset_loaded, &os->link_xon_tx,
3324 &ns->link_xon_tx);
3325 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3326 pf->offset_loaded, &os->link_xoff_tx,
3327 &ns->link_xoff_tx);
3328 for (i = 0; i < 8; i++) {
3329 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3330 pf->offset_loaded,
3331 &os->priority_xon_tx[i],
3332 &ns->priority_xon_tx[i]);
3333 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3334 pf->offset_loaded,
3335 &os->priority_xoff_tx[i],
3336 &ns->priority_xoff_tx[i]);
3337 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3338 pf->offset_loaded,
3339 &os->priority_xon_2_xoff[i],
3340 &ns->priority_xon_2_xoff[i]);
3341 }
3342 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3343 I40E_GLPRT_PRC64L(hw->port),
3344 pf->offset_loaded, &os->rx_size_64,
3345 &ns->rx_size_64);
3346 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3347 I40E_GLPRT_PRC127L(hw->port),
3348 pf->offset_loaded, &os->rx_size_127,
3349 &ns->rx_size_127);
3350 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3351 I40E_GLPRT_PRC255L(hw->port),
3352 pf->offset_loaded, &os->rx_size_255,
3353 &ns->rx_size_255);
3354 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3355 I40E_GLPRT_PRC511L(hw->port),
3356 pf->offset_loaded, &os->rx_size_511,
3357 &ns->rx_size_511);
3358 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3359 I40E_GLPRT_PRC1023L(hw->port),
3360 pf->offset_loaded, &os->rx_size_1023,
3361 &ns->rx_size_1023);
3362 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3363 I40E_GLPRT_PRC1522L(hw->port),
3364 pf->offset_loaded, &os->rx_size_1522,
3365 &ns->rx_size_1522);
3366 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3367 I40E_GLPRT_PRC9522L(hw->port),
3368 pf->offset_loaded, &os->rx_size_big,
3369 &ns->rx_size_big);
3370 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3371 pf->offset_loaded, &os->rx_undersize,
3372 &ns->rx_undersize);
3373 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3374 pf->offset_loaded, &os->rx_fragments,
3375 &ns->rx_fragments);
3376 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3377 pf->offset_loaded, &os->rx_oversize,
3378 &ns->rx_oversize);
3379 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3380 pf->offset_loaded, &os->rx_jabber,
3381 &ns->rx_jabber);
3382 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3383 I40E_GLPRT_PTC64L(hw->port),
3384 pf->offset_loaded, &os->tx_size_64,
3385 &ns->tx_size_64);
3386 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3387 I40E_GLPRT_PTC127L(hw->port),
3388 pf->offset_loaded, &os->tx_size_127,
3389 &ns->tx_size_127);
3390 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3391 I40E_GLPRT_PTC255L(hw->port),
3392 pf->offset_loaded, &os->tx_size_255,
3393 &ns->tx_size_255);
3394 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3395 I40E_GLPRT_PTC511L(hw->port),
3396 pf->offset_loaded, &os->tx_size_511,
3397 &ns->tx_size_511);
3398 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3399 I40E_GLPRT_PTC1023L(hw->port),
3400 pf->offset_loaded, &os->tx_size_1023,
3401 &ns->tx_size_1023);
3402 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3403 I40E_GLPRT_PTC1522L(hw->port),
3404 pf->offset_loaded, &os->tx_size_1522,
3405 &ns->tx_size_1522);
3406 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3407 I40E_GLPRT_PTC9522L(hw->port),
3408 pf->offset_loaded, &os->tx_size_big,
3409 &ns->tx_size_big);
3410 i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3411 pf->offset_loaded,
3412 &os->fd_sb_match, &ns->fd_sb_match);
3413
3414
3415
3416 pf->offset_loaded = true;
3417
3418 if (pf->main_vsi)
3419 i40e_update_vsi_stats(pf->main_vsi);
3420}
3421
3422
3423static int
3424i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3425{
3426 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3427 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3428 struct i40e_hw_port_stats *ns = &pf->stats;
3429 struct i40e_vsi *vsi;
3430 unsigned i;
3431
3432
3433 i40e_read_stats_registers(pf, hw);
3434
3435 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3436 pf->main_vsi->eth_stats.rx_multicast +
3437 pf->main_vsi->eth_stats.rx_broadcast -
3438 pf->main_vsi->eth_stats.rx_discards;
3439 stats->opackets = ns->eth.tx_unicast +
3440 ns->eth.tx_multicast +
3441 ns->eth.tx_broadcast;
3442 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
3443 stats->obytes = ns->eth.tx_bytes;
3444 stats->oerrors = ns->eth.tx_errors +
3445 pf->main_vsi->eth_stats.tx_errors;
3446
3447
3448 stats->imissed = ns->eth.rx_discards +
3449 pf->main_vsi->eth_stats.rx_discards;
3450 stats->ierrors = ns->crc_errors +
3451 ns->rx_length_errors + ns->rx_undersize +
3452 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3453
3454 if (pf->vfs) {
3455 for (i = 0; i < pf->vf_num; i++) {
3456 vsi = pf->vfs[i].vsi;
3457 i40e_update_vsi_stats(vsi);
3458
3459 stats->ipackets += (vsi->eth_stats.rx_unicast +
3460 vsi->eth_stats.rx_multicast +
3461 vsi->eth_stats.rx_broadcast -
3462 vsi->eth_stats.rx_discards);
3463 stats->ibytes += vsi->eth_stats.rx_bytes;
3464 stats->oerrors += vsi->eth_stats.tx_errors;
3465 stats->imissed += vsi->eth_stats.rx_discards;
3466 }
3467 }
3468
3469 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3470 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
3471 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3472 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
3473 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
3474 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
3475 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3476 ns->eth.rx_unknown_protocol);
3477 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
3478 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3479 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
3480 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
3481 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
3482 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
3483
3484 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
3485 ns->tx_dropped_link_down);
3486 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3487 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
3488 ns->illegal_bytes);
3489 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
3490 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
3491 ns->mac_local_faults);
3492 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
3493 ns->mac_remote_faults);
3494 PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
3495 ns->rx_length_errors);
3496 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
3497 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
3498 for (i = 0; i < 8; i++) {
3499 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
3500 i, ns->priority_xon_rx[i]);
3501 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
3502 i, ns->priority_xoff_rx[i]);
3503 }
3504 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
3505 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
3506 for (i = 0; i < 8; i++) {
3507 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
3508 i, ns->priority_xon_tx[i]);
3509 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
3510 i, ns->priority_xoff_tx[i]);
3511 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
3512 i, ns->priority_xon_2_xoff[i]);
3513 }
3514 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
3515 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
3516 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
3517 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
3518 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
3519 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
3520 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
3521 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
3522 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
3523 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
3524 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
3525 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
3526 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
3527 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
3528 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
3529 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
3530 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
3531 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
3532 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3533 ns->mac_short_packet_dropped);
3534 PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
3535 ns->checksum_error);
3536 PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
3537 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3538 return 0;
3539}
3540
3541
3542static int
3543i40e_dev_stats_reset(struct rte_eth_dev *dev)
3544{
3545 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3546 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3547
3548
3549 pf->offset_loaded = false;
3550 if (pf->main_vsi)
3551 pf->main_vsi->offset_loaded = false;
3552
3553
3554 i40e_read_stats_registers(pf, hw);
3555
3556 return 0;
3557}
3558
3559static uint32_t
3560i40e_xstats_calc_num(void)
3561{
3562 return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3563 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3564 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3565}
3566
3567static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3568 struct rte_eth_xstat_name *xstats_names,
3569 __rte_unused unsigned limit)
3570{
3571 unsigned count = 0;
3572 unsigned i, prio;
3573
3574 if (xstats_names == NULL)
3575 return i40e_xstats_calc_num();
3576
3577
3578
3579
3580 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3581 strlcpy(xstats_names[count].name,
3582 rte_i40e_stats_strings[i].name,
3583 sizeof(xstats_names[count].name));
3584 count++;
3585 }
3586
3587
3588 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3589 strlcpy(xstats_names[count].name,
3590 rte_i40e_hw_port_strings[i].name,
3591 sizeof(xstats_names[count].name));
3592 count++;
3593 }
3594
3595 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3596 for (prio = 0; prio < 8; prio++) {
3597 snprintf(xstats_names[count].name,
3598 sizeof(xstats_names[count].name),
3599 "rx_priority%u_%s", prio,
3600 rte_i40e_rxq_prio_strings[i].name);
3601 count++;
3602 }
3603 }
3604
3605 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3606 for (prio = 0; prio < 8; prio++) {
3607 snprintf(xstats_names[count].name,
3608 sizeof(xstats_names[count].name),
3609 "tx_priority%u_%s", prio,
3610 rte_i40e_txq_prio_strings[i].name);
3611 count++;
3612 }
3613 }
3614 return count;
3615}
3616
3617static int
3618i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3619 unsigned n)
3620{
3621 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3622 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3623 unsigned i, count, prio;
3624 struct i40e_hw_port_stats *hw_stats = &pf->stats;
3625
3626 count = i40e_xstats_calc_num();
3627 if (n < count)
3628 return count;
3629
3630 i40e_read_stats_registers(pf, hw);
3631
3632 if (xstats == NULL)
3633 return 0;
3634
3635 count = 0;
3636
3637
3638 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3639 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3640 rte_i40e_stats_strings[i].offset);
3641 xstats[count].id = count;
3642 count++;
3643 }
3644
3645
3646 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3647 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3648 rte_i40e_hw_port_strings[i].offset);
3649 xstats[count].id = count;
3650 count++;
3651 }
3652
3653 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3654 for (prio = 0; prio < 8; prio++) {
3655 xstats[count].value =
3656 *(uint64_t *)(((char *)hw_stats) +
3657 rte_i40e_rxq_prio_strings[i].offset +
3658 (sizeof(uint64_t) * prio));
3659 xstats[count].id = count;
3660 count++;
3661 }
3662 }
3663
3664 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3665 for (prio = 0; prio < 8; prio++) {
3666 xstats[count].value =
3667 *(uint64_t *)(((char *)hw_stats) +
3668 rte_i40e_txq_prio_strings[i].offset +
3669 (sizeof(uint64_t) * prio));
3670 xstats[count].id = count;
3671 count++;
3672 }
3673 }
3674
3675 return count;
3676}
3677
3678static int
3679i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3680{
3681 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3682 u32 full_ver;
3683 u8 ver, patch;
3684 u16 build;
3685 int ret;
3686
3687 full_ver = hw->nvm.oem_ver;
3688 ver = (u8)(full_ver >> 24);
3689 build = (u16)((full_ver >> 8) & 0xffff);
3690 patch = (u8)(full_ver & 0xff);
3691
3692 ret = snprintf(fw_version, fw_size,
3693 "%d.%d%d 0x%08x %d.%d.%d",
3694 ((hw->nvm.version >> 12) & 0xf),
3695 ((hw->nvm.version >> 4) & 0xff),
3696 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3697 ver, build, patch);
3698 if (ret < 0)
3699 return -EINVAL;
3700
3701 ret += 1;
3702 if (fw_size < (size_t)ret)
3703 return ret;
3704 else
3705 return 0;
3706}
3707
3708
3709
3710
3711
3712
3713
3714static bool
3715i40e_need_stop_lldp(struct rte_eth_dev *dev)
3716{
3717 double nvm_ver;
3718 char ver_str[64] = {0};
3719 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3720
3721 i40e_fw_version_get(dev, ver_str, 64);
3722 nvm_ver = atof(ver_str);
3723 if ((hw->mac.type == I40E_MAC_X722 ||
3724 hw->mac.type == I40E_MAC_X722_VF) &&
3725 ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3726 return true;
3727 else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3728 return true;
3729
3730 return false;
3731}
3732
3733static int
3734i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3735{
3736 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3737 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3738 struct i40e_vsi *vsi = pf->main_vsi;
3739 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3740
3741 dev_info->max_rx_queues = vsi->nb_qps;
3742 dev_info->max_tx_queues = vsi->nb_qps;
3743 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3744 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3745 dev_info->max_mac_addrs = vsi->max_macaddrs;
3746 dev_info->max_vfs = pci_dev->max_vfs;
3747 dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3748 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3749 dev_info->rx_queue_offload_capa = 0;
3750 dev_info->rx_offload_capa =
3751 DEV_RX_OFFLOAD_VLAN_STRIP |
3752 DEV_RX_OFFLOAD_QINQ_STRIP |
3753 DEV_RX_OFFLOAD_IPV4_CKSUM |
3754 DEV_RX_OFFLOAD_UDP_CKSUM |
3755 DEV_RX_OFFLOAD_TCP_CKSUM |
3756 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3757 DEV_RX_OFFLOAD_KEEP_CRC |
3758 DEV_RX_OFFLOAD_SCATTER |
3759 DEV_RX_OFFLOAD_VLAN_EXTEND |
3760 DEV_RX_OFFLOAD_VLAN_FILTER |
3761 DEV_RX_OFFLOAD_JUMBO_FRAME |
3762 DEV_RX_OFFLOAD_RSS_HASH;
3763
3764 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3765 dev_info->tx_offload_capa =
3766 DEV_TX_OFFLOAD_VLAN_INSERT |
3767 DEV_TX_OFFLOAD_QINQ_INSERT |
3768 DEV_TX_OFFLOAD_IPV4_CKSUM |
3769 DEV_TX_OFFLOAD_UDP_CKSUM |
3770 DEV_TX_OFFLOAD_TCP_CKSUM |
3771 DEV_TX_OFFLOAD_SCTP_CKSUM |
3772 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3773 DEV_TX_OFFLOAD_TCP_TSO |
3774 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3775 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3776 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3777 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3778 DEV_TX_OFFLOAD_MULTI_SEGS |
3779 dev_info->tx_queue_offload_capa;
3780 dev_info->dev_capa =
3781 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3782 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3783
3784 dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3785 sizeof(uint32_t);
3786 dev_info->reta_size = pf->hash_lut_size;
3787 dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3788
3789 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3790 .rx_thresh = {
3791 .pthresh = I40E_DEFAULT_RX_PTHRESH,
3792 .hthresh = I40E_DEFAULT_RX_HTHRESH,
3793 .wthresh = I40E_DEFAULT_RX_WTHRESH,
3794 },
3795 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3796 .rx_drop_en = 0,
3797 .offloads = 0,
3798 };
3799
3800 dev_info->default_txconf = (struct rte_eth_txconf) {
3801 .tx_thresh = {
3802 .pthresh = I40E_DEFAULT_TX_PTHRESH,
3803 .hthresh = I40E_DEFAULT_TX_HTHRESH,
3804 .wthresh = I40E_DEFAULT_TX_WTHRESH,
3805 },
3806 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3807 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3808 .offloads = 0,
3809 };
3810
3811 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3812 .nb_max = I40E_MAX_RING_DESC,
3813 .nb_min = I40E_MIN_RING_DESC,
3814 .nb_align = I40E_ALIGN_RING_DESC,
3815 };
3816
3817 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3818 .nb_max = I40E_MAX_RING_DESC,
3819 .nb_min = I40E_MIN_RING_DESC,
3820 .nb_align = I40E_ALIGN_RING_DESC,
3821 .nb_seg_max = I40E_TX_MAX_SEG,
3822 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3823 };
3824
3825 if (pf->flags & I40E_FLAG_VMDQ) {
3826 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3827 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3828 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3829 pf->max_nb_vmdq_vsi;
3830 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3831 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3832 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3833 }
3834
3835 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3836
3837 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3838 dev_info->default_rxportconf.nb_queues = 2;
3839 dev_info->default_txportconf.nb_queues = 2;
3840 if (dev->data->nb_rx_queues == 1)
3841 dev_info->default_rxportconf.ring_size = 2048;
3842 else
3843 dev_info->default_rxportconf.ring_size = 1024;
3844 if (dev->data->nb_tx_queues == 1)
3845 dev_info->default_txportconf.ring_size = 1024;
3846 else
3847 dev_info->default_txportconf.ring_size = 512;
3848
3849 } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3850
3851 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3852 dev_info->default_rxportconf.nb_queues = 1;
3853 dev_info->default_txportconf.nb_queues = 1;
3854 dev_info->default_rxportconf.ring_size = 256;
3855 dev_info->default_txportconf.ring_size = 256;
3856 } else {
3857
3858 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3859 dev_info->default_rxportconf.nb_queues = 1;
3860 dev_info->default_txportconf.nb_queues = 1;
3861 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3862 dev_info->default_rxportconf.ring_size = 512;
3863 dev_info->default_txportconf.ring_size = 256;
3864 } else {
3865 dev_info->default_rxportconf.ring_size = 256;
3866 dev_info->default_txportconf.ring_size = 256;
3867 }
3868 }
3869 dev_info->default_rxportconf.burst_size = 32;
3870 dev_info->default_txportconf.burst_size = 32;
3871
3872 return 0;
3873}
3874
3875static int
3876i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3877{
3878 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3879 struct i40e_vsi *vsi = pf->main_vsi;
3880 PMD_INIT_FUNC_TRACE();
3881
3882 if (on)
3883 return i40e_vsi_add_vlan(vsi, vlan_id);
3884 else
3885 return i40e_vsi_delete_vlan(vsi, vlan_id);
3886}
3887
3888static int
3889i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3890 enum rte_vlan_type vlan_type,
3891 uint16_t tpid, int qinq)
3892{
3893 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3894 uint64_t reg_r = 0;
3895 uint64_t reg_w = 0;
3896 uint16_t reg_id = 3;
3897 int ret;
3898
3899 if (qinq) {
3900 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3901 reg_id = 2;
3902 }
3903
3904 ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3905 ®_r, NULL);
3906 if (ret != I40E_SUCCESS) {
3907 PMD_DRV_LOG(ERR,
3908 "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3909 reg_id);
3910 return -EIO;
3911 }
3912 PMD_DRV_LOG(DEBUG,
3913 "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3914 reg_id, reg_r);
3915
3916 reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3917 reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3918 if (reg_r == reg_w) {
3919 PMD_DRV_LOG(DEBUG, "No need to write");
3920 return 0;
3921 }
3922
3923 ret = i40e_aq_debug_write_global_register(hw,
3924 I40E_GL_SWT_L2TAGCTRL(reg_id),
3925 reg_w, NULL);
3926 if (ret != I40E_SUCCESS) {
3927 PMD_DRV_LOG(ERR,
3928 "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3929 reg_id);
3930 return -EIO;
3931 }
3932 PMD_DRV_LOG(DEBUG,
3933 "Global register 0x%08x is changed with value 0x%08x",
3934 I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3935
3936 return 0;
3937}
3938
3939static int
3940i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3941 enum rte_vlan_type vlan_type,
3942 uint16_t tpid)
3943{
3944 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3945 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3946 int qinq = dev->data->dev_conf.rxmode.offloads &
3947 DEV_RX_OFFLOAD_VLAN_EXTEND;
3948 int ret = 0;
3949
3950 if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3951 vlan_type != ETH_VLAN_TYPE_OUTER) ||
3952 (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3953 PMD_DRV_LOG(ERR,
3954 "Unsupported vlan type.");
3955 return -EINVAL;
3956 }
3957
3958 if (pf->support_multi_driver) {
3959 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3960 return -ENOTSUP;
3961 }
3962
3963
3964 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3965 if (qinq) {
3966 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3967 hw->first_tag = rte_cpu_to_le_16(tpid);
3968 else if (vlan_type == ETH_VLAN_TYPE_INNER)
3969 hw->second_tag = rte_cpu_to_le_16(tpid);
3970 } else {
3971 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3972 hw->second_tag = rte_cpu_to_le_16(tpid);
3973 }
3974 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3975 if (ret != I40E_SUCCESS) {
3976 PMD_DRV_LOG(ERR,
3977 "Set switch config failed aq_err: %d",
3978 hw->aq.asq_last_status);
3979 ret = -EIO;
3980 }
3981 } else
3982
3983 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3984 tpid, qinq);
3985
3986 return ret;
3987}
3988
3989
3990static int
3991i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3992{
3993 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3994 int ret = I40E_SUCCESS;
3995 uint32_t reg;
3996
3997 if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
3998 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
3999 return -EINVAL;
4000 }
4001
4002
4003 reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4004
4005 if (on)
4006 reg |= I40E_VSI_TSR_QINQ_STRIP;
4007 else
4008 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4009
4010 ret = i40e_aq_debug_write_register(hw,
4011 I40E_VSI_TSR(vsi->vsi_id),
4012 reg, NULL);
4013 if (ret < 0) {
4014 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4015 vsi->vsi_id);
4016 return I40E_ERR_CONFIG;
4017 }
4018
4019 return ret;
4020}
4021
4022static int
4023i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4024{
4025 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4026 struct i40e_vsi *vsi = pf->main_vsi;
4027 struct rte_eth_rxmode *rxmode;
4028
4029 rxmode = &dev->data->dev_conf.rxmode;
4030 if (mask & ETH_VLAN_FILTER_MASK) {
4031 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4032 i40e_vsi_config_vlan_filter(vsi, TRUE);
4033 else
4034 i40e_vsi_config_vlan_filter(vsi, FALSE);
4035 }
4036
4037 if (mask & ETH_VLAN_STRIP_MASK) {
4038
4039 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4040 i40e_vsi_config_vlan_stripping(vsi, TRUE);
4041 else
4042 i40e_vsi_config_vlan_stripping(vsi, FALSE);
4043 }
4044
4045 if (mask & ETH_VLAN_EXTEND_MASK) {
4046 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4047 i40e_vsi_config_double_vlan(vsi, TRUE);
4048
4049 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4050 RTE_ETHER_TYPE_VLAN);
4051 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4052 RTE_ETHER_TYPE_VLAN);
4053 }
4054 else
4055 i40e_vsi_config_double_vlan(vsi, FALSE);
4056 }
4057
4058 if (mask & ETH_QINQ_STRIP_MASK) {
4059
4060 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4061 i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4062 else
4063 i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4064 }
4065
4066 return 0;
4067}
4068
4069static void
4070i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4071 __rte_unused uint16_t queue,
4072 __rte_unused int on)
4073{
4074 PMD_INIT_FUNC_TRACE();
4075}
4076
4077static int
4078i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4079{
4080 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4081 struct i40e_vsi *vsi = pf->main_vsi;
4082 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4083 struct i40e_vsi_vlan_pvid_info info;
4084
4085 memset(&info, 0, sizeof(info));
4086 info.on = on;
4087 if (info.on)
4088 info.config.pvid = pvid;
4089 else {
4090 info.config.reject.tagged =
4091 data->dev_conf.txmode.hw_vlan_reject_tagged;
4092 info.config.reject.untagged =
4093 data->dev_conf.txmode.hw_vlan_reject_untagged;
4094 }
4095
4096 return i40e_vsi_vlan_pvid_set(vsi, &info);
4097}
4098
4099static int
4100i40e_dev_led_on(struct rte_eth_dev *dev)
4101{
4102 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4103 uint32_t mode = i40e_led_get(hw);
4104
4105 if (mode == 0)
4106 i40e_led_set(hw, 0xf, true);
4107
4108 return 0;
4109}
4110
4111static int
4112i40e_dev_led_off(struct rte_eth_dev *dev)
4113{
4114 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115 uint32_t mode = i40e_led_get(hw);
4116
4117 if (mode != 0)
4118 i40e_led_set(hw, 0, false);
4119
4120 return 0;
4121}
4122
4123static int
4124i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4125{
4126 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4127 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4128
4129 fc_conf->pause_time = pf->fc_conf.pause_time;
4130
4131
4132 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4133 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4134 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4135 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4136
4137 fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4138 fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4139
4140
4141 switch (hw->fc.current_mode) {
4142 case I40E_FC_FULL:
4143 fc_conf->mode = RTE_FC_FULL;
4144 break;
4145 case I40E_FC_TX_PAUSE:
4146 fc_conf->mode = RTE_FC_TX_PAUSE;
4147 break;
4148 case I40E_FC_RX_PAUSE:
4149 fc_conf->mode = RTE_FC_RX_PAUSE;
4150 break;
4151 case I40E_FC_NONE:
4152 default:
4153 fc_conf->mode = RTE_FC_NONE;
4154 };
4155
4156 return 0;
4157}
4158
4159static int
4160i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4161{
4162 uint32_t mflcn_reg, fctrl_reg, reg;
4163 uint32_t max_high_water;
4164 uint8_t i, aq_failure;
4165 int err;
4166 struct i40e_hw *hw;
4167 struct i40e_pf *pf;
4168 enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4169 [RTE_FC_NONE] = I40E_FC_NONE,
4170 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4171 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4172 [RTE_FC_FULL] = I40E_FC_FULL
4173 };
4174
4175
4176
4177 max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4178 if ((fc_conf->high_water > max_high_water) ||
4179 (fc_conf->high_water < fc_conf->low_water)) {
4180 PMD_INIT_LOG(ERR,
4181 "Invalid high/low water setup value in KB, High_water must be <= %d.",
4182 max_high_water);
4183 return -EINVAL;
4184 }
4185
4186 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4187 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4188 hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4189
4190 pf->fc_conf.pause_time = fc_conf->pause_time;
4191 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4192 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4193
4194 PMD_INIT_FUNC_TRACE();
4195
4196
4197
4198
4199 err = i40e_set_fc(hw, &aq_failure, true);
4200 if (err < 0)
4201 return -ENOSYS;
4202
4203 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4204
4205
4206
4207
4208
4209 I40E_WRITE_REG(hw,
4210 I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4211 pf->fc_conf.pause_time);
4212
4213
4214
4215
4216
4217
4218 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4219 pf->fc_conf.pause_time);
4220
4221 fctrl_reg = I40E_READ_REG(hw,
4222 I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4223
4224 if (fc_conf->mac_ctrl_frame_fwd != 0)
4225 fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4226 else
4227 fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4228
4229 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4230 fctrl_reg);
4231 } else {
4232
4233 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4234 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4235 I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4236
4237
4238 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4239 pf->fc_conf.pause_time / 2);
4240
4241 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4242
4243
4244
4245
4246 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4247 mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4248 mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4249 } else {
4250 mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4251 mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4252 }
4253
4254 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4255 }
4256
4257 if (!pf->support_multi_driver) {
4258
4259 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4260 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4261 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4262 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4263 (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4264 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4265 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4266 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4267 << I40E_KILOSHIFT);
4268 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4269 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4270 << I40E_KILOSHIFT);
4271 } else {
4272 PMD_DRV_LOG(ERR,
4273 "Water marker configuration is not supported.");
4274 }
4275
4276 I40E_WRITE_FLUSH(hw);
4277
4278 return 0;
4279}
4280
4281static int
4282i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4283 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4284{
4285 PMD_INIT_FUNC_TRACE();
4286
4287 return -ENOSYS;
4288}
4289
4290
4291static int
4292i40e_macaddr_add(struct rte_eth_dev *dev,
4293 struct rte_ether_addr *mac_addr,
4294 __rte_unused uint32_t index,
4295 uint32_t pool)
4296{
4297 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4298 struct i40e_mac_filter_info mac_filter;
4299 struct i40e_vsi *vsi;
4300 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4301 int ret;
4302
4303
4304 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4305 !pf->nb_cfg_vmdq_vsi)) {
4306 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4307 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4308 pool);
4309 return -ENOTSUP;
4310 }
4311
4312 if (pool > pf->nb_cfg_vmdq_vsi) {
4313 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4314 pool, pf->nb_cfg_vmdq_vsi);
4315 return -EINVAL;
4316 }
4317
4318 rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4319 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4320 mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4321 else
4322 mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4323
4324 if (pool == 0)
4325 vsi = pf->main_vsi;
4326 else
4327 vsi = pf->vmdq[pool - 1].vsi;
4328
4329 ret = i40e_vsi_add_mac(vsi, &mac_filter);
4330 if (ret != I40E_SUCCESS) {
4331 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4332 return -ENODEV;
4333 }
4334 return 0;
4335}
4336
4337
4338static void
4339i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4340{
4341 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4342 struct i40e_vsi *vsi;
4343 struct rte_eth_dev_data *data = dev->data;
4344 struct rte_ether_addr *macaddr;
4345 int ret;
4346 uint32_t i;
4347 uint64_t pool_sel;
4348
4349 macaddr = &(data->mac_addrs[index]);
4350
4351 pool_sel = dev->data->mac_pool_sel[index];
4352
4353 for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4354 if (pool_sel & (1ULL << i)) {
4355 if (i == 0)
4356 vsi = pf->main_vsi;
4357 else {
4358
4359 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4360 (i > pf->nb_cfg_vmdq_vsi)) {
4361 PMD_DRV_LOG(ERR,
4362 "No VMDQ pool enabled/configured");
4363 return;
4364 }
4365 vsi = pf->vmdq[i - 1].vsi;
4366 }
4367 ret = i40e_vsi_delete_mac(vsi, macaddr);
4368
4369 if (ret) {
4370 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4371 return;
4372 }
4373 }
4374 }
4375}
4376
4377static int
4378i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4379{
4380 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4381 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4382 uint32_t reg;
4383 int ret;
4384
4385 if (!lut)
4386 return -EINVAL;
4387
4388 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4389 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4390 vsi->type != I40E_VSI_SRIOV,
4391 lut, lut_size);
4392 if (ret) {
4393 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4394 return ret;
4395 }
4396 } else {
4397 uint32_t *lut_dw = (uint32_t *)lut;
4398 uint16_t i, lut_size_dw = lut_size / 4;
4399
4400 if (vsi->type == I40E_VSI_SRIOV) {
4401 for (i = 0; i <= lut_size_dw; i++) {
4402 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4403 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4404 }
4405 } else {
4406 for (i = 0; i < lut_size_dw; i++)
4407 lut_dw[i] = I40E_READ_REG(hw,
4408 I40E_PFQF_HLUT(i));
4409 }
4410 }
4411
4412 return 0;
4413}
4414
4415int
4416i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4417{
4418 struct i40e_pf *pf;
4419 struct i40e_hw *hw;
4420
4421 if (!vsi || !lut)
4422 return -EINVAL;
4423
4424 pf = I40E_VSI_TO_PF(vsi);
4425 hw = I40E_VSI_TO_HW(vsi);
4426
4427 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4428 enum i40e_status_code status;
4429
4430 status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4431 vsi->type != I40E_VSI_SRIOV,
4432 lut, lut_size);
4433 if (status) {
4434 PMD_DRV_LOG(ERR,
4435 "Failed to update RSS lookup table, error status: %d",
4436 status);
4437 return -EIO;
4438 }
4439 } else {
4440 uint32_t *lut_dw = (uint32_t *)lut;
4441 uint16_t i, lut_size_dw = lut_size / 4;
4442
4443 if (vsi->type == I40E_VSI_SRIOV) {
4444 for (i = 0; i < lut_size_dw; i++)
4445 I40E_WRITE_REG(
4446 hw,
4447 I40E_VFQF_HLUT1(i, vsi->user_param),
4448 lut_dw[i]);
4449 } else {
4450 for (i = 0; i < lut_size_dw; i++)
4451 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4452 lut_dw[i]);
4453 }
4454 I40E_WRITE_FLUSH(hw);
4455 }
4456
4457 return 0;
4458}
4459
4460static int
4461i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4462 struct rte_eth_rss_reta_entry64 *reta_conf,
4463 uint16_t reta_size)
4464{
4465 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4466 uint16_t i, lut_size = pf->hash_lut_size;
4467 uint16_t idx, shift;
4468 uint8_t *lut;
4469 int ret;
4470
4471 if (reta_size != lut_size ||
4472 reta_size > ETH_RSS_RETA_SIZE_512) {
4473 PMD_DRV_LOG(ERR,
4474 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4475 reta_size, lut_size);
4476 return -EINVAL;
4477 }
4478
4479 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4480 if (!lut) {
4481 PMD_DRV_LOG(ERR, "No memory can be allocated");
4482 return -ENOMEM;
4483 }
4484 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4485 if (ret)
4486 goto out;
4487 for (i = 0; i < reta_size; i++) {
4488 idx = i / RTE_RETA_GROUP_SIZE;
4489 shift = i % RTE_RETA_GROUP_SIZE;
4490 if (reta_conf[idx].mask & (1ULL << shift))
4491 lut[i] = reta_conf[idx].reta[shift];
4492 }
4493 ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4494
4495 pf->adapter->rss_reta_updated = 1;
4496
4497out:
4498 rte_free(lut);
4499
4500 return ret;
4501}
4502
4503static int
4504i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4505 struct rte_eth_rss_reta_entry64 *reta_conf,
4506 uint16_t reta_size)
4507{
4508 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4509 uint16_t i, lut_size = pf->hash_lut_size;
4510 uint16_t idx, shift;
4511 uint8_t *lut;
4512 int ret;
4513
4514 if (reta_size != lut_size ||
4515 reta_size > ETH_RSS_RETA_SIZE_512) {
4516 PMD_DRV_LOG(ERR,
4517 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4518 reta_size, lut_size);
4519 return -EINVAL;
4520 }
4521
4522 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4523 if (!lut) {
4524 PMD_DRV_LOG(ERR, "No memory can be allocated");
4525 return -ENOMEM;
4526 }
4527
4528 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4529 if (ret)
4530 goto out;
4531 for (i = 0; i < reta_size; i++) {
4532 idx = i / RTE_RETA_GROUP_SIZE;
4533 shift = i % RTE_RETA_GROUP_SIZE;
4534 if (reta_conf[idx].mask & (1ULL << shift))
4535 reta_conf[idx].reta[shift] = lut[i];
4536 }
4537
4538out:
4539 rte_free(lut);
4540
4541 return ret;
4542}
4543
4544
4545
4546
4547
4548
4549
4550
4551enum i40e_status_code
4552i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4553 struct i40e_dma_mem *mem,
4554 u64 size,
4555 u32 alignment)
4556{
4557 static uint64_t i40e_dma_memzone_id;
4558 const struct rte_memzone *mz = NULL;
4559 char z_name[RTE_MEMZONE_NAMESIZE];
4560
4561 if (!mem)
4562 return I40E_ERR_PARAM;
4563
4564 snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4565 __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
4566 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4567 RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4568 if (!mz)
4569 return I40E_ERR_NO_MEMORY;
4570
4571 mem->size = size;
4572 mem->va = mz->addr;
4573 mem->pa = mz->iova;
4574 mem->zone = (const void *)mz;
4575 PMD_DRV_LOG(DEBUG,
4576 "memzone %s allocated with physical address: %"PRIu64,
4577 mz->name, mem->pa);
4578
4579 return I40E_SUCCESS;
4580}
4581
4582
4583
4584
4585
4586
4587enum i40e_status_code
4588i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4589 struct i40e_dma_mem *mem)
4590{
4591 if (!mem)
4592 return I40E_ERR_PARAM;
4593
4594 PMD_DRV_LOG(DEBUG,
4595 "memzone %s to be freed with physical address: %"PRIu64,
4596 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4597 rte_memzone_free((const struct rte_memzone *)mem->zone);
4598 mem->zone = NULL;
4599 mem->va = NULL;
4600 mem->pa = (u64)0;
4601
4602 return I40E_SUCCESS;
4603}
4604
4605
4606
4607
4608
4609
4610
4611enum i40e_status_code
4612i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4613 struct i40e_virt_mem *mem,
4614 u32 size)
4615{
4616 if (!mem)
4617 return I40E_ERR_PARAM;
4618
4619 mem->size = size;
4620 mem->va = rte_zmalloc("i40e", size, 0);
4621
4622 if (mem->va)
4623 return I40E_SUCCESS;
4624 else
4625 return I40E_ERR_NO_MEMORY;
4626}
4627
4628
4629
4630
4631
4632
4633enum i40e_status_code
4634i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4635 struct i40e_virt_mem *mem)
4636{
4637 if (!mem)
4638 return I40E_ERR_PARAM;
4639
4640 rte_free(mem->va);
4641 mem->va = NULL;
4642
4643 return I40E_SUCCESS;
4644}
4645
4646void
4647i40e_init_spinlock_d(struct i40e_spinlock *sp)
4648{
4649 rte_spinlock_init(&sp->spinlock);
4650}
4651
4652void
4653i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4654{
4655 rte_spinlock_lock(&sp->spinlock);
4656}
4657
4658void
4659i40e_release_spinlock_d(struct i40e_spinlock *sp)
4660{
4661 rte_spinlock_unlock(&sp->spinlock);
4662}
4663
4664void
4665i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4666{
4667 return;
4668}
4669
4670
4671
4672
4673
4674static int
4675i40e_get_cap(struct i40e_hw *hw)
4676{
4677 struct i40e_aqc_list_capabilities_element_resp *buf;
4678 uint16_t len, size = 0;
4679 int ret;
4680
4681
4682 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4683 I40E_MAX_CAP_ELE_NUM;
4684 buf = rte_zmalloc("i40e", len, 0);
4685 if (!buf) {
4686 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4687 return I40E_ERR_NO_MEMORY;
4688 }
4689
4690
4691 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4692 i40e_aqc_opc_list_func_capabilities, NULL);
4693 if (ret != I40E_SUCCESS)
4694 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4695
4696
4697 rte_free(buf);
4698
4699 return ret;
4700}
4701
4702#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
4703
4704static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4705 const char *value,
4706 void *opaque)
4707{
4708 struct i40e_pf *pf;
4709 unsigned long num;
4710 char *end;
4711
4712 pf = (struct i40e_pf *)opaque;
4713 RTE_SET_USED(key);
4714
4715 errno = 0;
4716 num = strtoul(value, &end, 0);
4717 if (errno != 0 || end == value || *end != 0) {
4718 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4719 "kept the value = %hu", value, pf->vf_nb_qp_max);
4720 return -(EINVAL);
4721 }
4722
4723 if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4724 pf->vf_nb_qp_max = (uint16_t)num;
4725 else
4726
4727 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4728 "power of 2 and equal or less than 16 !, Now it is "
4729 "kept the value = %hu", num, pf->vf_nb_qp_max);
4730
4731 return 0;
4732}
4733
4734static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4735{
4736 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4737 struct rte_kvargs *kvlist;
4738 int kvargs_count;
4739
4740
4741 pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4742
4743 if (dev->device->devargs == NULL)
4744 return 0;
4745
4746 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4747 if (kvlist == NULL)
4748 return -(EINVAL);
4749
4750 kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4751 if (!kvargs_count) {
4752 rte_kvargs_free(kvlist);
4753 return 0;
4754 }
4755
4756 if (kvargs_count > 1)
4757 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4758 "the first invalid or last valid one is used !",
4759 ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4760
4761 rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4762 i40e_pf_parse_vf_queue_number_handler, pf);
4763
4764 rte_kvargs_free(kvlist);
4765
4766 return 0;
4767}
4768
4769static int
4770i40e_pf_parameter_init(struct rte_eth_dev *dev)
4771{
4772 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4773 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4774 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4775 uint16_t qp_count = 0, vsi_count = 0;
4776
4777 if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4778 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4779 return -EINVAL;
4780 }
4781
4782 i40e_pf_config_vf_rxq_number(dev);
4783
4784
4785 pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4786 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4787 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4788
4789 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4790 pf->max_num_vsi = hw->func_caps.num_vsis;
4791 pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4792 pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4793
4794
4795 pf->fdir_qp_offset = 0;
4796 if (hw->func_caps.fd) {
4797 pf->flags |= I40E_FLAG_FDIR;
4798 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4799 } else {
4800 pf->fdir_nb_qps = 0;
4801 }
4802 qp_count += pf->fdir_nb_qps;
4803 vsi_count += 1;
4804
4805
4806 pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4807 if (!hw->func_caps.rss) {
4808 pf->lan_nb_qps = 1;
4809 } else {
4810 pf->flags |= I40E_FLAG_RSS;
4811 if (hw->mac.type == I40E_MAC_X722)
4812 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4813 pf->lan_nb_qps = pf->lan_nb_qp_max;
4814 }
4815 qp_count += pf->lan_nb_qps;
4816 vsi_count += 1;
4817
4818
4819 pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4820 if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4821 pf->flags |= I40E_FLAG_SRIOV;
4822 pf->vf_nb_qps = pf->vf_nb_qp_max;
4823 pf->vf_num = pci_dev->max_vfs;
4824 PMD_DRV_LOG(DEBUG,
4825 "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4826 pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4827 } else {
4828 pf->vf_nb_qps = 0;
4829 pf->vf_num = 0;
4830 }
4831 qp_count += pf->vf_nb_qps * pf->vf_num;
4832 vsi_count += pf->vf_num;
4833
4834
4835 pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4836 pf->vmdq_nb_qps = 0;
4837 pf->max_nb_vmdq_vsi = 0;
4838 if (hw->func_caps.vmdq) {
4839 if (qp_count < hw->func_caps.num_tx_qp &&
4840 vsi_count < hw->func_caps.num_vsis) {
4841 pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4842 qp_count) / pf->vmdq_nb_qp_max;
4843
4844
4845
4846
4847 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4848 hw->func_caps.num_vsis - vsi_count);
4849 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4850 ETH_64_POOLS);
4851 if (pf->max_nb_vmdq_vsi) {
4852 pf->flags |= I40E_FLAG_VMDQ;
4853 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4854 PMD_DRV_LOG(DEBUG,
4855 "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4856 pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4857 pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4858 } else {
4859 PMD_DRV_LOG(INFO,
4860 "No enough queues left for VMDq");
4861 }
4862 } else {
4863 PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4864 }
4865 }
4866 qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4867 vsi_count += pf->max_nb_vmdq_vsi;
4868
4869 if (hw->func_caps.dcb)
4870 pf->flags |= I40E_FLAG_DCB;
4871
4872 if (qp_count > hw->func_caps.num_tx_qp) {
4873 PMD_DRV_LOG(ERR,
4874 "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4875 qp_count, hw->func_caps.num_tx_qp);
4876 return -EINVAL;
4877 }
4878 if (vsi_count > hw->func_caps.num_vsis) {
4879 PMD_DRV_LOG(ERR,
4880 "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4881 vsi_count, hw->func_caps.num_vsis);
4882 return -EINVAL;
4883 }
4884
4885 return 0;
4886}
4887
4888static int
4889i40e_pf_get_switch_config(struct i40e_pf *pf)
4890{
4891 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4892 struct i40e_aqc_get_switch_config_resp *switch_config;
4893 struct i40e_aqc_switch_config_element_resp *element;
4894 uint16_t start_seid = 0, num_reported;
4895 int ret;
4896
4897 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4898 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4899 if (!switch_config) {
4900 PMD_DRV_LOG(ERR, "Failed to allocated memory");
4901 return -ENOMEM;
4902 }
4903
4904
4905 ret = i40e_aq_get_switch_config(hw, switch_config,
4906 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4907 if (ret != I40E_SUCCESS) {
4908 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4909 goto fail;
4910 }
4911 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4912 if (num_reported != 1) {
4913 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4914 goto fail;
4915 }
4916
4917
4918 element = &(switch_config->element[0]);
4919 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4920 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4921 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4922 } else
4923 PMD_DRV_LOG(INFO, "Unknown element type");
4924
4925fail:
4926 rte_free(switch_config);
4927
4928 return ret;
4929}
4930
4931static int
4932i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4933 uint32_t num)
4934{
4935 struct pool_entry *entry;
4936
4937 if (pool == NULL || num == 0)
4938 return -EINVAL;
4939
4940 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4941 if (entry == NULL) {
4942 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4943 return -ENOMEM;
4944 }
4945
4946
4947 pool->num_free = num;
4948 pool->num_alloc = 0;
4949 pool->base = base;
4950 LIST_INIT(&pool->alloc_list);
4951 LIST_INIT(&pool->free_list);
4952
4953
4954 entry->base = 0;
4955 entry->len = num;
4956
4957 LIST_INSERT_HEAD(&pool->free_list, entry, next);
4958 return 0;
4959}
4960
4961static void
4962i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4963{
4964 struct pool_entry *entry, *next_entry;
4965
4966 if (pool == NULL)
4967 return;
4968
4969 for (entry = LIST_FIRST(&pool->alloc_list);
4970 entry && (next_entry = LIST_NEXT(entry, next), 1);
4971 entry = next_entry) {
4972 LIST_REMOVE(entry, next);
4973 rte_free(entry);
4974 }
4975
4976 for (entry = LIST_FIRST(&pool->free_list);
4977 entry && (next_entry = LIST_NEXT(entry, next), 1);
4978 entry = next_entry) {
4979 LIST_REMOVE(entry, next);
4980 rte_free(entry);
4981 }
4982
4983 pool->num_free = 0;
4984 pool->num_alloc = 0;
4985 pool->base = 0;
4986 LIST_INIT(&pool->alloc_list);
4987 LIST_INIT(&pool->free_list);
4988}
4989
4990static int
4991i40e_res_pool_free(struct i40e_res_pool_info *pool,
4992 uint32_t base)
4993{
4994 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4995 uint32_t pool_offset;
4996 uint16_t len;
4997 int insert;
4998
4999 if (pool == NULL) {
5000 PMD_DRV_LOG(ERR, "Invalid parameter");
5001 return -EINVAL;
5002 }
5003
5004 pool_offset = base - pool->base;
5005
5006 LIST_FOREACH(entry, &pool->alloc_list, next) {
5007 if (entry->base == pool_offset) {
5008 valid_entry = entry;
5009 LIST_REMOVE(entry, next);
5010 break;
5011 }
5012 }
5013
5014
5015 if (valid_entry == NULL) {
5016 PMD_DRV_LOG(ERR, "Failed to find entry");
5017 return -EINVAL;
5018 }
5019
5020
5021
5022
5023
5024
5025 prev = next = NULL;
5026 LIST_FOREACH(entry, &pool->free_list, next) {
5027 if (entry->base > valid_entry->base) {
5028 next = entry;
5029 break;
5030 }
5031 prev = entry;
5032 }
5033
5034 insert = 0;
5035 len = valid_entry->len;
5036
5037 if (next != NULL) {
5038
5039 if (valid_entry->base + len == next->base) {
5040 next->base = valid_entry->base;
5041 next->len += len;
5042 rte_free(valid_entry);
5043 valid_entry = next;
5044 insert = 1;
5045 }
5046 }
5047
5048 if (prev != NULL) {
5049
5050 if (prev->base + prev->len == valid_entry->base) {
5051 prev->len += len;
5052
5053 if (insert == 1) {
5054 LIST_REMOVE(valid_entry, next);
5055 rte_free(valid_entry);
5056 valid_entry = NULL;
5057 } else {
5058 rte_free(valid_entry);
5059 valid_entry = NULL;
5060 insert = 1;
5061 }
5062 }
5063 }
5064
5065
5066 if (insert == 0) {
5067 if (prev != NULL)
5068 LIST_INSERT_AFTER(prev, valid_entry, next);
5069 else if (next != NULL)
5070 LIST_INSERT_BEFORE(next, valid_entry, next);
5071 else
5072 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5073 }
5074
5075 pool->num_free += len;
5076 pool->num_alloc -= len;
5077
5078 return 0;
5079}
5080
5081static int
5082i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5083 uint16_t num)
5084{
5085 struct pool_entry *entry, *valid_entry;
5086
5087 if (pool == NULL || num == 0) {
5088 PMD_DRV_LOG(ERR, "Invalid parameter");
5089 return -EINVAL;
5090 }
5091
5092 if (pool->num_free < num) {
5093 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5094 num, pool->num_free);
5095 return -ENOMEM;
5096 }
5097
5098 valid_entry = NULL;
5099
5100 LIST_FOREACH(entry, &pool->free_list, next) {
5101 if (entry->len >= num) {
5102
5103 if (entry->len == num) {
5104 valid_entry = entry;
5105 break;
5106 }
5107 if (valid_entry == NULL || valid_entry->len > entry->len)
5108 valid_entry = entry;
5109 }
5110 }
5111
5112
5113 if (valid_entry == NULL) {
5114 PMD_DRV_LOG(ERR, "No valid entry found");
5115 return -ENOMEM;
5116 }
5117
5118
5119
5120
5121 if (valid_entry->len == num) {
5122 LIST_REMOVE(valid_entry, next);
5123 } else {
5124
5125
5126
5127
5128
5129 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5130 if (entry == NULL) {
5131 PMD_DRV_LOG(ERR,
5132 "Failed to allocate memory for resource pool");
5133 return -ENOMEM;
5134 }
5135 entry->base = valid_entry->base;
5136 entry->len = num;
5137 valid_entry->base += num;
5138 valid_entry->len -= num;
5139 valid_entry = entry;
5140 }
5141
5142
5143 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5144
5145 pool->num_free -= valid_entry->len;
5146 pool->num_alloc += valid_entry->len;
5147
5148 return valid_entry->base + pool->base;
5149}
5150
5151
5152
5153
5154static inline int
5155bitmap_is_subset(uint8_t src1, uint8_t src2)
5156{
5157 return !((src1 ^ src2) & src2);
5158}
5159
5160static enum i40e_status_code
5161validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5162{
5163 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5164
5165
5166 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5167 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5168 return I40E_NOT_SUPPORTED;
5169 }
5170
5171 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5172 PMD_DRV_LOG(ERR,
5173 "Enabled TC map 0x%x not applicable to HW support 0x%x",
5174 hw->func_caps.enabled_tcmap, enabled_tcmap);
5175 return I40E_NOT_SUPPORTED;
5176 }
5177 return I40E_SUCCESS;
5178}
5179
5180int
5181i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5182 struct i40e_vsi_vlan_pvid_info *info)
5183{
5184 struct i40e_hw *hw;
5185 struct i40e_vsi_context ctxt;
5186 uint8_t vlan_flags = 0;
5187 int ret;
5188
5189 if (vsi == NULL || info == NULL) {
5190 PMD_DRV_LOG(ERR, "invalid parameters");
5191 return I40E_ERR_PARAM;
5192 }
5193
5194 if (info->on) {
5195 vsi->info.pvid = info->config.pvid;
5196
5197
5198
5199
5200 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5201 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5202 } else {
5203 vsi->info.pvid = 0;
5204 if (info->config.reject.tagged == 0)
5205 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5206
5207 if (info->config.reject.untagged == 0)
5208 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5209 }
5210 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5211 I40E_AQ_VSI_PVLAN_MODE_MASK);
5212 vsi->info.port_vlan_flags |= vlan_flags;
5213 vsi->info.valid_sections =
5214 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5215 memset(&ctxt, 0, sizeof(ctxt));
5216 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5217 ctxt.seid = vsi->seid;
5218
5219 hw = I40E_VSI_TO_HW(vsi);
5220 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5221 if (ret != I40E_SUCCESS)
5222 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5223
5224 return ret;
5225}
5226
5227static int
5228i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5229{
5230 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5231 int i, ret;
5232 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5233
5234 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5235 if (ret != I40E_SUCCESS)
5236 return ret;
5237
5238 if (!vsi->seid) {
5239 PMD_DRV_LOG(ERR, "seid not valid");
5240 return -EINVAL;
5241 }
5242
5243 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5244 tc_bw_data.tc_valid_bits = enabled_tcmap;
5245 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5246 tc_bw_data.tc_bw_credits[i] =
5247 (enabled_tcmap & (1 << i)) ? 1 : 0;
5248
5249 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5250 if (ret != I40E_SUCCESS) {
5251 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5252 return ret;
5253 }
5254
5255 rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5256 sizeof(vsi->info.qs_handle));
5257 return I40E_SUCCESS;
5258}
5259
5260static enum i40e_status_code
5261i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5262 struct i40e_aqc_vsi_properties_data *info,
5263 uint8_t enabled_tcmap)
5264{
5265 enum i40e_status_code ret;
5266 int i, total_tc = 0;
5267 uint16_t qpnum_per_tc, bsf, qp_idx;
5268
5269 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5270 if (ret != I40E_SUCCESS)
5271 return ret;
5272
5273 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5274 if (enabled_tcmap & (1 << i))
5275 total_tc++;
5276 if (total_tc == 0)
5277 total_tc = 1;
5278 vsi->enabled_tc = enabled_tcmap;
5279
5280
5281 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5282 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5283 bsf = rte_bsf32(qpnum_per_tc);
5284
5285
5286 if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5287 vsi->nb_qps = qpnum_per_tc * total_tc;
5288
5289
5290
5291
5292
5293
5294 qp_idx = 0;
5295 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5296 if (vsi->enabled_tc & (1 << i)) {
5297 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5298 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5299 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5300 qp_idx += qpnum_per_tc;
5301 } else
5302 info->tc_mapping[i] = 0;
5303 }
5304
5305
5306 if (vsi->type == I40E_VSI_SRIOV) {
5307 info->mapping_flags |=
5308 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5309 for (i = 0; i < vsi->nb_qps; i++)
5310 info->queue_mapping[i] =
5311 rte_cpu_to_le_16(vsi->base_queue + i);
5312 } else {
5313 info->mapping_flags |=
5314 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5315 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5316 }
5317 info->valid_sections |=
5318 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5319
5320 return I40E_SUCCESS;
5321}
5322
5323static int
5324i40e_veb_release(struct i40e_veb *veb)
5325{
5326 struct i40e_vsi *vsi;
5327 struct i40e_hw *hw;
5328
5329 if (veb == NULL)
5330 return -EINVAL;
5331
5332 if (!TAILQ_EMPTY(&veb->head)) {
5333 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5334 return -EACCES;
5335 }
5336
5337 if (veb->associate_vsi != NULL) {
5338 vsi = veb->associate_vsi;
5339 hw = I40E_VSI_TO_HW(vsi);
5340
5341 vsi->uplink_seid = veb->uplink_seid;
5342 vsi->veb = NULL;
5343 } else {
5344 veb->associate_pf->main_vsi->floating_veb = NULL;
5345 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5346 }
5347
5348 i40e_aq_delete_element(hw, veb->seid, NULL);
5349 rte_free(veb);
5350 return I40E_SUCCESS;
5351}
5352
5353
5354static struct i40e_veb *
5355i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5356{
5357 struct i40e_veb *veb;
5358 int ret;
5359 struct i40e_hw *hw;
5360
5361 if (pf == NULL) {
5362 PMD_DRV_LOG(ERR,
5363 "veb setup failed, associated PF shouldn't null");
5364 return NULL;
5365 }
5366 hw = I40E_PF_TO_HW(pf);
5367
5368 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5369 if (!veb) {
5370 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5371 goto fail;
5372 }
5373
5374 veb->associate_vsi = vsi;
5375 veb->associate_pf = pf;
5376 TAILQ_INIT(&veb->head);
5377 veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5378
5379
5380 if (vsi != NULL) {
5381 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5382 I40E_DEFAULT_TCMAP, false,
5383 &veb->seid, false, NULL);
5384 } else {
5385 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5386 true, &veb->seid, false, NULL);
5387 }
5388
5389 if (ret != I40E_SUCCESS) {
5390 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5391 hw->aq.asq_last_status);
5392 goto fail;
5393 }
5394 veb->enabled_tc = I40E_DEFAULT_TCMAP;
5395
5396
5397 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5398 &veb->stats_idx, NULL, NULL, NULL);
5399 if (ret != I40E_SUCCESS) {
5400 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5401 hw->aq.asq_last_status);
5402 goto fail;
5403 }
5404
5405
5406 if (vsi)
5407 vsi->uplink_seid = veb->seid;
5408
5409 return veb;
5410fail:
5411 rte_free(veb);
5412 return NULL;
5413}
5414
5415int
5416i40e_vsi_release(struct i40e_vsi *vsi)
5417{
5418 struct i40e_pf *pf;
5419 struct i40e_hw *hw;
5420 struct i40e_vsi_list *vsi_list;
5421 void *temp;
5422 int ret;
5423 struct i40e_mac_filter *f;
5424 uint16_t user_param;
5425
5426 if (!vsi)
5427 return I40E_SUCCESS;
5428
5429 if (!vsi->adapter)
5430 return -EFAULT;
5431
5432 user_param = vsi->user_param;
5433
5434 pf = I40E_VSI_TO_PF(vsi);
5435 hw = I40E_VSI_TO_HW(vsi);
5436
5437
5438 if (vsi->veb) {
5439 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5440 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5441 return -1;
5442 }
5443 i40e_veb_release(vsi->veb);
5444 }
5445
5446 if (vsi->floating_veb) {
5447 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5448 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5449 return -1;
5450 }
5451 }
5452
5453
5454 i40e_vsi_remove_all_macvlan_filter(vsi);
5455 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5456 rte_free(f);
5457
5458 if (vsi->type != I40E_VSI_MAIN &&
5459 ((vsi->type != I40E_VSI_SRIOV) ||
5460 !pf->floating_veb_list[user_param])) {
5461
5462 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5463 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5464 return I40E_ERR_PARAM;
5465 }
5466 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5467 &vsi->sib_vsi_list, list);
5468
5469
5470 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5471 if (ret != I40E_SUCCESS)
5472 PMD_DRV_LOG(ERR, "Failed to delete element");
5473 }
5474
5475 if ((vsi->type == I40E_VSI_SRIOV) &&
5476 pf->floating_veb_list[user_param]) {
5477
5478 if (vsi->parent_vsi == NULL ||
5479 vsi->parent_vsi->floating_veb == NULL) {
5480 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5481 return I40E_ERR_PARAM;
5482 }
5483 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5484 &vsi->sib_vsi_list, list);
5485
5486
5487 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5488 if (ret != I40E_SUCCESS)
5489 PMD_DRV_LOG(ERR, "Failed to delete element");
5490 }
5491
5492 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5493
5494 if (vsi->type != I40E_VSI_SRIOV)
5495 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5496 rte_free(vsi);
5497
5498 return I40E_SUCCESS;
5499}
5500
5501static int
5502i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5503{
5504 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5505 struct i40e_aqc_remove_macvlan_element_data def_filter;
5506 struct i40e_mac_filter_info filter;
5507 int ret;
5508
5509 if (vsi->type != I40E_VSI_MAIN)
5510 return I40E_ERR_CONFIG;
5511 memset(&def_filter, 0, sizeof(def_filter));
5512 rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5513 ETH_ADDR_LEN);
5514 def_filter.vlan_tag = 0;
5515 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5516 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5517 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5518 if (ret != I40E_SUCCESS) {
5519 struct i40e_mac_filter *f;
5520 struct rte_ether_addr *mac;
5521
5522 PMD_DRV_LOG(DEBUG,
5523 "Cannot remove the default macvlan filter");
5524
5525 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5526 if (f == NULL) {
5527 PMD_DRV_LOG(ERR, "failed to allocate memory");
5528 return I40E_ERR_NO_MEMORY;
5529 }
5530 mac = &f->mac_info.mac_addr;
5531 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5532 ETH_ADDR_LEN);
5533 f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5534 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5535 vsi->mac_num++;
5536
5537 return ret;
5538 }
5539 rte_memcpy(&filter.mac_addr,
5540 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5541 filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5542 return i40e_vsi_add_mac(vsi, &filter);
5543}
5544
5545
5546
5547
5548
5549
5550
5551static enum i40e_status_code
5552i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5553{
5554 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5555 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5556 struct i40e_hw *hw = &vsi->adapter->hw;
5557 i40e_status ret;
5558 int i;
5559 uint32_t bw_max;
5560
5561 memset(&bw_config, 0, sizeof(bw_config));
5562 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5563 if (ret != I40E_SUCCESS) {
5564 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5565 hw->aq.asq_last_status);
5566 return ret;
5567 }
5568
5569 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5570 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5571 &ets_sla_config, NULL);
5572 if (ret != I40E_SUCCESS) {
5573 PMD_DRV_LOG(ERR,
5574 "VSI failed to get TC bandwdith configuration %u",
5575 hw->aq.asq_last_status);
5576 return ret;
5577 }
5578
5579
5580 vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5581 vsi->bw_info.bw_max = bw_config.max_bw;
5582 PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5583 PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5584 bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5585 (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5586 I40E_16_BIT_WIDTH);
5587 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5588 vsi->bw_info.bw_ets_share_credits[i] =
5589 ets_sla_config.share_credits[i];
5590 vsi->bw_info.bw_ets_credits[i] =
5591 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5592
5593 vsi->bw_info.bw_ets_max[i] =
5594 (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5595 RTE_LEN2MASK(3, uint8_t));
5596 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5597 vsi->bw_info.bw_ets_share_credits[i]);
5598 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5599 vsi->bw_info.bw_ets_credits[i]);
5600 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5601 vsi->bw_info.bw_ets_max[i]);
5602 }
5603
5604 return I40E_SUCCESS;
5605}
5606
5607
5608
5609
5610
5611
5612static inline void
5613i40e_enable_pf_lb(struct i40e_pf *pf)
5614{
5615 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5616 struct i40e_vsi_context ctxt;
5617 int ret;
5618
5619
5620 if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5621 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5622 return;
5623 }
5624
5625 memset(&ctxt, 0, sizeof(ctxt));
5626 ctxt.seid = pf->main_vsi_seid;
5627 ctxt.pf_num = hw->pf_id;
5628 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5629 if (ret) {
5630 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5631 ret, hw->aq.asq_last_status);
5632 return;
5633 }
5634 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5635 ctxt.info.valid_sections =
5636 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5637 ctxt.info.switch_id |=
5638 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5639
5640 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5641 if (ret)
5642 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5643 hw->aq.asq_last_status);
5644}
5645
5646
5647struct i40e_vsi *
5648i40e_vsi_setup(struct i40e_pf *pf,
5649 enum i40e_vsi_type type,
5650 struct i40e_vsi *uplink_vsi,
5651 uint16_t user_param)
5652{
5653 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5654 struct i40e_vsi *vsi;
5655 struct i40e_mac_filter_info filter;
5656 int ret;
5657 struct i40e_vsi_context ctxt;
5658 struct rte_ether_addr broadcast =
5659 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5660
5661 if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5662 uplink_vsi == NULL) {
5663 PMD_DRV_LOG(ERR,
5664 "VSI setup failed, VSI link shouldn't be NULL");
5665 return NULL;
5666 }
5667
5668 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5669 PMD_DRV_LOG(ERR,
5670 "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5671 return NULL;
5672 }
5673
5674
5675
5676
5677
5678
5679
5680
5681 if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5682 uplink_vsi->veb == NULL) {
5683 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5684
5685 if (uplink_vsi->veb == NULL) {
5686 PMD_DRV_LOG(ERR, "VEB setup failed");
5687 return NULL;
5688 }
5689
5690 i40e_enable_pf_lb(pf);
5691 }
5692
5693 if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5694 pf->main_vsi->floating_veb == NULL) {
5695 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5696
5697 if (pf->main_vsi->floating_veb == NULL) {
5698 PMD_DRV_LOG(ERR, "VEB setup failed");
5699 return NULL;
5700 }
5701 }
5702
5703 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5704 if (!vsi) {
5705 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5706 return NULL;
5707 }
5708 TAILQ_INIT(&vsi->mac_list);
5709 vsi->type = type;
5710 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5711 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5712 vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5713 vsi->user_param = user_param;
5714 vsi->vlan_anti_spoof_on = 0;
5715 vsi->vlan_filter_on = 0;
5716
5717 switch (vsi->type) {
5718 case I40E_VSI_MAIN :
5719 vsi->nb_qps = pf->lan_nb_qps;
5720 break;
5721 case I40E_VSI_SRIOV :
5722 vsi->nb_qps = pf->vf_nb_qps;
5723 break;
5724 case I40E_VSI_VMDQ2:
5725 vsi->nb_qps = pf->vmdq_nb_qps;
5726 break;
5727 case I40E_VSI_FDIR:
5728 vsi->nb_qps = pf->fdir_nb_qps;
5729 break;
5730 default:
5731 goto fail_mem;
5732 }
5733
5734
5735
5736
5737
5738
5739
5740
5741 if (type != I40E_VSI_FDIR) {
5742 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5743 if (ret < 0) {
5744 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5745 vsi->seid, ret);
5746 goto fail_mem;
5747 }
5748 vsi->base_queue = ret;
5749 } else
5750 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5751
5752
5753 if (type == I40E_VSI_MAIN) {
5754 if (pf->support_multi_driver) {
5755
5756
5757
5758
5759
5760 vsi->msix_intr = 0;
5761 vsi->nb_msix = 1;
5762 } else {
5763 ret = i40e_res_pool_alloc(&pf->msix_pool,
5764 RTE_MIN(vsi->nb_qps,
5765 RTE_MAX_RXTX_INTR_VEC_ID));
5766 if (ret < 0) {
5767 PMD_DRV_LOG(ERR,
5768 "VSI MAIN %d get heap failed %d",
5769 vsi->seid, ret);
5770 goto fail_queue_alloc;
5771 }
5772 vsi->msix_intr = ret;
5773 vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5774 RTE_MAX_RXTX_INTR_VEC_ID);
5775 }
5776 } else if (type != I40E_VSI_SRIOV) {
5777 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5778 if (ret < 0) {
5779 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5780 if (type != I40E_VSI_FDIR)
5781 goto fail_queue_alloc;
5782 vsi->msix_intr = 0;
5783 vsi->nb_msix = 0;
5784 } else {
5785 vsi->msix_intr = ret;
5786 vsi->nb_msix = 1;
5787 }
5788 } else {
5789 vsi->msix_intr = 0;
5790 vsi->nb_msix = 0;
5791 }
5792
5793
5794 if (type == I40E_VSI_MAIN) {
5795
5796 vsi->uplink_seid = pf->mac_seid;
5797 vsi->seid = pf->main_vsi_seid;
5798
5799
5800
5801
5802
5803
5804
5805
5806 memset(&ctxt, 0, sizeof(ctxt));
5807 ctxt.seid = vsi->seid;
5808 ctxt.pf_num = hw->pf_id;
5809 ctxt.uplink_seid = vsi->uplink_seid;
5810 ctxt.vf_num = 0;
5811 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5812 if (ret != I40E_SUCCESS) {
5813 PMD_DRV_LOG(ERR, "Failed to get VSI params");
5814 goto fail_msix_alloc;
5815 }
5816 rte_memcpy(&vsi->info, &ctxt.info,
5817 sizeof(struct i40e_aqc_vsi_properties_data));
5818 vsi->vsi_id = ctxt.vsi_number;
5819 vsi->info.valid_sections = 0;
5820
5821
5822 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5823 I40E_SUCCESS) {
5824 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5825 goto fail_msix_alloc;
5826 }
5827
5828
5829 memset(&ctxt, 0, sizeof(ctxt));
5830 vsi->info.valid_sections |=
5831 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5832 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5833 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5834 rte_memcpy(&ctxt.info, &vsi->info,
5835 sizeof(struct i40e_aqc_vsi_properties_data));
5836 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5837 I40E_DEFAULT_TCMAP);
5838 if (ret != I40E_SUCCESS) {
5839 PMD_DRV_LOG(ERR,
5840 "Failed to configure TC queue mapping");
5841 goto fail_msix_alloc;
5842 }
5843 ctxt.seid = vsi->seid;
5844 ctxt.pf_num = hw->pf_id;
5845 ctxt.uplink_seid = vsi->uplink_seid;
5846 ctxt.vf_num = 0;
5847
5848
5849 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5850 if (ret != I40E_SUCCESS) {
5851 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5852 goto fail_msix_alloc;
5853 }
5854
5855 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5856 sizeof(vsi->info.tc_mapping));
5857 rte_memcpy(&vsi->info.queue_mapping,
5858 &ctxt.info.queue_mapping,
5859 sizeof(vsi->info.queue_mapping));
5860 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5861 vsi->info.valid_sections = 0;
5862
5863 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5864 ETH_ADDR_LEN);
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876 i40e_update_default_filter_setting(vsi);
5877 i40e_config_qinq(hw, vsi);
5878 } else if (type == I40E_VSI_SRIOV) {
5879 memset(&ctxt, 0, sizeof(ctxt));
5880
5881
5882
5883
5884 if (uplink_vsi == NULL)
5885 vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5886 else
5887 vsi->uplink_seid = uplink_vsi->uplink_seid;
5888 ctxt.pf_num = hw->pf_id;
5889 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5890 ctxt.uplink_seid = vsi->uplink_seid;
5891 ctxt.connection_type = 0x1;
5892 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5893
5894
5895 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5896
5897 ctxt.info.valid_sections |=
5898 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5899 ctxt.info.switch_id =
5900 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5901 }
5902
5903
5904 ctxt.info.valid_sections |=
5905 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5906 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5907 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5908 hw->func_caps.enabled_tcmap);
5909 if (ret != I40E_SUCCESS) {
5910 PMD_DRV_LOG(ERR,
5911 "Failed to configure TC queue mapping");
5912 goto fail_msix_alloc;
5913 }
5914
5915 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5916 ctxt.info.valid_sections |=
5917 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5918
5919
5920
5921
5922
5923 i40e_config_qinq(hw, vsi);
5924 } else if (type == I40E_VSI_VMDQ2) {
5925 memset(&ctxt, 0, sizeof(ctxt));
5926
5927
5928
5929
5930 vsi->uplink_seid = uplink_vsi->uplink_seid;
5931 ctxt.pf_num = hw->pf_id;
5932 ctxt.vf_num = 0;
5933 ctxt.uplink_seid = vsi->uplink_seid;
5934 ctxt.connection_type = 0x1;
5935 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5936
5937 ctxt.info.valid_sections |=
5938 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5939
5940 if (user_param) {
5941 ctxt.info.switch_id =
5942 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5943 ctxt.info.switch_id |=
5944 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5945 }
5946
5947
5948 ctxt.info.valid_sections |=
5949 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5950 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5951 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5952 I40E_DEFAULT_TCMAP);
5953 if (ret != I40E_SUCCESS) {
5954 PMD_DRV_LOG(ERR,
5955 "Failed to configure TC queue mapping");
5956 goto fail_msix_alloc;
5957 }
5958 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5959 ctxt.info.valid_sections |=
5960 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5961 } else if (type == I40E_VSI_FDIR) {
5962 memset(&ctxt, 0, sizeof(ctxt));
5963 vsi->uplink_seid = uplink_vsi->uplink_seid;
5964 ctxt.pf_num = hw->pf_id;
5965 ctxt.vf_num = 0;
5966 ctxt.uplink_seid = vsi->uplink_seid;
5967 ctxt.connection_type = 0x1;
5968 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5969 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5970 I40E_DEFAULT_TCMAP);
5971 if (ret != I40E_SUCCESS) {
5972 PMD_DRV_LOG(ERR,
5973 "Failed to configure TC queue mapping.");
5974 goto fail_msix_alloc;
5975 }
5976 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5977 ctxt.info.valid_sections |=
5978 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5979 } else {
5980 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5981 goto fail_msix_alloc;
5982 }
5983
5984 if (vsi->type != I40E_VSI_MAIN) {
5985 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5986 if (ret != I40E_SUCCESS) {
5987 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5988 hw->aq.asq_last_status);
5989 goto fail_msix_alloc;
5990 }
5991 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5992 vsi->info.valid_sections = 0;
5993 vsi->seid = ctxt.seid;
5994 vsi->vsi_id = ctxt.vsi_number;
5995 vsi->sib_vsi_list.vsi = vsi;
5996 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5997 TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5998 &vsi->sib_vsi_list, list);
5999 } else {
6000 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6001 &vsi->sib_vsi_list, list);
6002 }
6003 }
6004
6005
6006 rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6007 filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
6008
6009 ret = i40e_vsi_add_mac(vsi, &filter);
6010 if (ret != I40E_SUCCESS) {
6011 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6012 goto fail_msix_alloc;
6013 }
6014
6015
6016 i40e_vsi_get_bw_config(vsi);
6017 return vsi;
6018fail_msix_alloc:
6019 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
6020fail_queue_alloc:
6021 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
6022fail_mem:
6023 rte_free(vsi);
6024 return NULL;
6025}
6026
6027
6028int
6029i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6030{
6031 int i, num;
6032 struct i40e_mac_filter *f;
6033 void *temp;
6034 struct i40e_mac_filter_info *mac_filter;
6035 enum i40e_mac_filter_type desired_filter;
6036 int ret = I40E_SUCCESS;
6037
6038 if (on) {
6039
6040 desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6041 } else {
6042
6043 desired_filter = I40E_MAC_PERFECT_MATCH;
6044 }
6045
6046 num = vsi->mac_num;
6047
6048 mac_filter = rte_zmalloc("mac_filter_info_data",
6049 num * sizeof(*mac_filter), 0);
6050 if (mac_filter == NULL) {
6051 PMD_DRV_LOG(ERR, "failed to allocate memory");
6052 return I40E_ERR_NO_MEMORY;
6053 }
6054
6055 i = 0;
6056
6057
6058 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6059 mac_filter[i] = f->mac_info;
6060 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6061 if (ret) {
6062 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6063 on ? "enable" : "disable");
6064 goto DONE;
6065 }
6066 i++;
6067 }
6068
6069
6070 for (i = 0; i < num; i++) {
6071 mac_filter[i].filter_type = desired_filter;
6072 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6073 if (ret) {
6074 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6075 on ? "enable" : "disable");
6076 goto DONE;
6077 }
6078 }
6079
6080DONE:
6081 rte_free(mac_filter);
6082 return ret;
6083}
6084
6085
6086int
6087i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6088{
6089 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6090 struct i40e_vsi_context ctxt;
6091 uint8_t vlan_flags;
6092 int ret = I40E_SUCCESS;
6093
6094
6095 if (vsi->info.valid_sections &
6096 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6097 if (on) {
6098 if ((vsi->info.port_vlan_flags &
6099 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6100 return 0;
6101 } else {
6102 if ((vsi->info.port_vlan_flags &
6103 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6104 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6105 return 0;
6106 }
6107 }
6108
6109 if (on)
6110 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6111 else
6112 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6113 vsi->info.valid_sections =
6114 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6115 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6116 vsi->info.port_vlan_flags |= vlan_flags;
6117 ctxt.seid = vsi->seid;
6118 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6119 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6120 if (ret)
6121 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6122 on ? "enable" : "disable");
6123
6124 return ret;
6125}
6126
6127static int
6128i40e_dev_init_vlan(struct rte_eth_dev *dev)
6129{
6130 struct rte_eth_dev_data *data = dev->data;
6131 int ret;
6132 int mask = 0;
6133
6134
6135 mask = ETH_VLAN_STRIP_MASK |
6136 ETH_QINQ_STRIP_MASK |
6137 ETH_VLAN_FILTER_MASK |
6138 ETH_VLAN_EXTEND_MASK;
6139 ret = i40e_vlan_offload_set(dev, mask);
6140 if (ret) {
6141 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6142 return ret;
6143 }
6144
6145
6146 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6147 data->dev_conf.txmode.hw_vlan_insert_pvid);
6148 if (ret)
6149 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6150
6151 return ret;
6152}
6153
6154static int
6155i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6156{
6157 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6158
6159 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6160}
6161
6162static int
6163i40e_update_flow_control(struct i40e_hw *hw)
6164{
6165#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6166 struct i40e_link_status link_status;
6167 uint32_t rxfc = 0, txfc = 0, reg;
6168 uint8_t an_info;
6169 int ret;
6170
6171 memset(&link_status, 0, sizeof(link_status));
6172 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6173 if (ret != I40E_SUCCESS) {
6174 PMD_DRV_LOG(ERR, "Failed to get link status information");
6175 goto write_reg;
6176 }
6177
6178 an_info = hw->phy.link_info.an_info;
6179 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6180 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6181 ret = I40E_ERR_NOT_READY;
6182 goto write_reg;
6183 }
6184
6185
6186
6187
6188 switch (an_info & I40E_LINK_PAUSE_RXTX) {
6189 case I40E_LINK_PAUSE_RXTX:
6190 rxfc = 1;
6191 txfc = 1;
6192 hw->fc.current_mode = I40E_FC_FULL;
6193 break;
6194 case I40E_AQ_LINK_PAUSE_RX:
6195 rxfc = 1;
6196 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6197 break;
6198 case I40E_AQ_LINK_PAUSE_TX:
6199 txfc = 1;
6200 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6201 break;
6202 default:
6203 hw->fc.current_mode = I40E_FC_NONE;
6204 break;
6205 }
6206
6207write_reg:
6208 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6209 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6210 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6211 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6212 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6213 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6214
6215 return ret;
6216}
6217
6218
6219static int
6220i40e_pf_setup(struct i40e_pf *pf)
6221{
6222 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6223 struct i40e_filter_control_settings settings;
6224 struct i40e_vsi *vsi;
6225 int ret;
6226
6227
6228 pf->offset_loaded = FALSE;
6229 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6230 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6231 memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6232 memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6233
6234 ret = i40e_pf_get_switch_config(pf);
6235 if (ret != I40E_SUCCESS) {
6236 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6237 return ret;
6238 }
6239
6240 ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6241 if (ret)
6242 PMD_INIT_LOG(WARNING,
6243 "failed to allocate switch domain for device %d", ret);
6244
6245 if (pf->flags & I40E_FLAG_FDIR) {
6246
6247 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6248 if (ret != I40E_FDIR_QUEUE_ID) {
6249 PMD_DRV_LOG(ERR,
6250 "queue allocation fails for FDIR: ret =%d",
6251 ret);
6252 pf->flags &= ~I40E_FLAG_FDIR;
6253 }
6254 }
6255
6256 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6257 if (!vsi) {
6258 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6259 return I40E_ERR_NOT_READY;
6260 }
6261 pf->main_vsi = vsi;
6262
6263
6264 memset(&settings, 0, sizeof(settings));
6265 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6266 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6267 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6268 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6269 else {
6270 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6271 hw->func_caps.rss_table_size);
6272 return I40E_ERR_PARAM;
6273 }
6274 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6275 hw->func_caps.rss_table_size);
6276 pf->hash_lut_size = hw->func_caps.rss_table_size;
6277
6278
6279 settings.enable_ethtype = TRUE;
6280 settings.enable_macvlan = TRUE;
6281 ret = i40e_set_filter_control(hw, &settings);
6282 if (ret)
6283 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6284 ret);
6285
6286
6287 i40e_update_flow_control(hw);
6288
6289 return I40E_SUCCESS;
6290}
6291
6292int
6293i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6294{
6295 uint32_t reg;
6296 uint16_t j;
6297
6298
6299
6300
6301
6302 i40e_pre_tx_queue_cfg(hw, q_idx, on);
6303 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6304
6305
6306 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6307 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6308 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6309 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6310 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6311 & 0x1))) {
6312 break;
6313 }
6314 }
6315 if (on) {
6316 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6317 return I40E_SUCCESS;
6318
6319 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6320 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6321 } else {
6322 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6323 return I40E_SUCCESS;
6324 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6325 }
6326
6327 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6328
6329 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6330 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6331 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6332 if (on) {
6333 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6334 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6335 break;
6336 } else {
6337 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6338 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6339 break;
6340 }
6341 }
6342
6343 if (j >= I40E_CHK_Q_ENA_COUNT) {
6344 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6345 (on ? "enable" : "disable"), q_idx);
6346 return I40E_ERR_TIMEOUT;
6347 }
6348
6349 return I40E_SUCCESS;
6350}
6351
6352int
6353i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6354{
6355 uint32_t reg;
6356 uint16_t j;
6357
6358
6359 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6360 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6361 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6362 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6363 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6364 break;
6365 }
6366
6367 if (on) {
6368 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6369 return I40E_SUCCESS;
6370 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6371 } else {
6372 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6373 return I40E_SUCCESS;
6374 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6375 }
6376
6377
6378 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6379
6380 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6381 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6382 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6383 if (on) {
6384 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6385 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6386 break;
6387 } else {
6388 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6389 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6390 break;
6391 }
6392 }
6393
6394
6395 if (j >= I40E_CHK_Q_ENA_COUNT) {
6396 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6397 (on ? "enable" : "disable"), q_idx);
6398 return I40E_ERR_TIMEOUT;
6399 }
6400
6401 return I40E_SUCCESS;
6402}
6403
6404
6405static int
6406i40e_dev_tx_init(struct i40e_pf *pf)
6407{
6408 struct rte_eth_dev_data *data = pf->dev_data;
6409 uint16_t i;
6410 uint32_t ret = I40E_SUCCESS;
6411 struct i40e_tx_queue *txq;
6412
6413 for (i = 0; i < data->nb_tx_queues; i++) {
6414 txq = data->tx_queues[i];
6415 if (!txq || !txq->q_set)
6416 continue;
6417 ret = i40e_tx_queue_init(txq);
6418 if (ret != I40E_SUCCESS)
6419 break;
6420 }
6421 if (ret == I40E_SUCCESS)
6422 i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6423
6424 return ret;
6425}
6426
6427
6428static int
6429i40e_dev_rx_init(struct i40e_pf *pf)
6430{
6431 struct rte_eth_dev_data *data = pf->dev_data;
6432 int ret = I40E_SUCCESS;
6433 uint16_t i;
6434 struct i40e_rx_queue *rxq;
6435
6436 i40e_pf_config_rss(pf);
6437 for (i = 0; i < data->nb_rx_queues; i++) {
6438 rxq = data->rx_queues[i];
6439 if (!rxq || !rxq->q_set)
6440 continue;
6441
6442 ret = i40e_rx_queue_init(rxq);
6443 if (ret != I40E_SUCCESS) {
6444 PMD_DRV_LOG(ERR,
6445 "Failed to do RX queue initialization");
6446 break;
6447 }
6448 }
6449 if (ret == I40E_SUCCESS)
6450 i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6451
6452 return ret;
6453}
6454
6455static int
6456i40e_dev_rxtx_init(struct i40e_pf *pf)
6457{
6458 int err;
6459
6460 err = i40e_dev_tx_init(pf);
6461 if (err) {
6462 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6463 return err;
6464 }
6465 err = i40e_dev_rx_init(pf);
6466 if (err) {
6467 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6468 return err;
6469 }
6470
6471 return err;
6472}
6473
6474static int
6475i40e_vmdq_setup(struct rte_eth_dev *dev)
6476{
6477 struct rte_eth_conf *conf = &dev->data->dev_conf;
6478 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6479 int i, err, conf_vsis, j, loop;
6480 struct i40e_vsi *vsi;
6481 struct i40e_vmdq_info *vmdq_info;
6482 struct rte_eth_vmdq_rx_conf *vmdq_conf;
6483 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6484
6485
6486
6487
6488
6489 i40e_pf_disable_irq0(hw);
6490
6491 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6492 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6493 return -ENOTSUP;
6494 }
6495
6496 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6497 if (conf_vsis > pf->max_nb_vmdq_vsi) {
6498 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6499 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6500 pf->max_nb_vmdq_vsi);
6501 return -ENOTSUP;
6502 }
6503
6504 if (pf->vmdq != NULL) {
6505 PMD_INIT_LOG(INFO, "VMDQ already configured");
6506 return 0;
6507 }
6508
6509 pf->vmdq = rte_zmalloc("vmdq_info_struct",
6510 sizeof(*vmdq_info) * conf_vsis, 0);
6511
6512 if (pf->vmdq == NULL) {
6513 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6514 return -ENOMEM;
6515 }
6516
6517 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6518
6519
6520 for (i = 0; i < conf_vsis; i++) {
6521 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6522 vmdq_conf->enable_loop_back);
6523 if (vsi == NULL) {
6524 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6525 err = -1;
6526 goto err_vsi_setup;
6527 }
6528 vmdq_info = &pf->vmdq[i];
6529 vmdq_info->pf = pf;
6530 vmdq_info->vsi = vsi;
6531 }
6532 pf->nb_cfg_vmdq_vsi = conf_vsis;
6533
6534
6535 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6536 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6537 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6538 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6539 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6540 vmdq_conf->pool_map[i].vlan_id, j);
6541
6542 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6543 vmdq_conf->pool_map[i].vlan_id);
6544 if (err) {
6545 PMD_INIT_LOG(ERR, "Failed to add vlan");
6546 err = -1;
6547 goto err_vsi_setup;
6548 }
6549 }
6550 }
6551 }
6552
6553 i40e_pf_enable_irq0(hw);
6554
6555 return 0;
6556
6557err_vsi_setup:
6558 for (i = 0; i < conf_vsis; i++)
6559 if (pf->vmdq[i].vsi == NULL)
6560 break;
6561 else
6562 i40e_vsi_release(pf->vmdq[i].vsi);
6563
6564 rte_free(pf->vmdq);
6565 pf->vmdq = NULL;
6566 i40e_pf_enable_irq0(hw);
6567 return err;
6568}
6569
6570static void
6571i40e_stat_update_32(struct i40e_hw *hw,
6572 uint32_t reg,
6573 bool offset_loaded,
6574 uint64_t *offset,
6575 uint64_t *stat)
6576{
6577 uint64_t new_data;
6578
6579 new_data = (uint64_t)I40E_READ_REG(hw, reg);
6580 if (!offset_loaded)
6581 *offset = new_data;
6582
6583 if (new_data >= *offset)
6584 *stat = (uint64_t)(new_data - *offset);
6585 else
6586 *stat = (uint64_t)((new_data +
6587 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6588}
6589
6590static void
6591i40e_stat_update_48(struct i40e_hw *hw,
6592 uint32_t hireg,
6593 uint32_t loreg,
6594 bool offset_loaded,
6595 uint64_t *offset,
6596 uint64_t *stat)
6597{
6598 uint64_t new_data;
6599
6600 if (hw->device_id == I40E_DEV_ID_QEMU) {
6601 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6602 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6603 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6604 } else {
6605 new_data = I40E_READ_REG64(hw, loreg);
6606 }
6607
6608 if (!offset_loaded)
6609 *offset = new_data;
6610
6611 if (new_data >= *offset)
6612 *stat = new_data - *offset;
6613 else
6614 *stat = (uint64_t)((new_data +
6615 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6616
6617 *stat &= I40E_48_BIT_MASK;
6618}
6619
6620
6621void
6622i40e_pf_disable_irq0(struct i40e_hw *hw)
6623{
6624
6625 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6626 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6627 I40E_WRITE_FLUSH(hw);
6628}
6629
6630
6631void
6632i40e_pf_enable_irq0(struct i40e_hw *hw)
6633{
6634 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6635 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6636 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6637 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6638 I40E_WRITE_FLUSH(hw);
6639}
6640
6641static void
6642i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6643{
6644
6645 i40e_pf_disable_irq0(hw);
6646 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6647 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6648 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6649
6650 if (no_queue)
6651
6652 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6653 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6654}
6655
6656static void
6657i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6658{
6659 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6660 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6661 int i;
6662 uint16_t abs_vf_id;
6663 uint32_t index, offset, val;
6664
6665 if (!pf->vfs)
6666 return;
6667
6668
6669
6670
6671 for (i = 0; i < pf->vf_num; i++) {
6672 abs_vf_id = hw->func_caps.vf_base_id + i;
6673 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6674 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6675 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6676
6677 if (val & (0x1 << offset)) {
6678 int ret;
6679
6680
6681 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6682 (0x1 << offset));
6683 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6684
6685
6686
6687
6688 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6689 if (ret != I40E_SUCCESS)
6690 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6691 }
6692 }
6693}
6694
6695static void
6696i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6697{
6698 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6699 int i;
6700
6701 for (i = 0; i < pf->vf_num; i++)
6702 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6703}
6704
6705static void
6706i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6707{
6708 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6709 struct i40e_arq_event_info info;
6710 uint16_t pending, opcode;
6711 int ret;
6712
6713 info.buf_len = I40E_AQ_BUF_SZ;
6714 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6715 if (!info.msg_buf) {
6716 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6717 return;
6718 }
6719
6720 pending = 1;
6721 while (pending) {
6722 ret = i40e_clean_arq_element(hw, &info, &pending);
6723
6724 if (ret != I40E_SUCCESS) {
6725 PMD_DRV_LOG(INFO,
6726 "Failed to read msg from AdminQ, aq_err: %u",
6727 hw->aq.asq_last_status);
6728 break;
6729 }
6730 opcode = rte_le_to_cpu_16(info.desc.opcode);
6731
6732 switch (opcode) {
6733 case i40e_aqc_opc_send_msg_to_pf:
6734
6735 i40e_pf_host_handle_vf_msg(dev,
6736 rte_le_to_cpu_16(info.desc.retval),
6737 rte_le_to_cpu_32(info.desc.cookie_high),
6738 rte_le_to_cpu_32(info.desc.cookie_low),
6739 info.msg_buf,
6740 info.msg_len);
6741 break;
6742 case i40e_aqc_opc_get_link_status:
6743 ret = i40e_dev_link_update(dev, 0);
6744 if (!ret)
6745 rte_eth_dev_callback_process(dev,
6746 RTE_ETH_EVENT_INTR_LSC, NULL);
6747 break;
6748 default:
6749 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6750 opcode);
6751 break;
6752 }
6753 }
6754 rte_free(info.msg_buf);
6755}
6756
6757static void
6758i40e_handle_mdd_event(struct rte_eth_dev *dev)
6759{
6760#define I40E_MDD_CLEAR32 0xFFFFFFFF
6761#define I40E_MDD_CLEAR16 0xFFFF
6762 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6763 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6764 bool mdd_detected = false;
6765 struct i40e_pf_vf *vf;
6766 uint32_t reg;
6767 int i;
6768
6769
6770 reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6771 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6772 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6773 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6774 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6775 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6776 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6777 I40E_GL_MDET_TX_EVENT_SHIFT;
6778 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6779 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6780 hw->func_caps.base_queue;
6781 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6782 "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6783 event, queue, pf_num, vf_num, dev->data->name);
6784 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6785 mdd_detected = true;
6786 }
6787 reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6788 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6789 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6790 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6791 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6792 I40E_GL_MDET_RX_EVENT_SHIFT;
6793 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6794 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6795 hw->func_caps.base_queue;
6796
6797 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6798 "queue %d of function 0x%02x device %s\n",
6799 event, queue, func, dev->data->name);
6800 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6801 mdd_detected = true;
6802 }
6803
6804 if (mdd_detected) {
6805 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6806 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6807 I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6808 PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6809 }
6810 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6811 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6812 I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6813 I40E_MDD_CLEAR16);
6814 PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6815 }
6816 }
6817
6818
6819 for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6820 vf = &pf->vfs[i];
6821 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6822 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6823 I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6824 I40E_MDD_CLEAR16);
6825 vf->num_mdd_events++;
6826 PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
6827 PRIu64 "times\n",
6828 i, vf->num_mdd_events);
6829 }
6830
6831 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6832 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6833 I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6834 I40E_MDD_CLEAR16);
6835 vf->num_mdd_events++;
6836 PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
6837 PRIu64 "times\n",
6838 i, vf->num_mdd_events);
6839 }
6840 }
6841}
6842
6843
6844
6845
6846
6847
6848
6849
6850
6851
6852
6853
6854
6855static void
6856i40e_dev_interrupt_handler(void *param)
6857{
6858 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6859 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6860 uint32_t icr0;
6861
6862
6863 i40e_pf_disable_irq0(hw);
6864
6865
6866 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6867
6868
6869 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6870 PMD_DRV_LOG(INFO, "No interrupt event");
6871 goto done;
6872 }
6873 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6874 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6875 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6876 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6877 i40e_handle_mdd_event(dev);
6878 }
6879 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6880 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6881 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6882 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6883 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6884 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6885 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6886 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6887 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6888 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6889
6890 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6891 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6892 i40e_dev_handle_vfr_event(dev);
6893 }
6894 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6895 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6896 i40e_dev_handle_aq_msg(dev);
6897 }
6898
6899done:
6900
6901 i40e_pf_enable_irq0(hw);
6902}
6903
6904static void
6905i40e_dev_alarm_handler(void *param)
6906{
6907 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6908 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6909 uint32_t icr0;
6910
6911
6912 i40e_pf_disable_irq0(hw);
6913
6914
6915 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6916
6917
6918 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6919 goto done;
6920 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6921 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6922 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6923 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6924 i40e_handle_mdd_event(dev);
6925 }
6926 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6927 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6928 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6929 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6930 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6931 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6932 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6933 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6934 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6935 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6936
6937 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6938 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6939 i40e_dev_handle_vfr_event(dev);
6940 }
6941 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6942 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6943 i40e_dev_handle_aq_msg(dev);
6944 }
6945
6946done:
6947
6948 i40e_pf_enable_irq0(hw);
6949 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6950 i40e_dev_alarm_handler, dev);
6951}
6952
6953int
6954i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6955 struct i40e_macvlan_filter *filter,
6956 int total)
6957{
6958 int ele_num, ele_buff_size;
6959 int num, actual_num, i;
6960 uint16_t flags;
6961 int ret = I40E_SUCCESS;
6962 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6963 struct i40e_aqc_add_macvlan_element_data *req_list;
6964
6965 if (filter == NULL || total == 0)
6966 return I40E_ERR_PARAM;
6967 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6968 ele_buff_size = hw->aq.asq_buf_size;
6969
6970 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6971 if (req_list == NULL) {
6972 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6973 return I40E_ERR_NO_MEMORY;
6974 }
6975
6976 num = 0;
6977 do {
6978 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6979 memset(req_list, 0, ele_buff_size);
6980
6981 for (i = 0; i < actual_num; i++) {
6982 rte_memcpy(req_list[i].mac_addr,
6983 &filter[num + i].macaddr, ETH_ADDR_LEN);
6984 req_list[i].vlan_tag =
6985 rte_cpu_to_le_16(filter[num + i].vlan_id);
6986
6987 switch (filter[num + i].filter_type) {
6988 case I40E_MAC_PERFECT_MATCH:
6989 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6990 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6991 break;
6992 case I40E_MACVLAN_PERFECT_MATCH:
6993 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6994 break;
6995 case I40E_MAC_HASH_MATCH:
6996 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6997 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6998 break;
6999 case I40E_MACVLAN_HASH_MATCH:
7000 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7001 break;
7002 default:
7003 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7004 ret = I40E_ERR_PARAM;
7005 goto DONE;
7006 }
7007
7008 req_list[i].queue_number = 0;
7009
7010 req_list[i].flags = rte_cpu_to_le_16(flags);
7011 }
7012
7013 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7014 actual_num, NULL);
7015 if (ret != I40E_SUCCESS) {
7016 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7017 goto DONE;
7018 }
7019 num += actual_num;
7020 } while (num < total);
7021
7022DONE:
7023 rte_free(req_list);
7024 return ret;
7025}
7026
7027int
7028i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7029 struct i40e_macvlan_filter *filter,
7030 int total)
7031{
7032 int ele_num, ele_buff_size;
7033 int num, actual_num, i;
7034 uint16_t flags;
7035 int ret = I40E_SUCCESS;
7036 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7037 struct i40e_aqc_remove_macvlan_element_data *req_list;
7038
7039 if (filter == NULL || total == 0)
7040 return I40E_ERR_PARAM;
7041
7042 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7043 ele_buff_size = hw->aq.asq_buf_size;
7044
7045 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7046 if (req_list == NULL) {
7047 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7048 return I40E_ERR_NO_MEMORY;
7049 }
7050
7051 num = 0;
7052 do {
7053 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7054 memset(req_list, 0, ele_buff_size);
7055
7056 for (i = 0; i < actual_num; i++) {
7057 rte_memcpy(req_list[i].mac_addr,
7058 &filter[num + i].macaddr, ETH_ADDR_LEN);
7059 req_list[i].vlan_tag =
7060 rte_cpu_to_le_16(filter[num + i].vlan_id);
7061
7062 switch (filter[num + i].filter_type) {
7063 case I40E_MAC_PERFECT_MATCH:
7064 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7065 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7066 break;
7067 case I40E_MACVLAN_PERFECT_MATCH:
7068 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7069 break;
7070 case I40E_MAC_HASH_MATCH:
7071 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7072 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7073 break;
7074 case I40E_MACVLAN_HASH_MATCH:
7075 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7076 break;
7077 default:
7078 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7079 ret = I40E_ERR_PARAM;
7080 goto DONE;
7081 }
7082 req_list[i].flags = rte_cpu_to_le_16(flags);
7083 }
7084
7085 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7086 actual_num, NULL);
7087 if (ret != I40E_SUCCESS) {
7088 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7089 goto DONE;
7090 }
7091 num += actual_num;
7092 } while (num < total);
7093
7094DONE:
7095 rte_free(req_list);
7096 return ret;
7097}
7098
7099
7100static struct i40e_mac_filter *
7101i40e_find_mac_filter(struct i40e_vsi *vsi,
7102 struct rte_ether_addr *macaddr)
7103{
7104 struct i40e_mac_filter *f;
7105
7106 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7107 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7108 return f;
7109 }
7110
7111 return NULL;
7112}
7113
7114static bool
7115i40e_find_vlan_filter(struct i40e_vsi *vsi,
7116 uint16_t vlan_id)
7117{
7118 uint32_t vid_idx, vid_bit;
7119
7120 if (vlan_id > ETH_VLAN_ID_MAX)
7121 return 0;
7122
7123 vid_idx = I40E_VFTA_IDX(vlan_id);
7124 vid_bit = I40E_VFTA_BIT(vlan_id);
7125
7126 if (vsi->vfta[vid_idx] & vid_bit)
7127 return 1;
7128 else
7129 return 0;
7130}
7131
7132static void
7133i40e_store_vlan_filter(struct i40e_vsi *vsi,
7134 uint16_t vlan_id, bool on)
7135{
7136 uint32_t vid_idx, vid_bit;
7137
7138 vid_idx = I40E_VFTA_IDX(vlan_id);
7139 vid_bit = I40E_VFTA_BIT(vlan_id);
7140
7141 if (on)
7142 vsi->vfta[vid_idx] |= vid_bit;
7143 else
7144 vsi->vfta[vid_idx] &= ~vid_bit;
7145}
7146
7147void
7148i40e_set_vlan_filter(struct i40e_vsi *vsi,
7149 uint16_t vlan_id, bool on)
7150{
7151 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7152 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7153 int ret;
7154
7155 if (vlan_id > ETH_VLAN_ID_MAX)
7156 return;
7157
7158 i40e_store_vlan_filter(vsi, vlan_id, on);
7159
7160 if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7161 return;
7162
7163 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7164
7165 if (on) {
7166 ret = i40e_aq_add_vlan(hw, vsi->seid,
7167 &vlan_data, 1, NULL);
7168 if (ret != I40E_SUCCESS)
7169 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7170 } else {
7171 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7172 &vlan_data, 1, NULL);
7173 if (ret != I40E_SUCCESS)
7174 PMD_DRV_LOG(ERR,
7175 "Failed to remove vlan filter");
7176 }
7177}
7178
7179
7180
7181
7182
7183int
7184i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7185 struct i40e_macvlan_filter *mv_f,
7186 int num, struct rte_ether_addr *addr)
7187{
7188 int i;
7189 uint32_t j, k;
7190
7191
7192
7193
7194
7195 if (num < vsi->vlan_num)
7196 return I40E_ERR_PARAM;
7197
7198 i = 0;
7199 for (j = 0; j < I40E_VFTA_SIZE; j++) {
7200 if (vsi->vfta[j]) {
7201 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7202 if (vsi->vfta[j] & (1 << k)) {
7203 if (i > num - 1) {
7204 PMD_DRV_LOG(ERR,
7205 "vlan number doesn't match");
7206 return I40E_ERR_PARAM;
7207 }
7208 rte_memcpy(&mv_f[i].macaddr,
7209 addr, ETH_ADDR_LEN);
7210 mv_f[i].vlan_id =
7211 j * I40E_UINT32_BIT_SIZE + k;
7212 i++;
7213 }
7214 }
7215 }
7216 }
7217 return I40E_SUCCESS;
7218}
7219
7220static inline int
7221i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7222 struct i40e_macvlan_filter *mv_f,
7223 int num,
7224 uint16_t vlan)
7225{
7226 int i = 0;
7227 struct i40e_mac_filter *f;
7228
7229 if (num < vsi->mac_num)
7230 return I40E_ERR_PARAM;
7231
7232 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7233 if (i > num - 1) {
7234 PMD_DRV_LOG(ERR, "buffer number not match");
7235 return I40E_ERR_PARAM;
7236 }
7237 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7238 ETH_ADDR_LEN);
7239 mv_f[i].vlan_id = vlan;
7240 mv_f[i].filter_type = f->mac_info.filter_type;
7241 i++;
7242 }
7243
7244 return I40E_SUCCESS;
7245}
7246
7247static int
7248i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7249{
7250 int i, j, num;
7251 struct i40e_mac_filter *f;
7252 struct i40e_macvlan_filter *mv_f;
7253 int ret = I40E_SUCCESS;
7254
7255 if (vsi == NULL || vsi->mac_num == 0)
7256 return I40E_ERR_PARAM;
7257
7258
7259 if (vsi->vlan_num == 0)
7260 num = vsi->mac_num;
7261 else
7262 num = vsi->mac_num * vsi->vlan_num;
7263
7264 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7265 if (mv_f == NULL) {
7266 PMD_DRV_LOG(ERR, "failed to allocate memory");
7267 return I40E_ERR_NO_MEMORY;
7268 }
7269
7270 i = 0;
7271 if (vsi->vlan_num == 0) {
7272 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7273 rte_memcpy(&mv_f[i].macaddr,
7274 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7275 mv_f[i].filter_type = f->mac_info.filter_type;
7276 mv_f[i].vlan_id = 0;
7277 i++;
7278 }
7279 } else {
7280 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7281 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7282 vsi->vlan_num, &f->mac_info.mac_addr);
7283 if (ret != I40E_SUCCESS)
7284 goto DONE;
7285 for (j = i; j < i + vsi->vlan_num; j++)
7286 mv_f[j].filter_type = f->mac_info.filter_type;
7287 i += vsi->vlan_num;
7288 }
7289 }
7290
7291 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7292DONE:
7293 rte_free(mv_f);
7294
7295 return ret;
7296}
7297
7298int
7299i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7300{
7301 struct i40e_macvlan_filter *mv_f;
7302 int mac_num;
7303 int ret = I40E_SUCCESS;
7304
7305 if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7306 return I40E_ERR_PARAM;
7307
7308
7309 if (i40e_find_vlan_filter(vsi,vlan))
7310 return I40E_SUCCESS;
7311
7312 mac_num = vsi->mac_num;
7313
7314 if (mac_num == 0) {
7315 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7316 return I40E_ERR_PARAM;
7317 }
7318
7319 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7320
7321 if (mv_f == NULL) {
7322 PMD_DRV_LOG(ERR, "failed to allocate memory");
7323 return I40E_ERR_NO_MEMORY;
7324 }
7325
7326 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7327
7328 if (ret != I40E_SUCCESS)
7329 goto DONE;
7330
7331 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7332
7333 if (ret != I40E_SUCCESS)
7334 goto DONE;
7335
7336 i40e_set_vlan_filter(vsi, vlan, 1);
7337
7338 vsi->vlan_num++;
7339 ret = I40E_SUCCESS;
7340DONE:
7341 rte_free(mv_f);
7342 return ret;
7343}
7344
7345int
7346i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7347{
7348 struct i40e_macvlan_filter *mv_f;
7349 int mac_num;
7350 int ret = I40E_SUCCESS;
7351
7352
7353
7354
7355
7356 if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7357 return I40E_ERR_PARAM;
7358
7359
7360 if (!i40e_find_vlan_filter(vsi, vlan))
7361 return I40E_ERR_PARAM;
7362
7363 mac_num = vsi->mac_num;
7364
7365 if (mac_num == 0) {
7366 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7367 return I40E_ERR_PARAM;
7368 }
7369
7370 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7371
7372 if (mv_f == NULL) {
7373 PMD_DRV_LOG(ERR, "failed to allocate memory");
7374 return I40E_ERR_NO_MEMORY;
7375 }
7376
7377 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7378
7379 if (ret != I40E_SUCCESS)
7380 goto DONE;
7381
7382 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7383
7384 if (ret != I40E_SUCCESS)
7385 goto DONE;
7386
7387
7388 if (vsi->vlan_num == 1) {
7389 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7390 if (ret != I40E_SUCCESS)
7391 goto DONE;
7392
7393 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7394 if (ret != I40E_SUCCESS)
7395 goto DONE;
7396 }
7397
7398 i40e_set_vlan_filter(vsi, vlan, 0);
7399
7400 vsi->vlan_num--;
7401 ret = I40E_SUCCESS;
7402DONE:
7403 rte_free(mv_f);
7404 return ret;
7405}
7406
7407int
7408i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7409{
7410 struct i40e_mac_filter *f;
7411 struct i40e_macvlan_filter *mv_f;
7412 int i, vlan_num = 0;
7413 int ret = I40E_SUCCESS;
7414
7415
7416 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7417 if (f != NULL)
7418 return I40E_SUCCESS;
7419 if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7420 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7421
7422
7423
7424
7425
7426 if (vsi->vlan_num == 0) {
7427 i40e_set_vlan_filter(vsi, 0, 1);
7428 vsi->vlan_num = 1;
7429 }
7430 vlan_num = vsi->vlan_num;
7431 } else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7432 mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7433 vlan_num = 1;
7434
7435 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7436 if (mv_f == NULL) {
7437 PMD_DRV_LOG(ERR, "failed to allocate memory");
7438 return I40E_ERR_NO_MEMORY;
7439 }
7440
7441 for (i = 0; i < vlan_num; i++) {
7442 mv_f[i].filter_type = mac_filter->filter_type;
7443 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7444 ETH_ADDR_LEN);
7445 }
7446
7447 if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7448 mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7449 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7450 &mac_filter->mac_addr);
7451 if (ret != I40E_SUCCESS)
7452 goto DONE;
7453 }
7454
7455 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7456 if (ret != I40E_SUCCESS)
7457 goto DONE;
7458
7459
7460 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7461 if (f == NULL) {
7462 PMD_DRV_LOG(ERR, "failed to allocate memory");
7463 ret = I40E_ERR_NO_MEMORY;
7464 goto DONE;
7465 }
7466 rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7467 ETH_ADDR_LEN);
7468 f->mac_info.filter_type = mac_filter->filter_type;
7469 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7470 vsi->mac_num++;
7471
7472 ret = I40E_SUCCESS;
7473DONE:
7474 rte_free(mv_f);
7475
7476 return ret;
7477}
7478
7479int
7480i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7481{
7482 struct i40e_mac_filter *f;
7483 struct i40e_macvlan_filter *mv_f;
7484 int i, vlan_num;
7485 enum i40e_mac_filter_type filter_type;
7486 int ret = I40E_SUCCESS;
7487
7488
7489 f = i40e_find_mac_filter(vsi, addr);
7490 if (f == NULL)
7491 return I40E_ERR_PARAM;
7492
7493 vlan_num = vsi->vlan_num;
7494 filter_type = f->mac_info.filter_type;
7495 if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7496 filter_type == I40E_MACVLAN_HASH_MATCH) {
7497 if (vlan_num == 0) {
7498 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7499 return I40E_ERR_PARAM;
7500 }
7501 } else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7502 filter_type == I40E_MAC_HASH_MATCH)
7503 vlan_num = 1;
7504
7505 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7506 if (mv_f == NULL) {
7507 PMD_DRV_LOG(ERR, "failed to allocate memory");
7508 return I40E_ERR_NO_MEMORY;
7509 }
7510
7511 for (i = 0; i < vlan_num; i++) {
7512 mv_f[i].filter_type = filter_type;
7513 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7514 ETH_ADDR_LEN);
7515 }
7516 if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7517 filter_type == I40E_MACVLAN_HASH_MATCH) {
7518 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7519 if (ret != I40E_SUCCESS)
7520 goto DONE;
7521 }
7522
7523 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7524 if (ret != I40E_SUCCESS)
7525 goto DONE;
7526
7527
7528 TAILQ_REMOVE(&vsi->mac_list, f, next);
7529 rte_free(f);
7530 vsi->mac_num--;
7531
7532 ret = I40E_SUCCESS;
7533DONE:
7534 rte_free(mv_f);
7535 return ret;
7536}
7537
7538
7539uint64_t
7540i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7541{
7542 uint64_t hena = 0;
7543 int i;
7544
7545 if (!flags)
7546 return hena;
7547
7548 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7549 if (flags & (1ULL << i))
7550 hena |= adapter->pctypes_tbl[i];
7551 }
7552
7553 return hena;
7554}
7555
7556
7557uint64_t
7558i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7559{
7560 uint64_t rss_hf = 0;
7561
7562 if (!flags)
7563 return rss_hf;
7564 int i;
7565
7566 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7567 if (flags & adapter->pctypes_tbl[i])
7568 rss_hf |= (1ULL << i);
7569 }
7570 return rss_hf;
7571}
7572
7573
7574void
7575i40e_pf_disable_rss(struct i40e_pf *pf)
7576{
7577 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7578
7579 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7580 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7581 I40E_WRITE_FLUSH(hw);
7582}
7583
7584int
7585i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7586{
7587 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7588 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7589 uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7590 I40E_VFQF_HKEY_MAX_INDEX :
7591 I40E_PFQF_HKEY_MAX_INDEX;
7592
7593 if (!key || key_len == 0) {
7594 PMD_DRV_LOG(DEBUG, "No key to be configured");
7595 return 0;
7596 } else if (key_len != (key_idx + 1) *
7597 sizeof(uint32_t)) {
7598 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7599 return -EINVAL;
7600 }
7601
7602 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7603 struct i40e_aqc_get_set_rss_key_data *key_dw =
7604 (struct i40e_aqc_get_set_rss_key_data *)key;
7605 enum i40e_status_code status =
7606 i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7607
7608 if (status) {
7609 PMD_DRV_LOG(ERR,
7610 "Failed to configure RSS key via AQ, error status: %d",
7611 status);
7612 return -EIO;
7613 }
7614 } else {
7615 uint32_t *hash_key = (uint32_t *)key;
7616 uint16_t i;
7617
7618 if (vsi->type == I40E_VSI_SRIOV) {
7619 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7620 I40E_WRITE_REG(
7621 hw,
7622 I40E_VFQF_HKEY1(i, vsi->user_param),
7623 hash_key[i]);
7624
7625 } else {
7626 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7627 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7628 hash_key[i]);
7629 }
7630 I40E_WRITE_FLUSH(hw);
7631 }
7632
7633 return 0;
7634}
7635
7636static int
7637i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7638{
7639 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7640 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7641 uint32_t reg;
7642 int ret;
7643
7644 if (!key || !key_len)
7645 return 0;
7646
7647 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7648 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7649 (struct i40e_aqc_get_set_rss_key_data *)key);
7650 if (ret) {
7651 PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7652 return ret;
7653 }
7654 } else {
7655 uint32_t *key_dw = (uint32_t *)key;
7656 uint16_t i;
7657
7658 if (vsi->type == I40E_VSI_SRIOV) {
7659 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7660 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7661 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7662 }
7663 *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7664 sizeof(uint32_t);
7665 } else {
7666 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7667 reg = I40E_PFQF_HKEY(i);
7668 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7669 }
7670 *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7671 sizeof(uint32_t);
7672 }
7673 }
7674 return 0;
7675}
7676
7677static int
7678i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7679{
7680 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7681 uint64_t hena;
7682 int ret;
7683
7684 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7685 rss_conf->rss_key_len);
7686 if (ret)
7687 return ret;
7688
7689 hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7690 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7691 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7692 I40E_WRITE_FLUSH(hw);
7693
7694 return 0;
7695}
7696
7697static int
7698i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7699 struct rte_eth_rss_conf *rss_conf)
7700{
7701 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7702 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7703 uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7704 uint64_t hena;
7705
7706 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7707 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7708
7709 if (!(hena & pf->adapter->pctypes_mask)) {
7710 if (rss_hf != 0)
7711 return -EINVAL;
7712 return 0;
7713 }
7714
7715 if (rss_hf == 0)
7716 return -EINVAL;
7717
7718 return i40e_hw_rss_hash_set(pf, rss_conf);
7719}
7720
7721static int
7722i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7723 struct rte_eth_rss_conf *rss_conf)
7724{
7725 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7726 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7727 uint64_t hena;
7728 int ret;
7729
7730 if (!rss_conf)
7731 return -EINVAL;
7732
7733 ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7734 &rss_conf->rss_key_len);
7735 if (ret)
7736 return ret;
7737
7738 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7739 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7740 rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7741
7742 return 0;
7743}
7744
7745static int
7746i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7747{
7748 switch (filter_type) {
7749 case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7750 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7751 break;
7752 case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7753 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7754 break;
7755 case RTE_TUNNEL_FILTER_IMAC_TENID:
7756 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7757 break;
7758 case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7759 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7760 break;
7761 case ETH_TUNNEL_FILTER_IMAC:
7762 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7763 break;
7764 case ETH_TUNNEL_FILTER_OIP:
7765 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7766 break;
7767 case ETH_TUNNEL_FILTER_IIP:
7768 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7769 break;
7770 default:
7771 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7772 return -EINVAL;
7773 }
7774
7775 return 0;
7776}
7777
7778
7779static int
7780i40e_tunnel_filter_convert(
7781 struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7782 struct i40e_tunnel_filter *tunnel_filter)
7783{
7784 rte_ether_addr_copy((struct rte_ether_addr *)
7785 &cld_filter->element.outer_mac,
7786 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7787 rte_ether_addr_copy((struct rte_ether_addr *)
7788 &cld_filter->element.inner_mac,
7789 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7790 tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7791 if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7792 I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7793 I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7794 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7795 else
7796 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7797 tunnel_filter->input.flags = cld_filter->element.flags;
7798 tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7799 tunnel_filter->queue = cld_filter->element.queue_number;
7800 rte_memcpy(tunnel_filter->input.general_fields,
7801 cld_filter->general_fields,
7802 sizeof(cld_filter->general_fields));
7803
7804 return 0;
7805}
7806
7807
7808struct i40e_tunnel_filter *
7809i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7810 const struct i40e_tunnel_filter_input *input)
7811{
7812 int ret;
7813
7814 ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7815 if (ret < 0)
7816 return NULL;
7817
7818 return tunnel_rule->hash_map[ret];
7819}
7820
7821
7822static int
7823i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7824 struct i40e_tunnel_filter *tunnel_filter)
7825{
7826 struct i40e_tunnel_rule *rule = &pf->tunnel;
7827 int ret;
7828
7829 ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7830 if (ret < 0) {
7831 PMD_DRV_LOG(ERR,
7832 "Failed to insert tunnel filter to hash table %d!",
7833 ret);
7834 return ret;
7835 }
7836 rule->hash_map[ret] = tunnel_filter;
7837
7838 TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7839
7840 return 0;
7841}
7842
7843
7844int
7845i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7846 struct i40e_tunnel_filter_input *input)
7847{
7848 struct i40e_tunnel_rule *rule = &pf->tunnel;
7849 struct i40e_tunnel_filter *tunnel_filter;
7850 int ret;
7851
7852 ret = rte_hash_del_key(rule->hash_table, input);
7853 if (ret < 0) {
7854 PMD_DRV_LOG(ERR,
7855 "Failed to delete tunnel filter to hash table %d!",
7856 ret);
7857 return ret;
7858 }
7859 tunnel_filter = rule->hash_map[ret];
7860 rule->hash_map[ret] = NULL;
7861
7862 TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7863 rte_free(tunnel_filter);
7864
7865 return 0;
7866}
7867
7868#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7869#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
7870#define I40E_TR_GENEVE_KEY_MASK 0x8
7871#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
7872#define I40E_TR_GRE_KEY_MASK 0x400
7873#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
7874#define I40E_TR_GRE_NO_KEY_MASK 0x8000
7875#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7876#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7877#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7878#define I40E_DIRECTION_INGRESS_KEY 0x8000
7879#define I40E_TR_L4_TYPE_TCP 0x2
7880#define I40E_TR_L4_TYPE_UDP 0x4
7881#define I40E_TR_L4_TYPE_SCTP 0x8
7882
7883static enum
7884i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7885{
7886 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7887 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7888 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7889 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7890 enum i40e_status_code status = I40E_SUCCESS;
7891
7892 if (pf->support_multi_driver) {
7893 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7894 return I40E_NOT_SUPPORTED;
7895 }
7896
7897 memset(&filter_replace, 0,
7898 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7899 memset(&filter_replace_buf, 0,
7900 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7901
7902
7903 filter_replace.old_filter_type =
7904 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7905 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7906 filter_replace.tr_bit = 0;
7907
7908
7909 filter_replace_buf.data[0] =
7910 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7911 filter_replace_buf.data[0] |=
7912 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7913 filter_replace_buf.data[2] = 0xFF;
7914 filter_replace_buf.data[3] = 0xFF;
7915 filter_replace_buf.data[4] =
7916 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7917 filter_replace_buf.data[4] |=
7918 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7919 filter_replace_buf.data[7] = 0xF0;
7920 filter_replace_buf.data[8]
7921 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7922 filter_replace_buf.data[8] |=
7923 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7924 filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7925 I40E_TR_GENEVE_KEY_MASK |
7926 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7927 filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7928 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7929 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7930
7931 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7932 &filter_replace_buf);
7933 if (!status && (filter_replace.old_filter_type !=
7934 filter_replace.new_filter_type))
7935 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7936 " original: 0x%x, new: 0x%x",
7937 dev->device->name,
7938 filter_replace.old_filter_type,
7939 filter_replace.new_filter_type);
7940
7941 return status;
7942}
7943
7944static enum
7945i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7946{
7947 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7948 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7949 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7950 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7951 enum i40e_status_code status = I40E_SUCCESS;
7952
7953 if (pf->support_multi_driver) {
7954 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7955 return I40E_NOT_SUPPORTED;
7956 }
7957
7958
7959 memset(&filter_replace, 0,
7960 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7961 memset(&filter_replace_buf, 0,
7962 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7963 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7964 I40E_AQC_MIRROR_CLOUD_FILTER;
7965 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7966 filter_replace.new_filter_type =
7967 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7968
7969 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7970 filter_replace_buf.data[0] |=
7971 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7972 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7973 filter_replace_buf.data[4] |=
7974 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7975 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7976 &filter_replace_buf);
7977 if (status < 0)
7978 return status;
7979 if (filter_replace.old_filter_type !=
7980 filter_replace.new_filter_type)
7981 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7982 " original: 0x%x, new: 0x%x",
7983 dev->device->name,
7984 filter_replace.old_filter_type,
7985 filter_replace.new_filter_type);
7986
7987
7988 memset(&filter_replace, 0,
7989 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7990 memset(&filter_replace_buf, 0,
7991 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7992
7993 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7994 I40E_AQC_MIRROR_CLOUD_FILTER;
7995 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7996 filter_replace.new_filter_type =
7997 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7998
7999 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8000 filter_replace_buf.data[0] |=
8001 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8002 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8003 filter_replace_buf.data[4] |=
8004 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8005
8006 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8007 &filter_replace_buf);
8008 if (!status && (filter_replace.old_filter_type !=
8009 filter_replace.new_filter_type))
8010 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8011 " original: 0x%x, new: 0x%x",
8012 dev->device->name,
8013 filter_replace.old_filter_type,
8014 filter_replace.new_filter_type);
8015
8016 return status;
8017}
8018
8019static enum i40e_status_code
8020i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8021{
8022 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
8023 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
8024 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8025 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8026 enum i40e_status_code status = I40E_SUCCESS;
8027
8028 if (pf->support_multi_driver) {
8029 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8030 return I40E_NOT_SUPPORTED;
8031 }
8032
8033
8034 memset(&filter_replace, 0,
8035 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8036 memset(&filter_replace_buf, 0,
8037 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8038
8039 filter_replace.old_filter_type =
8040 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8041 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8042 filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8043 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8044
8045 filter_replace_buf.data[0] =
8046 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8047 filter_replace_buf.data[0] |=
8048 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8049 filter_replace_buf.data[2] = 0xFF;
8050 filter_replace_buf.data[3] = 0xFF;
8051 filter_replace_buf.data[4] =
8052 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8053 filter_replace_buf.data[4] |=
8054 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8055 filter_replace_buf.data[6] = 0xFF;
8056 filter_replace_buf.data[7] = 0xFF;
8057 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8058 &filter_replace_buf);
8059 if (status < 0)
8060 return status;
8061 if (filter_replace.old_filter_type !=
8062 filter_replace.new_filter_type)
8063 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8064 " original: 0x%x, new: 0x%x",
8065 dev->device->name,
8066 filter_replace.old_filter_type,
8067 filter_replace.new_filter_type);
8068
8069
8070 memset(&filter_replace, 0,
8071 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8072 memset(&filter_replace_buf, 0,
8073 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8074
8075 filter_replace.old_filter_type =
8076 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8077 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8078 filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8079 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8080
8081 filter_replace_buf.data[0] =
8082 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8083 filter_replace_buf.data[0] |=
8084 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8085 filter_replace_buf.data[2] = 0xFF;
8086 filter_replace_buf.data[3] = 0xFF;
8087 filter_replace_buf.data[4] =
8088 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8089 filter_replace_buf.data[4] |=
8090 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8091 filter_replace_buf.data[6] = 0xFF;
8092 filter_replace_buf.data[7] = 0xFF;
8093
8094 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8095 &filter_replace_buf);
8096 if (!status && (filter_replace.old_filter_type !=
8097 filter_replace.new_filter_type))
8098 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8099 " original: 0x%x, new: 0x%x",
8100 dev->device->name,
8101 filter_replace.old_filter_type,
8102 filter_replace.new_filter_type);
8103
8104 return status;
8105}
8106
8107static enum
8108i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8109{
8110 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
8111 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
8112 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8113 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8114 enum i40e_status_code status = I40E_SUCCESS;
8115
8116 if (pf->support_multi_driver) {
8117 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8118 return I40E_NOT_SUPPORTED;
8119 }
8120
8121
8122 memset(&filter_replace, 0,
8123 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8124 memset(&filter_replace_buf, 0,
8125 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8126 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8127 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8128 filter_replace.new_filter_type =
8129 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8130
8131 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8132 filter_replace_buf.data[0] |=
8133 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8134 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8135 filter_replace_buf.data[4] |=
8136 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8137 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8138 &filter_replace_buf);
8139 if (status < 0)
8140 return status;
8141 if (filter_replace.old_filter_type !=
8142 filter_replace.new_filter_type)
8143 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8144 " original: 0x%x, new: 0x%x",
8145 dev->device->name,
8146 filter_replace.old_filter_type,
8147 filter_replace.new_filter_type);
8148
8149
8150 memset(&filter_replace, 0,
8151 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8152 memset(&filter_replace_buf, 0,
8153 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8154 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8155 filter_replace.old_filter_type =
8156 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8157 filter_replace.new_filter_type =
8158 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8159
8160 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8161 filter_replace_buf.data[0] |=
8162 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8163 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8164 filter_replace_buf.data[4] |=
8165 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8166
8167 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8168 &filter_replace_buf);
8169 if (!status && (filter_replace.old_filter_type !=
8170 filter_replace.new_filter_type))
8171 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8172 " original: 0x%x, new: 0x%x",
8173 dev->device->name,
8174 filter_replace.old_filter_type,
8175 filter_replace.new_filter_type);
8176
8177 return status;
8178}
8179
8180static enum i40e_status_code
8181i40e_replace_port_l1_filter(struct i40e_pf *pf,
8182 enum i40e_l4_port_type l4_port_type)
8183{
8184 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
8185 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
8186 enum i40e_status_code status = I40E_SUCCESS;
8187 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8188 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8189
8190 if (pf->support_multi_driver) {
8191 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8192 return I40E_NOT_SUPPORTED;
8193 }
8194
8195 memset(&filter_replace, 0,
8196 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8197 memset(&filter_replace_buf, 0,
8198 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8199
8200
8201 if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8202 filter_replace.old_filter_type =
8203 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8204 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8205 filter_replace_buf.data[8] =
8206 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8207 } else {
8208 filter_replace.old_filter_type =
8209 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8210 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8211 filter_replace_buf.data[8] =
8212 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8213 }
8214
8215 filter_replace.tr_bit = 0;
8216
8217 filter_replace_buf.data[0] =
8218 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8219 filter_replace_buf.data[0] |=
8220 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8221 filter_replace_buf.data[2] = 0x00;
8222 filter_replace_buf.data[3] =
8223 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8224 filter_replace_buf.data[4] =
8225 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8226 filter_replace_buf.data[4] |=
8227 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8228 filter_replace_buf.data[5] = 0x00;
8229 filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8230 I40E_TR_L4_TYPE_TCP |
8231 I40E_TR_L4_TYPE_SCTP;
8232 filter_replace_buf.data[7] = 0x00;
8233 filter_replace_buf.data[8] |=
8234 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8235 filter_replace_buf.data[9] = 0x00;
8236 filter_replace_buf.data[10] = 0xFF;
8237 filter_replace_buf.data[11] = 0xFF;
8238
8239 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8240 &filter_replace_buf);
8241 if (!status && filter_replace.old_filter_type !=
8242 filter_replace.new_filter_type)
8243 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8244 " original: 0x%x, new: 0x%x",
8245 dev->device->name,
8246 filter_replace.old_filter_type,
8247 filter_replace.new_filter_type);
8248
8249 return status;
8250}
8251
8252static enum i40e_status_code
8253i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8254 enum i40e_l4_port_type l4_port_type)
8255{
8256 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
8257 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
8258 enum i40e_status_code status = I40E_SUCCESS;
8259 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8260 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8261
8262 if (pf->support_multi_driver) {
8263 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8264 return I40E_NOT_SUPPORTED;
8265 }
8266
8267 memset(&filter_replace, 0,
8268 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8269 memset(&filter_replace_buf, 0,
8270 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8271
8272 if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8273 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8274 filter_replace.new_filter_type =
8275 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8276 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8277 } else {
8278 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8279 filter_replace.new_filter_type =
8280 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8281 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8282 }
8283
8284 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8285 filter_replace.tr_bit = 0;
8286
8287 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8288 filter_replace_buf.data[0] |=
8289 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8290 filter_replace_buf.data[4] |=
8291 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8292 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8293 &filter_replace_buf);
8294
8295 if (!status && filter_replace.old_filter_type !=
8296 filter_replace.new_filter_type)
8297 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8298 " original: 0x%x, new: 0x%x",
8299 dev->device->name,
8300 filter_replace.old_filter_type,
8301 filter_replace.new_filter_type);
8302
8303 return status;
8304}
8305
8306int
8307i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8308 struct i40e_tunnel_filter_conf *tunnel_filter,
8309 uint8_t add)
8310{
8311 uint16_t ip_type;
8312 uint32_t ipv4_addr, ipv4_addr_le;
8313 uint8_t i, tun_type = 0;
8314
8315 uint32_t convert_ipv6[4];
8316 int val, ret = 0;
8317 struct i40e_pf_vf *vf = NULL;
8318 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8319 struct i40e_vsi *vsi;
8320 struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8321 struct i40e_aqc_cloud_filters_element_bb *pfilter;
8322 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8323 struct i40e_tunnel_filter *tunnel, *node;
8324 struct i40e_tunnel_filter check_filter;
8325 uint32_t teid_le;
8326 bool big_buffer = 0;
8327
8328 cld_filter = rte_zmalloc("tunnel_filter",
8329 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8330 0);
8331
8332 if (cld_filter == NULL) {
8333 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8334 return -ENOMEM;
8335 }
8336 pfilter = cld_filter;
8337
8338 rte_ether_addr_copy(&tunnel_filter->outer_mac,
8339 (struct rte_ether_addr *)&pfilter->element.outer_mac);
8340 rte_ether_addr_copy(&tunnel_filter->inner_mac,
8341 (struct rte_ether_addr *)&pfilter->element.inner_mac);
8342
8343 pfilter->element.inner_vlan =
8344 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8345 if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8346 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8347 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8348 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8349 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8350 &ipv4_addr_le,
8351 sizeof(pfilter->element.ipaddr.v4.data));
8352 } else {
8353 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8354 for (i = 0; i < 4; i++) {
8355 convert_ipv6[i] =
8356 rte_cpu_to_le_32(rte_be_to_cpu_32(
8357 tunnel_filter->ip_addr.ipv6_addr[i]));
8358 }
8359 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8360 &convert_ipv6,
8361 sizeof(pfilter->element.ipaddr.v6.data));
8362 }
8363
8364
8365 switch (tunnel_filter->tunnel_type) {
8366 case I40E_TUNNEL_TYPE_VXLAN:
8367 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8368 break;
8369 case I40E_TUNNEL_TYPE_NVGRE:
8370 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8371 break;
8372 case I40E_TUNNEL_TYPE_IP_IN_GRE:
8373 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8374 break;
8375 case I40E_TUNNEL_TYPE_MPLSoUDP:
8376 if (!pf->mpls_replace_flag) {
8377 i40e_replace_mpls_l1_filter(pf);
8378 i40e_replace_mpls_cloud_filter(pf);
8379 pf->mpls_replace_flag = 1;
8380 }
8381 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8382 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8383 teid_le >> 4;
8384 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8385 (teid_le & 0xF) << 12;
8386 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8387 0x40;
8388 big_buffer = 1;
8389 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8390 break;
8391 case I40E_TUNNEL_TYPE_MPLSoGRE:
8392 if (!pf->mpls_replace_flag) {
8393 i40e_replace_mpls_l1_filter(pf);
8394 i40e_replace_mpls_cloud_filter(pf);
8395 pf->mpls_replace_flag = 1;
8396 }
8397 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8398 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8399 teid_le >> 4;
8400 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8401 (teid_le & 0xF) << 12;
8402 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8403 0x0;
8404 big_buffer = 1;
8405 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8406 break;
8407 case I40E_TUNNEL_TYPE_GTPC:
8408 if (!pf->gtp_replace_flag) {
8409 i40e_replace_gtp_l1_filter(pf);
8410 i40e_replace_gtp_cloud_filter(pf);
8411 pf->gtp_replace_flag = 1;
8412 }
8413 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8414 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8415 (teid_le >> 16) & 0xFFFF;
8416 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8417 teid_le & 0xFFFF;
8418 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8419 0x0;
8420 big_buffer = 1;
8421 break;
8422 case I40E_TUNNEL_TYPE_GTPU:
8423 if (!pf->gtp_replace_flag) {
8424 i40e_replace_gtp_l1_filter(pf);
8425 i40e_replace_gtp_cloud_filter(pf);
8426 pf->gtp_replace_flag = 1;
8427 }
8428 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8429 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8430 (teid_le >> 16) & 0xFFFF;
8431 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8432 teid_le & 0xFFFF;
8433 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8434 0x0;
8435 big_buffer = 1;
8436 break;
8437 case I40E_TUNNEL_TYPE_QINQ:
8438 if (!pf->qinq_replace_flag) {
8439 ret = i40e_cloud_filter_qinq_create(pf);
8440 if (ret < 0)
8441 PMD_DRV_LOG(DEBUG,
8442 "QinQ tunnel filter already created.");
8443 pf->qinq_replace_flag = 1;
8444 }
8445
8446
8447
8448
8449
8450 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8451 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8452 big_buffer = 1;
8453 break;
8454 case I40E_CLOUD_TYPE_UDP:
8455 case I40E_CLOUD_TYPE_TCP:
8456 case I40E_CLOUD_TYPE_SCTP:
8457 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8458 if (!pf->sport_replace_flag) {
8459 i40e_replace_port_l1_filter(pf,
8460 tunnel_filter->l4_port_type);
8461 i40e_replace_port_cloud_filter(pf,
8462 tunnel_filter->l4_port_type);
8463 pf->sport_replace_flag = 1;
8464 }
8465 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8466 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8467 I40E_DIRECTION_INGRESS_KEY;
8468
8469 if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8470 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8471 I40E_TR_L4_TYPE_UDP;
8472 else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8473 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8474 I40E_TR_L4_TYPE_TCP;
8475 else
8476 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8477 I40E_TR_L4_TYPE_SCTP;
8478
8479 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8480 (teid_le >> 16) & 0xFFFF;
8481 big_buffer = 1;
8482 } else {
8483 if (!pf->dport_replace_flag) {
8484 i40e_replace_port_l1_filter(pf,
8485 tunnel_filter->l4_port_type);
8486 i40e_replace_port_cloud_filter(pf,
8487 tunnel_filter->l4_port_type);
8488 pf->dport_replace_flag = 1;
8489 }
8490 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8491 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8492 I40E_DIRECTION_INGRESS_KEY;
8493
8494 if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8495 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8496 I40E_TR_L4_TYPE_UDP;
8497 else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8498 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8499 I40E_TR_L4_TYPE_TCP;
8500 else
8501 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8502 I40E_TR_L4_TYPE_SCTP;
8503
8504 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8505 (teid_le >> 16) & 0xFFFF;
8506 big_buffer = 1;
8507 }
8508
8509 break;
8510 default:
8511
8512 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8513 rte_free(cld_filter);
8514 return -EINVAL;
8515 }
8516
8517 if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8518 pfilter->element.flags =
8519 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8520 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8521 pfilter->element.flags =
8522 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8523 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8524 pfilter->element.flags =
8525 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8526 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8527 pfilter->element.flags =
8528 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8529 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8530 pfilter->element.flags |=
8531 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8532 else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8533 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8534 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8535 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8536 pfilter->element.flags |=
8537 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8538 else
8539 pfilter->element.flags |=
8540 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8541 } else {
8542 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8543 &pfilter->element.flags);
8544 if (val < 0) {
8545 rte_free(cld_filter);
8546 return -EINVAL;
8547 }
8548 }
8549
8550 pfilter->element.flags |= rte_cpu_to_le_16(
8551 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8552 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8553 pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8554 pfilter->element.queue_number =
8555 rte_cpu_to_le_16(tunnel_filter->queue_id);
8556
8557 if (!tunnel_filter->is_to_vf)
8558 vsi = pf->main_vsi;
8559 else {
8560 if (tunnel_filter->vf_id >= pf->vf_num) {
8561 PMD_DRV_LOG(ERR, "Invalid argument.");
8562 rte_free(cld_filter);
8563 return -EINVAL;
8564 }
8565 vf = &pf->vfs[tunnel_filter->vf_id];
8566 vsi = vf->vsi;
8567 }
8568
8569
8570 memset(&check_filter, 0, sizeof(check_filter));
8571 i40e_tunnel_filter_convert(cld_filter, &check_filter);
8572 check_filter.is_to_vf = tunnel_filter->is_to_vf;
8573 check_filter.vf_id = tunnel_filter->vf_id;
8574 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8575 if (add && node) {
8576 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8577 rte_free(cld_filter);
8578 return -EINVAL;
8579 }
8580
8581 if (!add && !node) {
8582 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8583 rte_free(cld_filter);
8584 return -EINVAL;
8585 }
8586
8587 if (add) {
8588 if (big_buffer)
8589 ret = i40e_aq_add_cloud_filters_bb(hw,
8590 vsi->seid, cld_filter, 1);
8591 else
8592 ret = i40e_aq_add_cloud_filters(hw,
8593 vsi->seid, &cld_filter->element, 1);
8594 if (ret < 0) {
8595 PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8596 rte_free(cld_filter);
8597 return -ENOTSUP;
8598 }
8599 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8600 if (tunnel == NULL) {
8601 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8602 rte_free(cld_filter);
8603 return -ENOMEM;
8604 }
8605
8606 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8607 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8608 if (ret < 0)
8609 rte_free(tunnel);
8610 } else {
8611 if (big_buffer)
8612 ret = i40e_aq_rem_cloud_filters_bb(
8613 hw, vsi->seid, cld_filter, 1);
8614 else
8615 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8616 &cld_filter->element, 1);
8617 if (ret < 0) {
8618 PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8619 rte_free(cld_filter);
8620 return -ENOTSUP;
8621 }
8622 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8623 }
8624
8625 rte_free(cld_filter);
8626 return ret;
8627}
8628
8629static int
8630i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8631{
8632 uint8_t i;
8633
8634 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8635 if (pf->vxlan_ports[i] == port)
8636 return i;
8637 }
8638
8639 return -1;
8640}
8641
8642static int
8643i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8644{
8645 int idx, ret;
8646 uint8_t filter_idx = 0;
8647 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8648
8649 idx = i40e_get_vxlan_port_idx(pf, port);
8650
8651
8652 if (idx >= 0) {
8653 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8654 return -EINVAL;
8655 }
8656
8657
8658 idx = i40e_get_vxlan_port_idx(pf, 0);
8659 if (idx < 0) {
8660 PMD_DRV_LOG(ERR,
8661 "Maximum number of UDP ports reached, not adding port %d",
8662 port);
8663 return -ENOSPC;
8664 }
8665
8666 ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8667 &filter_idx, NULL);
8668 if (ret < 0) {
8669 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8670 return -1;
8671 }
8672
8673 PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8674 port, filter_idx);
8675
8676
8677 pf->vxlan_ports[idx] = port;
8678 pf->vxlan_bitmap |= (1 << idx);
8679
8680 if (!(pf->flags & I40E_FLAG_VXLAN))
8681 pf->flags |= I40E_FLAG_VXLAN;
8682
8683 return 0;
8684}
8685
8686static int
8687i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8688{
8689 int idx;
8690 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8691
8692 if (!(pf->flags & I40E_FLAG_VXLAN)) {
8693 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8694 return -EINVAL;
8695 }
8696
8697 idx = i40e_get_vxlan_port_idx(pf, port);
8698
8699 if (idx < 0) {
8700 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8701 return -EINVAL;
8702 }
8703
8704 if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8705 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8706 return -1;
8707 }
8708
8709 PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8710 port, idx);
8711
8712 pf->vxlan_ports[idx] = 0;
8713 pf->vxlan_bitmap &= ~(1 << idx);
8714
8715 if (!pf->vxlan_bitmap)
8716 pf->flags &= ~I40E_FLAG_VXLAN;
8717
8718 return 0;
8719}
8720
8721
8722static int
8723i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8724 struct rte_eth_udp_tunnel *udp_tunnel)
8725{
8726 int ret = 0;
8727 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8728
8729 if (udp_tunnel == NULL)
8730 return -EINVAL;
8731
8732 switch (udp_tunnel->prot_type) {
8733 case RTE_TUNNEL_TYPE_VXLAN:
8734 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8735 I40E_AQC_TUNNEL_TYPE_VXLAN);
8736 break;
8737 case RTE_TUNNEL_TYPE_VXLAN_GPE:
8738 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8739 I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8740 break;
8741 case RTE_TUNNEL_TYPE_GENEVE:
8742 case RTE_TUNNEL_TYPE_TEREDO:
8743 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8744 ret = -1;
8745 break;
8746
8747 default:
8748 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8749 ret = -1;
8750 break;
8751 }
8752
8753 return ret;
8754}
8755
8756
8757static int
8758i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8759 struct rte_eth_udp_tunnel *udp_tunnel)
8760{
8761 int ret = 0;
8762 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8763
8764 if (udp_tunnel == NULL)
8765 return -EINVAL;
8766
8767 switch (udp_tunnel->prot_type) {
8768 case RTE_TUNNEL_TYPE_VXLAN:
8769 case RTE_TUNNEL_TYPE_VXLAN_GPE:
8770 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8771 break;
8772 case RTE_TUNNEL_TYPE_GENEVE:
8773 case RTE_TUNNEL_TYPE_TEREDO:
8774 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8775 ret = -1;
8776 break;
8777 default:
8778 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8779 ret = -1;
8780 break;
8781 }
8782
8783 return ret;
8784}
8785
8786
8787int
8788i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8789{
8790 struct rte_eth_dev_data *data = pf->dev_data;
8791 int i, num;
8792 struct i40e_rx_queue *rxq;
8793
8794 num = 0;
8795 for (i = 0; i < pf->lan_nb_qps; i++) {
8796 rxq = data->rx_queues[i];
8797 if (rxq && rxq->q_set)
8798 num++;
8799 else
8800 break;
8801 }
8802
8803 return num;
8804}
8805
8806
8807static void
8808i40e_pf_global_rss_reset(struct i40e_pf *pf)
8809{
8810 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8811 uint32_t reg, reg_val;
8812 int i;
8813
8814
8815 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8816 if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8817 reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8818 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8819 }
8820
8821 for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8822 uint64_t inset;
8823 int j, pctype;
8824
8825 if (hw->mac.type == I40E_MAC_X722)
8826 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8827 else
8828 pctype = i;
8829
8830
8831 inset = i40e_get_default_input_set(i);
8832 if (inset) {
8833 pf->hash_input_set[pctype] = inset;
8834 inset = i40e_translate_input_set_reg(hw->mac.type,
8835 inset);
8836
8837 reg = I40E_GLQF_HASH_INSET(0, pctype);
8838 i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8839 reg = I40E_GLQF_HASH_INSET(1, pctype);
8840 i40e_check_write_global_reg(hw, reg,
8841 (uint32_t)(inset >> 32));
8842
8843
8844 for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8845 reg = I40E_GLQF_HASH_MSK(j, pctype);
8846 i40e_check_write_global_reg(hw, reg, 0);
8847 }
8848 }
8849
8850
8851 reg = I40E_GLQF_HSYM(pctype);
8852 reg_val = i40e_read_rx_ctl(hw, reg);
8853 if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8854 reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8855 i40e_write_global_rx_ctl(hw, reg, reg_val);
8856 }
8857 }
8858 I40E_WRITE_FLUSH(hw);
8859}
8860
8861int
8862i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8863{
8864 struct i40e_hw *hw = &pf->adapter->hw;
8865 uint8_t lut[ETH_RSS_RETA_SIZE_512];
8866 uint32_t i;
8867 int num;
8868
8869
8870
8871
8872
8873 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8874 num = i40e_pf_calc_configured_queues_num(pf);
8875 else
8876 num = pf->dev_data->nb_rx_queues;
8877
8878 num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8879 if (num <= 0)
8880 return 0;
8881
8882 for (i = 0; i < hw->func_caps.rss_table_size; i++)
8883 lut[i] = (uint8_t)(i % (uint32_t)num);
8884
8885 return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8886}
8887
8888int
8889i40e_pf_reset_rss_key(struct i40e_pf *pf)
8890{
8891 const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8892 sizeof(uint32_t);
8893 uint8_t *rss_key;
8894
8895
8896 rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8897 if (!rss_key ||
8898 pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8899 static uint32_t rss_key_default[] = {0x6b793944,
8900 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8901 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8902 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8903
8904 rss_key = (uint8_t *)rss_key_default;
8905 }
8906
8907 return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8908}
8909
8910static int
8911i40e_pf_rss_reset(struct i40e_pf *pf)
8912{
8913 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8914
8915 int ret;
8916
8917 pf->hash_filter_enabled = 0;
8918 i40e_pf_disable_rss(pf);
8919 i40e_set_symmetric_hash_enable_per_port(hw, 0);
8920
8921 if (!pf->support_multi_driver)
8922 i40e_pf_global_rss_reset(pf);
8923
8924
8925 if (pf->adapter->rss_reta_updated == 0) {
8926 ret = i40e_pf_reset_rss_reta(pf);
8927 if (ret)
8928 return ret;
8929 }
8930
8931 return i40e_pf_reset_rss_key(pf);
8932}
8933
8934
8935int
8936i40e_pf_config_rss(struct i40e_pf *pf)
8937{
8938 struct i40e_hw *hw;
8939 enum rte_eth_rx_mq_mode mq_mode;
8940 uint64_t rss_hf, hena;
8941 int ret;
8942
8943 ret = i40e_pf_rss_reset(pf);
8944 if (ret) {
8945 PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8946 return ret;
8947 }
8948
8949 rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8950 mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8951 if (!(rss_hf & pf->adapter->flow_types_mask) ||
8952 !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8953 return 0;
8954
8955 hw = I40E_PF_TO_HW(pf);
8956 hena = i40e_config_hena(pf->adapter, rss_hf);
8957 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8958 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8959 I40E_WRITE_FLUSH(hw);
8960
8961 return 0;
8962}
8963
8964#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8965#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
8966int
8967i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8968{
8969 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8970 uint32_t val, reg;
8971 int ret = -EINVAL;
8972
8973 if (pf->support_multi_driver) {
8974 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8975 return -ENOTSUP;
8976 }
8977
8978 val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8979 PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8980
8981 if (len == 3) {
8982 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8983 } else if (len == 4) {
8984 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8985 } else {
8986 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8987 return ret;
8988 }
8989
8990 if (reg != val) {
8991 ret = i40e_aq_debug_write_global_register(hw,
8992 I40E_GL_PRS_FVBM(2),
8993 reg, NULL);
8994 if (ret != 0)
8995 return ret;
8996 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8997 "with value 0x%08x",
8998 I40E_GL_PRS_FVBM(2), reg);
8999 } else {
9000 ret = 0;
9001 }
9002 PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9003 I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9004
9005 return ret;
9006}
9007
9008
9009void
9010i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9011{
9012 uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9013
9014 if (enable > 0) {
9015 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9016 return;
9017
9018 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9019 } else {
9020 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9021 return;
9022
9023 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9024 }
9025 i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9026 I40E_WRITE_FLUSH(hw);
9027}
9028
9029
9030
9031
9032static uint64_t
9033i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9034 enum rte_filter_type filter)
9035{
9036 uint64_t valid;
9037
9038 static const uint64_t valid_hash_inset_table[] = {
9039 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9040 I40E_INSET_DMAC | I40E_INSET_SMAC |
9041 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9042 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9043 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9044 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9045 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9046 I40E_INSET_FLEX_PAYLOAD,
9047 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9048 I40E_INSET_DMAC | I40E_INSET_SMAC |
9049 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9050 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9051 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9052 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9053 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9054 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9055 I40E_INSET_FLEX_PAYLOAD,
9056 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9057 I40E_INSET_DMAC | I40E_INSET_SMAC |
9058 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9059 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9060 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9061 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9062 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9063 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9064 I40E_INSET_FLEX_PAYLOAD,
9065 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9066 I40E_INSET_DMAC | I40E_INSET_SMAC |
9067 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9068 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9069 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9070 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9071 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9072 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9073 I40E_INSET_FLEX_PAYLOAD,
9074 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9075 I40E_INSET_DMAC | I40E_INSET_SMAC |
9076 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9077 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9078 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9079 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9080 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9081 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9082 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9083 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9084 I40E_INSET_DMAC | I40E_INSET_SMAC |
9085 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9086 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9087 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9088 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9089 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9090 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9091 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9092 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9093 I40E_INSET_DMAC | I40E_INSET_SMAC |
9094 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9095 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9096 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9097 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9098 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9099 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9100 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9101 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9102 I40E_INSET_DMAC | I40E_INSET_SMAC |
9103 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9104 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9105 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9106 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9107 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9108 I40E_INSET_FLEX_PAYLOAD,
9109 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9110 I40E_INSET_DMAC | I40E_INSET_SMAC |
9111 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9112 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9113 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9114 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9115 I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9116 I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9117 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9118 I40E_INSET_DMAC | I40E_INSET_SMAC |
9119 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9120 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9121 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9122 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9123 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9124 I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9125 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9126 I40E_INSET_DMAC | I40E_INSET_SMAC |
9127 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9128 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9129 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9130 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9131 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9132 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9133 I40E_INSET_FLEX_PAYLOAD,
9134 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9135 I40E_INSET_DMAC | I40E_INSET_SMAC |
9136 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9137 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9138 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9139 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9140 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9141 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9142 I40E_INSET_FLEX_PAYLOAD,
9143 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9144 I40E_INSET_DMAC | I40E_INSET_SMAC |
9145 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9146 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9147 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9148 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9149 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9150 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9151 I40E_INSET_FLEX_PAYLOAD,
9152 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9153 I40E_INSET_DMAC | I40E_INSET_SMAC |
9154 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9155 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9156 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9157 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9158 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9159 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9160 I40E_INSET_FLEX_PAYLOAD,
9161 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9162 I40E_INSET_DMAC | I40E_INSET_SMAC |
9163 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9164 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9165 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9166 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9167 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9168 I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9169 I40E_INSET_FLEX_PAYLOAD,
9170 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9171 I40E_INSET_DMAC | I40E_INSET_SMAC |
9172 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9173 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9174 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9175 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9176 I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9177 I40E_INSET_FLEX_PAYLOAD,
9178 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9179 I40E_INSET_DMAC | I40E_INSET_SMAC |
9180 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9181 I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9182 I40E_INSET_FLEX_PAYLOAD,
9183 };
9184
9185
9186
9187
9188
9189 static const uint64_t valid_fdir_inset_table[] = {
9190 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9191 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9192 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9193 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9194 I40E_INSET_IPV4_TTL,
9195 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9196 I40E_INSET_DMAC | I40E_INSET_SMAC |
9197 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9198 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9199 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9200 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9201 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9202 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9203 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9204 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9205 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9206 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9207 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9208 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9209 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9210 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9211 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9212 I40E_INSET_DMAC | I40E_INSET_SMAC |
9213 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9214 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9215 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9216 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9217 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9218 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9219 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9220 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9221 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9222 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9223 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9224 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9225 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9226 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9227 I40E_INSET_SCTP_VT,
9228 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9229 I40E_INSET_DMAC | I40E_INSET_SMAC |
9230 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9231 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9232 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9233 I40E_INSET_IPV4_TTL,
9234 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9235 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9236 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9237 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9238 I40E_INSET_IPV6_HOP_LIMIT,
9239 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9240 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9241 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9242 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9243 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9244 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9245 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9246 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9247 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9248 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9249 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9250 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9251 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9252 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9253 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9254 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9255 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9256 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9257 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9258 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9259 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9260 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9261 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9262 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9263 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9264 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9265 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9266 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9267 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9268 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9269 I40E_INSET_SCTP_VT,
9270 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9271 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9272 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9273 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9274 I40E_INSET_IPV6_HOP_LIMIT,
9275 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9276 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9277 I40E_INSET_LAST_ETHER_TYPE,
9278 };
9279
9280 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9281 return 0;
9282 if (filter == RTE_ETH_FILTER_HASH)
9283 valid = valid_hash_inset_table[pctype];
9284 else
9285 valid = valid_fdir_inset_table[pctype];
9286
9287 return valid;
9288}
9289
9290
9291
9292
9293int
9294i40e_validate_input_set(enum i40e_filter_pctype pctype,
9295 enum rte_filter_type filter, uint64_t inset)
9296{
9297 uint64_t valid;
9298
9299 valid = i40e_get_valid_input_set(pctype, filter);
9300 if (inset & (~valid))
9301 return -EINVAL;
9302
9303 return 0;
9304}
9305
9306
9307uint64_t
9308i40e_get_default_input_set(uint16_t pctype)
9309{
9310 static const uint64_t default_inset_table[] = {
9311 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9312 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9313 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9314 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9315 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9316 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9317 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9318 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9319 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9320 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9321 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9322 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9323 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9324 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9325 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9326 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9327 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9328 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9329 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9330 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9331 I40E_INSET_SCTP_VT,
9332 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9333 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9334 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9335 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9336 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9337 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9338 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9339 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9340 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9341 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9342 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9343 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9344 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9345 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9346 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9347 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9348 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9349 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9350 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9351 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9352 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9353 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9354 I40E_INSET_SCTP_VT,
9355 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9356 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9357 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9358 I40E_INSET_LAST_ETHER_TYPE,
9359 };
9360
9361 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9362 return 0;
9363
9364 return default_inset_table[pctype];
9365}
9366
9367
9368
9369
9370
9371uint64_t
9372i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9373{
9374 uint64_t val = 0;
9375 uint16_t i;
9376
9377 struct inset_map {
9378 uint64_t inset;
9379 uint64_t inset_reg;
9380 };
9381
9382 static const struct inset_map inset_map_common[] = {
9383 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9384 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9385 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9386 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9387 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9388 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9389 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9390 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9391 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9392 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9393 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9394 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9395 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9396 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9397 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9398 {I40E_INSET_TUNNEL_DMAC,
9399 I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9400 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9401 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9402 {I40E_INSET_TUNNEL_SRC_PORT,
9403 I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9404 {I40E_INSET_TUNNEL_DST_PORT,
9405 I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9406 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9407 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9408 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9409 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9410 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9411 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9412 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9413 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9414 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9415 };
9416
9417
9418 static const struct inset_map inset_map_diff_x722[] = {
9419 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9420 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9421 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9422 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9423 };
9424
9425 static const struct inset_map inset_map_diff_not_x722[] = {
9426 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9427 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9428 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9429 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9430 };
9431
9432 if (input == 0)
9433 return val;
9434
9435
9436 if (type == I40E_MAC_X722) {
9437 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9438 if (input & inset_map_diff_x722[i].inset)
9439 val |= inset_map_diff_x722[i].inset_reg;
9440 }
9441 } else {
9442 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9443 if (input & inset_map_diff_not_x722[i].inset)
9444 val |= inset_map_diff_not_x722[i].inset_reg;
9445 }
9446 }
9447
9448 for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9449 if (input & inset_map_common[i].inset)
9450 val |= inset_map_common[i].inset_reg;
9451 }
9452
9453 return val;
9454}
9455
9456static int
9457i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9458 uint32_t pit_reg_count, uint32_t hdr_off)
9459{
9460 const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9461 uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9462 uint32_t i, reg_val, src_off, count;
9463
9464 for (i = pit_reg_start; i < pit_reg_end; i++) {
9465 reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9466
9467 src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9468 count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9469
9470 if (src_off <= field_off && (src_off + count) > field_off)
9471 break;
9472 }
9473
9474 if (i >= pit_reg_end) {
9475 PMD_DRV_LOG(ERR,
9476 "Hardware GLQF_PIT configuration does not support this field mask");
9477 return -1;
9478 }
9479
9480 return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9481}
9482
9483int
9484i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9485 uint32_t *mask, uint8_t nb_elem)
9486{
9487 static const uint64_t mask_inset[] = {
9488 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9489 I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9490
9491 static const struct {
9492 uint64_t inset;
9493 uint32_t mask;
9494 uint32_t offset;
9495 } inset_mask_offset_map[] = {
9496 { I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9497 offsetof(struct rte_ipv4_hdr, type_of_service) },
9498
9499 { I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9500 offsetof(struct rte_ipv4_hdr, next_proto_id) },
9501
9502 { I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9503 offsetof(struct rte_ipv4_hdr, time_to_live) },
9504
9505 { I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9506 offsetof(struct rte_ipv6_hdr, vtc_flow) },
9507
9508 { I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9509 offsetof(struct rte_ipv6_hdr, proto) },
9510
9511 { I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9512 offsetof(struct rte_ipv6_hdr, hop_limits) },
9513 };
9514
9515 uint32_t i;
9516 int idx = 0;
9517
9518 assert(mask);
9519 if (!inset)
9520 return 0;
9521
9522 for (i = 0; i < RTE_DIM(mask_inset); i++) {
9523
9524
9525
9526 if ((mask_inset[i] & inset) == mask_inset[i]) {
9527 inset &= ~mask_inset[i];
9528 if (!inset)
9529 return 0;
9530 }
9531 }
9532
9533 for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9534 uint32_t pit_start, pit_count;
9535 int offset;
9536
9537 if (!(inset_mask_offset_map[i].inset & inset))
9538 continue;
9539
9540 if (inset_mask_offset_map[i].inset &
9541 (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9542 I40E_INSET_IPV4_TTL)) {
9543 pit_start = I40E_GLQF_PIT_IPV4_START;
9544 pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9545 } else {
9546 pit_start = I40E_GLQF_PIT_IPV6_START;
9547 pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9548 }
9549
9550 offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9551 inset_mask_offset_map[i].offset);
9552
9553 if (offset < 0)
9554 return -EINVAL;
9555
9556 if (idx >= nb_elem) {
9557 PMD_DRV_LOG(ERR,
9558 "Configuration of inset mask out of range %u",
9559 nb_elem);
9560 return -ERANGE;
9561 }
9562
9563 mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9564 inset_mask_offset_map[i].mask);
9565 idx++;
9566 }
9567
9568 return idx;
9569}
9570
9571void
9572i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9573{
9574 uint32_t reg = i40e_read_rx_ctl(hw, addr);
9575
9576 PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9577 if (reg != val)
9578 i40e_write_rx_ctl(hw, addr, val);
9579 PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9580 (uint32_t)i40e_read_rx_ctl(hw, addr));
9581}
9582
9583void
9584i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9585{
9586 uint32_t reg = i40e_read_rx_ctl(hw, addr);
9587 struct rte_eth_dev_data *dev_data =
9588 ((struct i40e_adapter *)hw->back)->pf.dev_data;
9589 struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9590
9591 if (reg != val) {
9592 i40e_write_rx_ctl(hw, addr, val);
9593 PMD_DRV_LOG(WARNING,
9594 "i40e device %s changed global register [0x%08x]."
9595 " original: 0x%08x, new: 0x%08x",
9596 dev->device->name, addr, reg,
9597 (uint32_t)i40e_read_rx_ctl(hw, addr));
9598 }
9599}
9600
9601static void
9602i40e_filter_input_set_init(struct i40e_pf *pf)
9603{
9604 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9605 enum i40e_filter_pctype pctype;
9606 uint64_t input_set, inset_reg;
9607 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9608 int num, i;
9609 uint16_t flow_type;
9610
9611 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9612 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9613 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9614
9615 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9616 continue;
9617
9618 input_set = i40e_get_default_input_set(pctype);
9619
9620 num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9621 I40E_INSET_MASK_NUM_REG);
9622 if (num < 0)
9623 return;
9624 if (pf->support_multi_driver && num > 0) {
9625 PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9626 return;
9627 }
9628 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9629 input_set);
9630
9631 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9632 (uint32_t)(inset_reg & UINT32_MAX));
9633 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9634 (uint32_t)((inset_reg >>
9635 I40E_32_BIT_WIDTH) & UINT32_MAX));
9636 if (!pf->support_multi_driver) {
9637 i40e_check_write_global_reg(hw,
9638 I40E_GLQF_HASH_INSET(0, pctype),
9639 (uint32_t)(inset_reg & UINT32_MAX));
9640 i40e_check_write_global_reg(hw,
9641 I40E_GLQF_HASH_INSET(1, pctype),
9642 (uint32_t)((inset_reg >>
9643 I40E_32_BIT_WIDTH) & UINT32_MAX));
9644
9645 for (i = 0; i < num; i++) {
9646 i40e_check_write_global_reg(hw,
9647 I40E_GLQF_FD_MSK(i, pctype),
9648 mask_reg[i]);
9649 i40e_check_write_global_reg(hw,
9650 I40E_GLQF_HASH_MSK(i, pctype),
9651 mask_reg[i]);
9652 }
9653
9654 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9655 i40e_check_write_global_reg(hw,
9656 I40E_GLQF_FD_MSK(i, pctype),
9657 0);
9658 i40e_check_write_global_reg(hw,
9659 I40E_GLQF_HASH_MSK(i, pctype),
9660 0);
9661 }
9662 } else {
9663 PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9664 }
9665 I40E_WRITE_FLUSH(hw);
9666
9667
9668 if (!pf->support_multi_driver)
9669 pf->hash_input_set[pctype] = input_set;
9670 pf->fdir.input_set[pctype] = input_set;
9671 }
9672}
9673
9674int
9675i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9676 uint32_t pctype, bool add)
9677{
9678 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9679 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9680 uint64_t inset_reg = 0;
9681 int num, i;
9682
9683 if (pf->support_multi_driver) {
9684 PMD_DRV_LOG(ERR,
9685 "Modify input set is not permitted when multi-driver enabled.");
9686 return -EPERM;
9687 }
9688
9689
9690 if (hw->mac.type == I40E_MAC_X722)
9691 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9692
9693 if (add) {
9694
9695 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9696 inset_reg <<= I40E_32_BIT_WIDTH;
9697 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9698 input_set |= pf->hash_input_set[pctype];
9699 }
9700 num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9701 I40E_INSET_MASK_NUM_REG);
9702 if (num < 0)
9703 return -EINVAL;
9704
9705 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9706
9707 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9708 (uint32_t)(inset_reg & UINT32_MAX));
9709 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9710 (uint32_t)((inset_reg >>
9711 I40E_32_BIT_WIDTH) & UINT32_MAX));
9712
9713 for (i = 0; i < num; i++)
9714 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9715 mask_reg[i]);
9716
9717 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9718 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9719 0);
9720 I40E_WRITE_FLUSH(hw);
9721
9722 pf->hash_input_set[pctype] = input_set;
9723 return 0;
9724}
9725
9726
9727static int
9728i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9729 struct i40e_ethertype_filter *filter)
9730{
9731 rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9732 RTE_ETHER_ADDR_LEN);
9733 filter->input.ether_type = input->ether_type;
9734 filter->flags = input->flags;
9735 filter->queue = input->queue;
9736
9737 return 0;
9738}
9739
9740
9741struct i40e_ethertype_filter *
9742i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9743 const struct i40e_ethertype_filter_input *input)
9744{
9745 int ret;
9746
9747 ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9748 if (ret < 0)
9749 return NULL;
9750
9751 return ethertype_rule->hash_map[ret];
9752}
9753
9754
9755static int
9756i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9757 struct i40e_ethertype_filter *filter)
9758{
9759 struct i40e_ethertype_rule *rule = &pf->ethertype;
9760 int ret;
9761
9762 ret = rte_hash_add_key(rule->hash_table, &filter->input);
9763 if (ret < 0) {
9764 PMD_DRV_LOG(ERR,
9765 "Failed to insert ethertype filter"
9766 " to hash table %d!",
9767 ret);
9768 return ret;
9769 }
9770 rule->hash_map[ret] = filter;
9771
9772 TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9773
9774 return 0;
9775}
9776
9777
9778int
9779i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9780 struct i40e_ethertype_filter_input *input)
9781{
9782 struct i40e_ethertype_rule *rule = &pf->ethertype;
9783 struct i40e_ethertype_filter *filter;
9784 int ret;
9785
9786 ret = rte_hash_del_key(rule->hash_table, input);
9787 if (ret < 0) {
9788 PMD_DRV_LOG(ERR,
9789 "Failed to delete ethertype filter"
9790 " to hash table %d!",
9791 ret);
9792 return ret;
9793 }
9794 filter = rule->hash_map[ret];
9795 rule->hash_map[ret] = NULL;
9796
9797 TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9798 rte_free(filter);
9799
9800 return 0;
9801}
9802
9803
9804
9805
9806
9807int
9808i40e_ethertype_filter_set(struct i40e_pf *pf,
9809 struct rte_eth_ethertype_filter *filter,
9810 bool add)
9811{
9812 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9813 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9814 struct i40e_ethertype_filter *ethertype_filter, *node;
9815 struct i40e_ethertype_filter check_filter;
9816 struct i40e_control_filter_stats stats;
9817 uint16_t flags = 0;
9818 int ret;
9819
9820 if (filter->queue >= pf->dev_data->nb_rx_queues) {
9821 PMD_DRV_LOG(ERR, "Invalid queue ID");
9822 return -EINVAL;
9823 }
9824 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9825 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9826 PMD_DRV_LOG(ERR,
9827 "unsupported ether_type(0x%04x) in control packet filter.",
9828 filter->ether_type);
9829 return -EINVAL;
9830 }
9831 if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9832 PMD_DRV_LOG(WARNING,
9833 "filter vlan ether_type in first tag is not supported.");
9834
9835
9836 memset(&check_filter, 0, sizeof(check_filter));
9837 i40e_ethertype_filter_convert(filter, &check_filter);
9838 node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9839 &check_filter.input);
9840 if (add && node) {
9841 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9842 return -EINVAL;
9843 }
9844
9845 if (!add && !node) {
9846 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9847 return -EINVAL;
9848 }
9849
9850 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9851 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9852 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9853 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9854 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9855
9856 memset(&stats, 0, sizeof(stats));
9857 ret = i40e_aq_add_rem_control_packet_filter(hw,
9858 filter->mac_addr.addr_bytes,
9859 filter->ether_type, flags,
9860 pf->main_vsi->seid,
9861 filter->queue, add, &stats, NULL);
9862
9863 PMD_DRV_LOG(INFO,
9864 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9865 ret, stats.mac_etype_used, stats.etype_used,
9866 stats.mac_etype_free, stats.etype_free);
9867 if (ret < 0)
9868 return -ENOSYS;
9869
9870
9871 if (add) {
9872 ethertype_filter = rte_zmalloc("ethertype_filter",
9873 sizeof(*ethertype_filter), 0);
9874 if (ethertype_filter == NULL) {
9875 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9876 return -ENOMEM;
9877 }
9878
9879 rte_memcpy(ethertype_filter, &check_filter,
9880 sizeof(check_filter));
9881 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9882 if (ret < 0)
9883 rte_free(ethertype_filter);
9884 } else {
9885 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9886 }
9887
9888 return ret;
9889}
9890
9891static int
9892i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9893 const struct rte_flow_ops **ops)
9894{
9895 if (dev == NULL)
9896 return -EINVAL;
9897
9898 *ops = &i40e_flow_ops;
9899 return 0;
9900}
9901
9902
9903
9904
9905
9906static void
9907i40e_enable_extended_tag(struct rte_eth_dev *dev)
9908{
9909 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9910 uint32_t buf = 0;
9911 int ret;
9912
9913 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9914 PCI_DEV_CAP_REG);
9915 if (ret < 0) {
9916 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9917 PCI_DEV_CAP_REG);
9918 return;
9919 }
9920 if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9921 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9922 return;
9923 }
9924
9925 buf = 0;
9926 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9927 PCI_DEV_CTRL_REG);
9928 if (ret < 0) {
9929 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9930 PCI_DEV_CTRL_REG);
9931 return;
9932 }
9933 if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9934 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9935 return;
9936 }
9937 buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9938 ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9939 PCI_DEV_CTRL_REG);
9940 if (ret < 0) {
9941 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9942 PCI_DEV_CTRL_REG);
9943 return;
9944 }
9945}
9946
9947
9948
9949
9950
9951
9952static void
9953i40e_hw_init(struct rte_eth_dev *dev)
9954{
9955 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9956
9957 i40e_enable_extended_tag(dev);
9958
9959
9960 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9961
9962
9963 i40e_set_symmetric_hash_enable_per_port(hw, 0);
9964}
9965
9966
9967
9968
9969
9970
9971
9972enum i40e_filter_pctype
9973i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9974{
9975 int i;
9976 uint64_t pctype_mask;
9977
9978 if (flow_type < I40E_FLOW_TYPE_MAX) {
9979 pctype_mask = adapter->pctypes_tbl[flow_type];
9980 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9981 if (pctype_mask & (1ULL << i))
9982 return (enum i40e_filter_pctype)i;
9983 }
9984 }
9985 return I40E_FILTER_PCTYPE_INVALID;
9986}
9987
9988uint16_t
9989i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9990 enum i40e_filter_pctype pctype)
9991{
9992 uint16_t flowtype;
9993 uint64_t pctype_mask = 1ULL << pctype;
9994
9995 for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9996 flowtype++) {
9997 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9998 return flowtype;
9999 }
10000
10001 return RTE_ETH_FLOW_UNKNOWN;
10002}
10003
10004
10005
10006
10007
10008
10009
10010
10011
10012
10013
10014
10015
10016
10017#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
10018#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
10019#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
10020
10021#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10022#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
10023
10024
10025#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10026#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10027
10028
10029#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
10030
10031#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
10032#define I40E_GL_SWR_PM_UP_THR 0x269FBC
10033
10034
10035
10036
10037
10038
10039static bool
10040i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10041{
10042#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10043 .device_id = (dev), \
10044 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10045
10046#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10047 .device_id = (dev), \
10048 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10049
10050 static const struct {
10051 uint16_t device_id;
10052 uint32_t val;
10053 } swr_pm_table[] = {
10054 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10055 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10056 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10057 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10058 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10059
10060 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10061 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10062 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10063 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10064 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10065 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10066 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10067 };
10068 uint32_t i;
10069
10070 if (value == NULL) {
10071 PMD_DRV_LOG(ERR, "value is NULL");
10072 return false;
10073 }
10074
10075 for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10076 if (hw->device_id == swr_pm_table[i].device_id) {
10077 *value = swr_pm_table[i].val;
10078
10079 PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10080 "value - 0x%08x",
10081 hw->device_id, *value);
10082 return true;
10083 }
10084 }
10085
10086 return false;
10087}
10088
10089static int
10090i40e_dev_sync_phy_type(struct i40e_hw *hw)
10091{
10092 enum i40e_status_code status;
10093 struct i40e_aq_get_phy_abilities_resp phy_ab;
10094 int ret = -ENOTSUP;
10095 int retries = 0;
10096
10097 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10098 NULL);
10099
10100 while (status) {
10101 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10102 status);
10103 retries++;
10104 rte_delay_us(100000);
10105 if (retries < 5)
10106 status = i40e_aq_get_phy_capabilities(hw, false,
10107 true, &phy_ab, NULL);
10108 else
10109 return ret;
10110 }
10111 return 0;
10112}
10113
10114static void
10115i40e_configure_registers(struct i40e_hw *hw)
10116{
10117 static struct {
10118 uint32_t addr;
10119 uint64_t val;
10120 } reg_table[] = {
10121 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10122 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10123 {I40E_GL_SWR_PM_UP_THR, 0},
10124 };
10125 uint64_t reg;
10126 uint32_t i;
10127 int ret;
10128
10129 for (i = 0; i < RTE_DIM(reg_table); i++) {
10130 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10131 if (hw->mac.type == I40E_MAC_X722)
10132 reg_table[i].val =
10133 I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10134 else
10135 if (hw->aq.fw_maj_ver < 6)
10136 reg_table[i].val =
10137 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10138 else
10139 reg_table[i].val =
10140 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10141 }
10142
10143 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10144 if (hw->mac.type == I40E_MAC_X722)
10145 reg_table[i].val =
10146 I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10147 else
10148 reg_table[i].val =
10149 I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10150 }
10151
10152 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10153 uint32_t cfg_val;
10154
10155 if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10156 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10157 "GL_SWR_PM_UP_THR value fixup",
10158 hw->device_id);
10159 continue;
10160 }
10161
10162 reg_table[i].val = cfg_val;
10163 }
10164
10165 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10166 ®, NULL);
10167 if (ret < 0) {
10168 PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10169 reg_table[i].addr);
10170 break;
10171 }
10172 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10173 reg_table[i].addr, reg);
10174 if (reg == reg_table[i].val)
10175 continue;
10176
10177 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10178 reg_table[i].val, NULL);
10179 if (ret < 0) {
10180 PMD_DRV_LOG(ERR,
10181 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10182 reg_table[i].val, reg_table[i].addr);
10183 break;
10184 }
10185 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10186 "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10187 }
10188}
10189
10190#define I40E_VSI_TSR_QINQ_CONFIG 0xc030
10191#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
10192#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10193static int
10194i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10195{
10196 uint32_t reg;
10197 int ret;
10198
10199 if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10200 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10201 return -EINVAL;
10202 }
10203
10204
10205 reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10206 if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10207 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10208 ret = i40e_aq_debug_write_register(hw,
10209 I40E_VSI_TSR(vsi->vsi_id),
10210 reg, NULL);
10211 if (ret < 0) {
10212 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10213 vsi->vsi_id);
10214 return I40E_ERR_CONFIG;
10215 }
10216 }
10217
10218
10219 reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10220 if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10221 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10222 ret = i40e_aq_debug_write_register(hw,
10223 I40E_VSI_L2TAGSTXVALID(
10224 vsi->vsi_id), reg, NULL);
10225 if (ret < 0) {
10226 PMD_DRV_LOG(ERR,
10227 "Failed to update VSI_L2TAGSTXVALID[%d]",
10228 vsi->vsi_id);
10229 return I40E_ERR_CONFIG;
10230 }
10231 }
10232
10233 return 0;
10234}
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248static enum i40e_status_code
10249i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10250 uint16_t seid, uint16_t dst_id,
10251 uint16_t rule_type, uint16_t *entries,
10252 uint16_t count, uint16_t *rule_id)
10253{
10254 struct i40e_aq_desc desc;
10255 struct i40e_aqc_add_delete_mirror_rule cmd;
10256 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10257 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10258 &desc.params.raw;
10259 uint16_t buff_len;
10260 enum i40e_status_code status;
10261
10262 i40e_fill_default_direct_cmd_desc(&desc,
10263 i40e_aqc_opc_add_mirror_rule);
10264 memset(&cmd, 0, sizeof(cmd));
10265
10266 buff_len = sizeof(uint16_t) * count;
10267 desc.datalen = rte_cpu_to_le_16(buff_len);
10268 if (buff_len > 0)
10269 desc.flags |= rte_cpu_to_le_16(
10270 (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10271 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10272 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10273 cmd.num_entries = rte_cpu_to_le_16(count);
10274 cmd.seid = rte_cpu_to_le_16(seid);
10275 cmd.destination = rte_cpu_to_le_16(dst_id);
10276
10277 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10278 status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10279 PMD_DRV_LOG(INFO,
10280 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10281 hw->aq.asq_last_status, resp->rule_id,
10282 resp->mirror_rules_used, resp->mirror_rules_free);
10283 *rule_id = rte_le_to_cpu_16(resp->rule_id);
10284
10285 return status;
10286}
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299static enum i40e_status_code
10300i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10301 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10302 uint16_t count, uint16_t rule_id)
10303{
10304 struct i40e_aq_desc desc;
10305 struct i40e_aqc_add_delete_mirror_rule cmd;
10306 uint16_t buff_len = 0;
10307 enum i40e_status_code status;
10308 void *buff = NULL;
10309
10310 i40e_fill_default_direct_cmd_desc(&desc,
10311 i40e_aqc_opc_delete_mirror_rule);
10312 memset(&cmd, 0, sizeof(cmd));
10313 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10314 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10315 I40E_AQ_FLAG_RD));
10316 cmd.num_entries = count;
10317 buff_len = sizeof(uint16_t) * count;
10318 desc.datalen = rte_cpu_to_le_16(buff_len);
10319 buff = (void *)entries;
10320 } else
10321
10322 cmd.destination = rte_cpu_to_le_16(rule_id);
10323
10324 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10325 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10326 cmd.seid = rte_cpu_to_le_16(seid);
10327
10328 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10329 status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10330
10331 return status;
10332}
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344static int
10345i40e_mirror_rule_set(struct rte_eth_dev *dev,
10346 struct rte_eth_mirror_conf *mirror_conf,
10347 uint8_t sw_id, uint8_t on)
10348{
10349 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10350 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10351 struct i40e_mirror_rule *it, *mirr_rule = NULL;
10352 struct i40e_mirror_rule *parent = NULL;
10353 uint16_t seid, dst_seid, rule_id;
10354 uint16_t i, j = 0;
10355 int ret;
10356
10357 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10358
10359 if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10360 PMD_DRV_LOG(ERR,
10361 "mirror rule can not be configured without veb or vfs.");
10362 return -ENOSYS;
10363 }
10364 if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10365 PMD_DRV_LOG(ERR, "mirror table is full.");
10366 return -ENOSPC;
10367 }
10368 if (mirror_conf->dst_pool > pf->vf_num) {
10369 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10370 mirror_conf->dst_pool);
10371 return -EINVAL;
10372 }
10373
10374 seid = pf->main_vsi->veb->seid;
10375
10376 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10377 if (sw_id <= it->index) {
10378 mirr_rule = it;
10379 break;
10380 }
10381 parent = it;
10382 }
10383 if (mirr_rule && sw_id == mirr_rule->index) {
10384 if (on) {
10385 PMD_DRV_LOG(ERR, "mirror rule exists.");
10386 return -EEXIST;
10387 } else {
10388 ret = i40e_aq_del_mirror_rule(hw, seid,
10389 mirr_rule->rule_type,
10390 mirr_rule->entries,
10391 mirr_rule->num_entries, mirr_rule->id);
10392 if (ret < 0) {
10393 PMD_DRV_LOG(ERR,
10394 "failed to remove mirror rule: ret = %d, aq_err = %d.",
10395 ret, hw->aq.asq_last_status);
10396 return -ENOSYS;
10397 }
10398 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10399 rte_free(mirr_rule);
10400 pf->nb_mirror_rule--;
10401 return 0;
10402 }
10403 } else if (!on) {
10404 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10405 return -ENOENT;
10406 }
10407
10408 mirr_rule = rte_zmalloc("i40e_mirror_rule",
10409 sizeof(struct i40e_mirror_rule) , 0);
10410 if (!mirr_rule) {
10411 PMD_DRV_LOG(ERR, "failed to allocate memory");
10412 return I40E_ERR_NO_MEMORY;
10413 }
10414 switch (mirror_conf->rule_type) {
10415 case ETH_MIRROR_VLAN:
10416 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10417 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10418 mirr_rule->entries[j] =
10419 mirror_conf->vlan.vlan_id[i];
10420 j++;
10421 }
10422 }
10423 if (j == 0) {
10424 PMD_DRV_LOG(ERR, "vlan is not specified.");
10425 rte_free(mirr_rule);
10426 return -EINVAL;
10427 }
10428 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10429 break;
10430 case ETH_MIRROR_VIRTUAL_POOL_UP:
10431 case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10432
10433 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10434 PMD_DRV_LOG(ERR, "pool mask is out of range.");
10435 rte_free(mirr_rule);
10436 return -EINVAL;
10437 }
10438 for (i = 0, j = 0; i < pf->vf_num; i++) {
10439 if (mirror_conf->pool_mask & (1ULL << i)) {
10440 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10441 j++;
10442 }
10443 }
10444 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10445
10446 mirr_rule->entries[j] = pf->main_vsi_seid;
10447 j++;
10448 }
10449 if (j == 0) {
10450 PMD_DRV_LOG(ERR, "pool is not specified.");
10451 rte_free(mirr_rule);
10452 return -EINVAL;
10453 }
10454
10455 mirr_rule->rule_type =
10456 (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10457 I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10458 I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10459 break;
10460 case ETH_MIRROR_UPLINK_PORT:
10461
10462 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10463 break;
10464 case ETH_MIRROR_DOWNLINK_PORT:
10465 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10466 break;
10467 default:
10468 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10469 mirror_conf->rule_type);
10470 rte_free(mirr_rule);
10471 return -EINVAL;
10472 }
10473
10474
10475 if (mirror_conf->dst_pool == pf->vf_num)
10476 dst_seid = pf->main_vsi_seid;
10477 else
10478 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10479
10480 ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10481 mirr_rule->rule_type, mirr_rule->entries,
10482 j, &rule_id);
10483 if (ret < 0) {
10484 PMD_DRV_LOG(ERR,
10485 "failed to add mirror rule: ret = %d, aq_err = %d.",
10486 ret, hw->aq.asq_last_status);
10487 rte_free(mirr_rule);
10488 return -ENOSYS;
10489 }
10490
10491 mirr_rule->index = sw_id;
10492 mirr_rule->num_entries = j;
10493 mirr_rule->id = rule_id;
10494 mirr_rule->dst_vsi_seid = dst_seid;
10495
10496 if (parent)
10497 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10498 else
10499 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10500
10501 pf->nb_mirror_rule++;
10502 return 0;
10503}
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513static int
10514i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10515{
10516 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10517 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10518 struct i40e_mirror_rule *it, *mirr_rule = NULL;
10519 uint16_t seid;
10520 int ret;
10521
10522 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10523
10524 seid = pf->main_vsi->veb->seid;
10525
10526 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10527 if (sw_id == it->index) {
10528 mirr_rule = it;
10529 break;
10530 }
10531 }
10532 if (mirr_rule) {
10533 ret = i40e_aq_del_mirror_rule(hw, seid,
10534 mirr_rule->rule_type,
10535 mirr_rule->entries,
10536 mirr_rule->num_entries, mirr_rule->id);
10537 if (ret < 0) {
10538 PMD_DRV_LOG(ERR,
10539 "failed to remove mirror rule: status = %d, aq_err = %d.",
10540 ret, hw->aq.asq_last_status);
10541 return -ENOSYS;
10542 }
10543 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10544 rte_free(mirr_rule);
10545 pf->nb_mirror_rule--;
10546 } else {
10547 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10548 return -ENOENT;
10549 }
10550 return 0;
10551}
10552
10553static uint64_t
10554i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10555{
10556 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10557 uint64_t systim_cycles;
10558
10559 systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10560 systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10561 << 32;
10562
10563 return systim_cycles;
10564}
10565
10566static uint64_t
10567i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10568{
10569 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10570 uint64_t rx_tstamp;
10571
10572 rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10573 rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10574 << 32;
10575
10576 return rx_tstamp;
10577}
10578
10579static uint64_t
10580i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10581{
10582 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10583 uint64_t tx_tstamp;
10584
10585 tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10586 tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10587 << 32;
10588
10589 return tx_tstamp;
10590}
10591
10592static void
10593i40e_start_timecounters(struct rte_eth_dev *dev)
10594{
10595 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10596 struct i40e_adapter *adapter = dev->data->dev_private;
10597 struct rte_eth_link link;
10598 uint32_t tsync_inc_l;
10599 uint32_t tsync_inc_h;
10600
10601
10602 i40e_dev_link_update(dev, 1);
10603 rte_eth_linkstatus_get(dev, &link);
10604
10605 switch (link.link_speed) {
10606 case ETH_SPEED_NUM_40G:
10607 case ETH_SPEED_NUM_25G:
10608 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10609 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10610 break;
10611 case ETH_SPEED_NUM_10G:
10612 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10613 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10614 break;
10615 case ETH_SPEED_NUM_1G:
10616 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10617 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10618 break;
10619 default:
10620 tsync_inc_l = 0x0;
10621 tsync_inc_h = 0x0;
10622 }
10623
10624
10625 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10626 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10627
10628 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10629 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10630 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10631
10632 adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10633 adapter->systime_tc.cc_shift = 0;
10634 adapter->systime_tc.nsec_mask = 0;
10635
10636 adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10637 adapter->rx_tstamp_tc.cc_shift = 0;
10638 adapter->rx_tstamp_tc.nsec_mask = 0;
10639
10640 adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10641 adapter->tx_tstamp_tc.cc_shift = 0;
10642 adapter->tx_tstamp_tc.nsec_mask = 0;
10643}
10644
10645static int
10646i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10647{
10648 struct i40e_adapter *adapter = dev->data->dev_private;
10649
10650 adapter->systime_tc.nsec += delta;
10651 adapter->rx_tstamp_tc.nsec += delta;
10652 adapter->tx_tstamp_tc.nsec += delta;
10653
10654 return 0;
10655}
10656
10657static int
10658i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10659{
10660 uint64_t ns;
10661 struct i40e_adapter *adapter = dev->data->dev_private;
10662
10663 ns = rte_timespec_to_ns(ts);
10664
10665
10666 adapter->systime_tc.nsec = ns;
10667 adapter->rx_tstamp_tc.nsec = ns;
10668 adapter->tx_tstamp_tc.nsec = ns;
10669
10670 return 0;
10671}
10672
10673static int
10674i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10675{
10676 uint64_t ns, systime_cycles;
10677 struct i40e_adapter *adapter = dev->data->dev_private;
10678
10679 systime_cycles = i40e_read_systime_cyclecounter(dev);
10680 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10681 *ts = rte_ns_to_timespec(ns);
10682
10683 return 0;
10684}
10685
10686static int
10687i40e_timesync_enable(struct rte_eth_dev *dev)
10688{
10689 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10690 uint32_t tsync_ctl_l;
10691 uint32_t tsync_ctl_h;
10692
10693
10694 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10695 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10696
10697 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10698 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10699
10700 i40e_start_timecounters(dev);
10701
10702
10703 I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10704 I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10705 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10706 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10707 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10708 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10709
10710
10711 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10712 tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10713
10714 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10715 tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10716 tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10717
10718 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10719 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10720
10721 return 0;
10722}
10723
10724static int
10725i40e_timesync_disable(struct rte_eth_dev *dev)
10726{
10727 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10728 uint32_t tsync_ctl_l;
10729 uint32_t tsync_ctl_h;
10730
10731
10732 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10733 tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10734
10735 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10736 tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10737
10738 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10739 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10740
10741
10742 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10743 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10744
10745 return 0;
10746}
10747
10748static int
10749i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10750 struct timespec *timestamp, uint32_t flags)
10751{
10752 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10753 struct i40e_adapter *adapter = dev->data->dev_private;
10754 uint32_t sync_status;
10755 uint32_t index = flags & 0x03;
10756 uint64_t rx_tstamp_cycles;
10757 uint64_t ns;
10758
10759 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10760 if ((sync_status & (1 << index)) == 0)
10761 return -EINVAL;
10762
10763 rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10764 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10765 *timestamp = rte_ns_to_timespec(ns);
10766
10767 return 0;
10768}
10769
10770static int
10771i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10772 struct timespec *timestamp)
10773{
10774 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10775 struct i40e_adapter *adapter = dev->data->dev_private;
10776 uint32_t sync_status;
10777 uint64_t tx_tstamp_cycles;
10778 uint64_t ns;
10779
10780 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10781 if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10782 return -EINVAL;
10783
10784 tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10785 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10786 *timestamp = rte_ns_to_timespec(ns);
10787
10788 return 0;
10789}
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799static int
10800i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10801 struct i40e_dcbx_config *dcb_cfg,
10802 uint8_t *tc_map)
10803{
10804 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10805 uint8_t i, tc_bw, bw_lf;
10806
10807 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10808
10809 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10810 if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10811 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10812 return -EINVAL;
10813 }
10814
10815
10816 tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10817 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10818 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10819
10820 bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10821 for (i = 0; i < bw_lf; i++)
10822 dcb_cfg->etscfg.tcbwtable[i]++;
10823
10824
10825 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10826 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10827
10828 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10829 dcb_cfg->etscfg.prioritytable[i] =
10830 dcb_rx_conf->dcb_tc[i];
10831
10832
10833 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10834 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10835 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10836 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10837
10838 if (dcb_rx_conf->nb_tcs == 0)
10839 *tc_map = 1;
10840 else
10841 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10842
10843 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10844 dcb_cfg->pfc.willing = 0;
10845 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10846 dcb_cfg->pfc.pfcenable = *tc_map;
10847 }
10848 return 0;
10849}
10850
10851
10852static enum i40e_status_code
10853i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10854 struct i40e_aqc_vsi_properties_data *info,
10855 uint8_t enabled_tcmap)
10856{
10857 enum i40e_status_code ret;
10858 int i, total_tc = 0;
10859 uint16_t qpnum_per_tc, bsf, qp_idx;
10860 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10861 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10862 uint16_t used_queues;
10863
10864 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10865 if (ret != I40E_SUCCESS)
10866 return ret;
10867
10868 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10869 if (enabled_tcmap & (1 << i))
10870 total_tc++;
10871 }
10872 if (total_tc == 0)
10873 total_tc = 1;
10874 vsi->enabled_tc = enabled_tcmap;
10875
10876
10877 if (vsi->type == I40E_VSI_MAIN)
10878 used_queues = dev_data->nb_rx_queues -
10879 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10880 else if (vsi->type == I40E_VSI_VMDQ2)
10881 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10882 else {
10883 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10884 return I40E_ERR_NO_AVAILABLE_VSI;
10885 }
10886
10887 qpnum_per_tc = used_queues / total_tc;
10888
10889 if (qpnum_per_tc == 0) {
10890 PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
10891 return I40E_ERR_INVALID_QP_ID;
10892 }
10893 qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10894 I40E_MAX_Q_PER_TC);
10895 bsf = rte_bsf32(qpnum_per_tc);
10896
10897
10898
10899
10900
10901
10902 qp_idx = 0;
10903 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10904 if (vsi->enabled_tc & (1 << i)) {
10905 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10906 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10907 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10908 qp_idx += qpnum_per_tc;
10909 } else
10910 info->tc_mapping[i] = 0;
10911 }
10912
10913
10914 if (vsi->type == I40E_VSI_SRIOV) {
10915 info->mapping_flags |=
10916 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10917 for (i = 0; i < vsi->nb_qps; i++)
10918 info->queue_mapping[i] =
10919 rte_cpu_to_le_16(vsi->base_queue + i);
10920 } else {
10921 info->mapping_flags |=
10922 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10923 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10924 }
10925 info->valid_sections |=
10926 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10927
10928 return I40E_SUCCESS;
10929}
10930
10931
10932
10933
10934
10935
10936
10937
10938static enum i40e_status_code
10939i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10940{
10941 struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10942 struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10943 struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10944 struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10945 enum i40e_status_code ret = I40E_SUCCESS;
10946 int i;
10947 uint32_t bw_max;
10948
10949
10950 if (veb->enabled_tc == tc_map)
10951 return ret;
10952
10953
10954 memset(&veb_bw, 0, sizeof(veb_bw));
10955 veb_bw.tc_valid_bits = tc_map;
10956
10957 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10958 if (tc_map & BIT_ULL(i))
10959 veb_bw.tc_bw_share_credits[i] = 1;
10960 }
10961 ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10962 &veb_bw, NULL);
10963 if (ret) {
10964 PMD_INIT_LOG(ERR,
10965 "AQ command Config switch_comp BW allocation per TC failed = %d",
10966 hw->aq.asq_last_status);
10967 return ret;
10968 }
10969
10970 memset(&ets_query, 0, sizeof(ets_query));
10971 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10972 &ets_query, NULL);
10973 if (ret != I40E_SUCCESS) {
10974 PMD_DRV_LOG(ERR,
10975 "Failed to get switch_comp ETS configuration %u",
10976 hw->aq.asq_last_status);
10977 return ret;
10978 }
10979 memset(&bw_query, 0, sizeof(bw_query));
10980 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10981 &bw_query, NULL);
10982 if (ret != I40E_SUCCESS) {
10983 PMD_DRV_LOG(ERR,
10984 "Failed to get switch_comp bandwidth configuration %u",
10985 hw->aq.asq_last_status);
10986 return ret;
10987 }
10988
10989
10990 veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10991 veb->bw_info.bw_max = ets_query.tc_bw_max;
10992 PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10993 PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10994 bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10995 (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10996 I40E_16_BIT_WIDTH);
10997 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10998 veb->bw_info.bw_ets_share_credits[i] =
10999 bw_query.tc_bw_share_credits[i];
11000 veb->bw_info.bw_ets_credits[i] =
11001 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11002
11003 veb->bw_info.bw_ets_max[i] =
11004 (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11005 RTE_LEN2MASK(3, uint8_t));
11006 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11007 veb->bw_info.bw_ets_share_credits[i]);
11008 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11009 veb->bw_info.bw_ets_credits[i]);
11010 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11011 veb->bw_info.bw_ets_max[i]);
11012 }
11013
11014 veb->enabled_tc = tc_map;
11015
11016 return ret;
11017}
11018
11019
11020
11021
11022
11023
11024
11025
11026
11027static enum i40e_status_code
11028i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11029{
11030 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11031 struct i40e_vsi_context ctxt;
11032 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11033 enum i40e_status_code ret = I40E_SUCCESS;
11034 int i;
11035
11036
11037 if (vsi->enabled_tc == tc_map)
11038 return ret;
11039
11040
11041 memset(&bw_data, 0, sizeof(bw_data));
11042 bw_data.tc_valid_bits = tc_map;
11043
11044 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11045 if (tc_map & BIT_ULL(i))
11046 bw_data.tc_bw_credits[i] = 1;
11047 }
11048 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11049 if (ret) {
11050 PMD_INIT_LOG(ERR,
11051 "AQ command Config VSI BW allocation per TC failed = %d",
11052 hw->aq.asq_last_status);
11053 goto out;
11054 }
11055 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11056 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11057
11058
11059 ctxt.seid = vsi->seid;
11060 ctxt.pf_num = hw->pf_id;
11061 ctxt.vf_num = 0;
11062 ctxt.uplink_seid = vsi->uplink_seid;
11063 ctxt.info = vsi->info;
11064 i40e_get_cap(hw);
11065 ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11066 if (ret)
11067 goto out;
11068
11069
11070 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11071 if (ret) {
11072 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11073 hw->aq.asq_last_status);
11074 goto out;
11075 }
11076
11077 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11078 sizeof(vsi->info.tc_mapping));
11079 rte_memcpy(&vsi->info.queue_mapping,
11080 &ctxt.info.queue_mapping,
11081 sizeof(vsi->info.queue_mapping));
11082 vsi->info.mapping_flags = ctxt.info.mapping_flags;
11083 vsi->info.valid_sections = 0;
11084
11085
11086 ret = i40e_vsi_get_bw_config(vsi);
11087 if (ret) {
11088 PMD_INIT_LOG(ERR,
11089 "Failed updating vsi bw info, err %s aq_err %s",
11090 i40e_stat_str(hw, ret),
11091 i40e_aq_str(hw, hw->aq.asq_last_status));
11092 goto out;
11093 }
11094
11095 vsi->enabled_tc = tc_map;
11096
11097out:
11098 return ret;
11099}
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109static enum i40e_status_code
11110i40e_dcb_hw_configure(struct i40e_pf *pf,
11111 struct i40e_dcbx_config *new_cfg,
11112 uint8_t tc_map)
11113{
11114 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11115 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11116 struct i40e_vsi *main_vsi = pf->main_vsi;
11117 struct i40e_vsi_list *vsi_list;
11118 enum i40e_status_code ret;
11119 int i;
11120 uint32_t val;
11121
11122
11123 if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11124 (hw->aq.fw_maj_ver >= 5))) {
11125 PMD_INIT_LOG(ERR,
11126 "FW < v4.4, can not use FW LLDP API to configure DCB");
11127 return I40E_ERR_FIRMWARE_API_VERSION;
11128 }
11129
11130
11131 if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11132 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11133 return I40E_SUCCESS;
11134 }
11135
11136
11137 *old_cfg = *new_cfg;
11138 old_cfg->etsrec = old_cfg->etscfg;
11139 ret = i40e_set_dcb_config(hw);
11140 if (ret) {
11141 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11142 i40e_stat_str(hw, ret),
11143 i40e_aq_str(hw, hw->aq.asq_last_status));
11144 return ret;
11145 }
11146
11147 for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11148 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11149 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
11150 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11151 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
11152 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11153 I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11154 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11155 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11156 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11157 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11158 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11159 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11160 }
11161
11162
11163 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11164
11165 i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11166 &hw->local_dcbx_config);
11167
11168
11169 if (main_vsi->veb) {
11170 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11171 if (ret)
11172 PMD_INIT_LOG(WARNING,
11173 "Failed configuring TC for VEB seid=%d",
11174 main_vsi->veb->seid);
11175 }
11176
11177 i40e_vsi_config_tc(main_vsi, tc_map);
11178 if (main_vsi->veb) {
11179 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11180
11181
11182
11183 if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11184 ret = i40e_vsi_config_tc(vsi_list->vsi,
11185 tc_map);
11186 else
11187 ret = i40e_vsi_config_tc(vsi_list->vsi,
11188 I40E_DEFAULT_TCMAP);
11189 if (ret)
11190 PMD_INIT_LOG(WARNING,
11191 "Failed configuring TC for VSI seid=%d",
11192 vsi_list->vsi->seid);
11193
11194 }
11195 }
11196 return I40E_SUCCESS;
11197}
11198
11199
11200
11201
11202
11203
11204
11205
11206int
11207i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11208{
11209 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11210 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11211 int i, ret = 0;
11212
11213 if ((pf->flags & I40E_FLAG_DCB) == 0) {
11214 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11215 return -ENOTSUP;
11216 }
11217
11218
11219
11220
11221
11222 if (sw_dcb == TRUE) {
11223
11224
11225
11226
11227
11228
11229 ret = i40e_aq_start_lldp(hw, true, NULL);
11230 if (ret != I40E_SUCCESS)
11231 PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11232
11233 ret = i40e_init_dcb(hw, true);
11234
11235
11236
11237
11238 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11239 hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11240 memset(&hw->local_dcbx_config, 0,
11241 sizeof(struct i40e_dcbx_config));
11242
11243 hw->local_dcbx_config.etscfg.willing = 0;
11244 hw->local_dcbx_config.etscfg.maxtcs = 0;
11245 hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11246 hw->local_dcbx_config.etscfg.tsatable[0] =
11247 I40E_IEEE_TSA_ETS;
11248
11249 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11250 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11251 hw->local_dcbx_config.etsrec =
11252 hw->local_dcbx_config.etscfg;
11253 hw->local_dcbx_config.pfc.willing = 0;
11254 hw->local_dcbx_config.pfc.pfccap =
11255 I40E_MAX_TRAFFIC_CLASS;
11256
11257 hw->local_dcbx_config.numapps = 1;
11258 hw->local_dcbx_config.app[0].selector =
11259 I40E_APP_SEL_ETHTYPE;
11260 hw->local_dcbx_config.app[0].priority = 3;
11261 hw->local_dcbx_config.app[0].protocolid =
11262 I40E_APP_PROTOID_FCOE;
11263 ret = i40e_set_dcb_config(hw);
11264 if (ret) {
11265 PMD_INIT_LOG(ERR,
11266 "default dcb config fails. err = %d, aq_err = %d.",
11267 ret, hw->aq.asq_last_status);
11268 return -ENOSYS;
11269 }
11270 } else {
11271 PMD_INIT_LOG(ERR,
11272 "DCB initialization in FW fails, err = %d, aq_err = %d.",
11273 ret, hw->aq.asq_last_status);
11274 return -ENOTSUP;
11275 }
11276
11277 if (i40e_need_stop_lldp(dev)) {
11278 ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11279 if (ret != I40E_SUCCESS)
11280 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11281 }
11282 } else {
11283 ret = i40e_aq_start_lldp(hw, true, NULL);
11284 if (ret != I40E_SUCCESS)
11285 PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11286
11287 ret = i40e_init_dcb(hw, true);
11288 if (!ret) {
11289 if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11290 PMD_INIT_LOG(ERR,
11291 "HW doesn't support DCBX offload.");
11292 return -ENOTSUP;
11293 }
11294 } else {
11295 PMD_INIT_LOG(ERR,
11296 "DCBX configuration failed, err = %d, aq_err = %d.",
11297 ret, hw->aq.asq_last_status);
11298 return -ENOTSUP;
11299 }
11300 }
11301 return 0;
11302}
11303
11304
11305
11306
11307
11308
11309
11310static int
11311i40e_dcb_setup(struct rte_eth_dev *dev)
11312{
11313 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11314 struct i40e_dcbx_config dcb_cfg;
11315 uint8_t tc_map = 0;
11316 int ret = 0;
11317
11318 if ((pf->flags & I40E_FLAG_DCB) == 0) {
11319 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11320 return -ENOTSUP;
11321 }
11322
11323 if (pf->vf_num != 0)
11324 PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
11325
11326 ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11327 if (ret) {
11328 PMD_INIT_LOG(ERR, "invalid dcb config");
11329 return -EINVAL;
11330 }
11331 ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11332 if (ret) {
11333 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11334 return -ENOSYS;
11335 }
11336
11337 return 0;
11338}
11339
11340static int
11341i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11342 struct rte_eth_dcb_info *dcb_info)
11343{
11344 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11345 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11346 struct i40e_vsi *vsi = pf->main_vsi;
11347 struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11348 uint16_t bsf, tc_mapping;
11349 int i, j = 0;
11350
11351 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11352 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11353 else
11354 dcb_info->nb_tcs = 1;
11355 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11356 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11357 for (i = 0; i < dcb_info->nb_tcs; i++)
11358 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11359
11360
11361 if (!pf->nb_cfg_vmdq_vsi) {
11362 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11363 if (!(vsi->enabled_tc & (1 << i)))
11364 continue;
11365 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11366 dcb_info->tc_queue.tc_rxq[j][i].base =
11367 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11368 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11369 dcb_info->tc_queue.tc_txq[j][i].base =
11370 dcb_info->tc_queue.tc_rxq[j][i].base;
11371 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11372 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11373 dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11374 dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11375 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11376 }
11377 return 0;
11378 }
11379
11380
11381 do {
11382 vsi = pf->vmdq[j].vsi;
11383 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11384 if (!(vsi->enabled_tc & (1 << i)))
11385 continue;
11386 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11387 dcb_info->tc_queue.tc_rxq[j][i].base =
11388 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11389 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11390 dcb_info->tc_queue.tc_txq[j][i].base =
11391 dcb_info->tc_queue.tc_rxq[j][i].base;
11392 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11393 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11394 dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11395 dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11396 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11397 }
11398 j++;
11399 } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11400 return 0;
11401}
11402
11403static int
11404i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11405{
11406 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11407 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11408 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11409 uint16_t msix_intr;
11410
11411 msix_intr = intr_handle->intr_vec[queue_id];
11412 if (msix_intr == I40E_MISC_VEC_ID)
11413 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11414 I40E_PFINT_DYN_CTL0_INTENA_MASK |
11415 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11416 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11417 else
11418 I40E_WRITE_REG(hw,
11419 I40E_PFINT_DYN_CTLN(msix_intr -
11420 I40E_RX_VEC_START),
11421 I40E_PFINT_DYN_CTLN_INTENA_MASK |
11422 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11423 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11424
11425 I40E_WRITE_FLUSH(hw);
11426 rte_intr_ack(&pci_dev->intr_handle);
11427
11428 return 0;
11429}
11430
11431static int
11432i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11433{
11434 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11435 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11436 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11437 uint16_t msix_intr;
11438
11439 msix_intr = intr_handle->intr_vec[queue_id];
11440 if (msix_intr == I40E_MISC_VEC_ID)
11441 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11442 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11443 else
11444 I40E_WRITE_REG(hw,
11445 I40E_PFINT_DYN_CTLN(msix_intr -
11446 I40E_RX_VEC_START),
11447 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11448 I40E_WRITE_FLUSH(hw);
11449
11450 return 0;
11451}
11452
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463
11464static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11465{
11466 if ((type != I40E_MAC_X722) &&
11467 ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11468 (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11469 (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11470 (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11471 (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11472 (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11473 (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11474 return 0;
11475 else
11476 return 1;
11477}
11478
11479static int i40e_get_regs(struct rte_eth_dev *dev,
11480 struct rte_dev_reg_info *regs)
11481{
11482 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11483 uint32_t *ptr_data = regs->data;
11484 uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11485 const struct i40e_reg_info *reg_info;
11486
11487 if (ptr_data == NULL) {
11488 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11489 regs->width = sizeof(uint32_t);
11490 return 0;
11491 }
11492
11493
11494 reg_idx = 0;
11495 while (i40e_regs_adminq[reg_idx].name) {
11496 reg_info = &i40e_regs_adminq[reg_idx++];
11497 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11498 for (arr_idx2 = 0;
11499 arr_idx2 <= reg_info->count2;
11500 arr_idx2++) {
11501 reg_offset = arr_idx * reg_info->stride1 +
11502 arr_idx2 * reg_info->stride2;
11503 reg_offset += reg_info->base_addr;
11504 ptr_data[reg_offset >> 2] =
11505 i40e_read_rx_ctl(hw, reg_offset);
11506 }
11507 }
11508
11509
11510 reg_idx = 0;
11511 while (i40e_regs_others[reg_idx].name) {
11512 reg_info = &i40e_regs_others[reg_idx++];
11513 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11514 for (arr_idx2 = 0;
11515 arr_idx2 <= reg_info->count2;
11516 arr_idx2++) {
11517 reg_offset = arr_idx * reg_info->stride1 +
11518 arr_idx2 * reg_info->stride2;
11519 reg_offset += reg_info->base_addr;
11520 if (!i40e_valid_regs(hw->mac.type, reg_offset))
11521 ptr_data[reg_offset >> 2] = 0;
11522 else
11523 ptr_data[reg_offset >> 2] =
11524 I40E_READ_REG(hw, reg_offset);
11525 }
11526 }
11527
11528 return 0;
11529}
11530
11531static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11532{
11533 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11534
11535
11536 return hw->nvm.sr_size << 1;
11537}
11538
11539static int i40e_get_eeprom(struct rte_eth_dev *dev,
11540 struct rte_dev_eeprom_info *eeprom)
11541{
11542 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11543 uint16_t *data = eeprom->data;
11544 uint16_t offset, length, cnt_words;
11545 int ret_code;
11546
11547 offset = eeprom->offset >> 1;
11548 length = eeprom->length >> 1;
11549 cnt_words = length;
11550
11551 if (offset > hw->nvm.sr_size ||
11552 offset + length > hw->nvm.sr_size) {
11553 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11554 return -EINVAL;
11555 }
11556
11557 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11558
11559 ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11560 if (ret_code != I40E_SUCCESS || cnt_words != length) {
11561 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11562 return -EIO;
11563 }
11564
11565 return 0;
11566}
11567
11568static int i40e_get_module_info(struct rte_eth_dev *dev,
11569 struct rte_eth_dev_module_info *modinfo)
11570{
11571 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11572 uint32_t sff8472_comp = 0;
11573 uint32_t sff8472_swap = 0;
11574 uint32_t sff8636_rev = 0;
11575 i40e_status status;
11576 uint32_t type = 0;
11577
11578
11579 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11580 PMD_DRV_LOG(ERR,
11581 "Module EEPROM memory read not supported. "
11582 "Please update the NVM image.\n");
11583 return -EINVAL;
11584 }
11585
11586 status = i40e_update_link_info(hw);
11587 if (status)
11588 return -EIO;
11589
11590 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11591 PMD_DRV_LOG(ERR,
11592 "Cannot read module EEPROM memory. "
11593 "No module connected.\n");
11594 return -EINVAL;
11595 }
11596
11597 type = hw->phy.link_info.module_type[0];
11598
11599 switch (type) {
11600 case I40E_MODULE_TYPE_SFP:
11601 status = i40e_aq_get_phy_register(hw,
11602 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11603 I40E_I2C_EEPROM_DEV_ADDR, 1,
11604 I40E_MODULE_SFF_8472_COMP,
11605 &sff8472_comp, NULL);
11606 if (status)
11607 return -EIO;
11608
11609 status = i40e_aq_get_phy_register(hw,
11610 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11611 I40E_I2C_EEPROM_DEV_ADDR, 1,
11612 I40E_MODULE_SFF_8472_SWAP,
11613 &sff8472_swap, NULL);
11614 if (status)
11615 return -EIO;
11616
11617
11618
11619
11620 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11621 PMD_DRV_LOG(WARNING,
11622 "Module address swap to access "
11623 "page 0xA2 is not supported.\n");
11624 modinfo->type = RTE_ETH_MODULE_SFF_8079;
11625 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11626 } else if (sff8472_comp == 0x00) {
11627
11628 modinfo->type = RTE_ETH_MODULE_SFF_8079;
11629 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11630 } else {
11631 modinfo->type = RTE_ETH_MODULE_SFF_8472;
11632 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11633 }
11634 break;
11635 case I40E_MODULE_TYPE_QSFP_PLUS:
11636
11637 status = i40e_aq_get_phy_register(hw,
11638 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11639 0, 1,
11640 I40E_MODULE_REVISION_ADDR,
11641 &sff8636_rev, NULL);
11642 if (status)
11643 return -EIO;
11644
11645 if (sff8636_rev > 0x02) {
11646
11647 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11648 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11649 } else {
11650 modinfo->type = RTE_ETH_MODULE_SFF_8436;
11651 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11652 }
11653 break;
11654 case I40E_MODULE_TYPE_QSFP28:
11655 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11656 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11657 break;
11658 default:
11659 PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11660 return -EINVAL;
11661 }
11662 return 0;
11663}
11664
11665static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11666 struct rte_dev_eeprom_info *info)
11667{
11668 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11669 bool is_sfp = false;
11670 i40e_status status;
11671 uint8_t *data;
11672 uint32_t value = 0;
11673 uint32_t i;
11674
11675 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11676 is_sfp = true;
11677
11678 data = info->data;
11679 for (i = 0; i < info->length; i++) {
11680 u32 offset = i + info->offset;
11681 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11682
11683
11684 if (is_sfp) {
11685 if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11686 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11687 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11688 }
11689 } else {
11690 while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11691
11692 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11693 addr++;
11694 }
11695 }
11696 status = i40e_aq_get_phy_register(hw,
11697 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11698 addr, 1, offset, &value, NULL);
11699 if (status)
11700 return -EIO;
11701 data[i] = (uint8_t)value;
11702 }
11703 return 0;
11704}
11705
11706static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11707 struct rte_ether_addr *mac_addr)
11708{
11709 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11710 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11711 struct i40e_vsi *vsi = pf->main_vsi;
11712 struct i40e_mac_filter_info mac_filter;
11713 struct i40e_mac_filter *f;
11714 int ret;
11715
11716 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11717 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11718 return -EINVAL;
11719 }
11720
11721 TAILQ_FOREACH(f, &vsi->mac_list, next) {
11722 if (rte_is_same_ether_addr(&pf->dev_addr,
11723 &f->mac_info.mac_addr))
11724 break;
11725 }
11726
11727 if (f == NULL) {
11728 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11729 return -EIO;
11730 }
11731
11732 mac_filter = f->mac_info;
11733 ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11734 if (ret != I40E_SUCCESS) {
11735 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11736 return -EIO;
11737 }
11738 memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11739 ret = i40e_vsi_add_mac(vsi, &mac_filter);
11740 if (ret != I40E_SUCCESS) {
11741 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11742 return -EIO;
11743 }
11744 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11745
11746 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11747 mac_addr->addr_bytes, NULL);
11748 if (ret != I40E_SUCCESS) {
11749 PMD_DRV_LOG(ERR, "Failed to change mac");
11750 return -EIO;
11751 }
11752
11753 return 0;
11754}
11755
11756static int
11757i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11758{
11759 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11760 struct rte_eth_dev_data *dev_data = pf->dev_data;
11761 uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11762 int ret = 0;
11763
11764
11765 if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11766 return -EINVAL;
11767
11768
11769 if (dev_data->dev_started) {
11770 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11771 dev_data->port_id);
11772 return -EBUSY;
11773 }
11774
11775 if (frame_size > I40E_ETH_MAX_LEN)
11776 dev_data->dev_conf.rxmode.offloads |=
11777 DEV_RX_OFFLOAD_JUMBO_FRAME;
11778 else
11779 dev_data->dev_conf.rxmode.offloads &=
11780 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11781
11782 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11783
11784 return ret;
11785}
11786
11787
11788static void
11789i40e_ethertype_filter_restore(struct i40e_pf *pf)
11790{
11791 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11792 struct i40e_ethertype_filter_list
11793 *ethertype_list = &pf->ethertype.ethertype_list;
11794 struct i40e_ethertype_filter *f;
11795 struct i40e_control_filter_stats stats;
11796 uint16_t flags;
11797
11798 TAILQ_FOREACH(f, ethertype_list, rules) {
11799 flags = 0;
11800 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11801 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11802 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11803 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11804 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11805
11806 memset(&stats, 0, sizeof(stats));
11807 i40e_aq_add_rem_control_packet_filter(hw,
11808 f->input.mac_addr.addr_bytes,
11809 f->input.ether_type,
11810 flags, pf->main_vsi->seid,
11811 f->queue, 1, &stats, NULL);
11812 }
11813 PMD_DRV_LOG(INFO, "Ethertype filter:"
11814 " mac_etype_used = %u, etype_used = %u,"
11815 " mac_etype_free = %u, etype_free = %u",
11816 stats.mac_etype_used, stats.etype_used,
11817 stats.mac_etype_free, stats.etype_free);
11818}
11819
11820
11821static void
11822i40e_tunnel_filter_restore(struct i40e_pf *pf)
11823{
11824 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11825 struct i40e_vsi *vsi;
11826 struct i40e_pf_vf *vf;
11827 struct i40e_tunnel_filter_list
11828 *tunnel_list = &pf->tunnel.tunnel_list;
11829 struct i40e_tunnel_filter *f;
11830 struct i40e_aqc_cloud_filters_element_bb cld_filter;
11831 bool big_buffer = 0;
11832
11833 TAILQ_FOREACH(f, tunnel_list, rules) {
11834 if (!f->is_to_vf)
11835 vsi = pf->main_vsi;
11836 else {
11837 vf = &pf->vfs[f->vf_id];
11838 vsi = vf->vsi;
11839 }
11840 memset(&cld_filter, 0, sizeof(cld_filter));
11841 rte_ether_addr_copy((struct rte_ether_addr *)
11842 &f->input.outer_mac,
11843 (struct rte_ether_addr *)&cld_filter.element.outer_mac);
11844 rte_ether_addr_copy((struct rte_ether_addr *)
11845 &f->input.inner_mac,
11846 (struct rte_ether_addr *)&cld_filter.element.inner_mac);
11847 cld_filter.element.inner_vlan = f->input.inner_vlan;
11848 cld_filter.element.flags = f->input.flags;
11849 cld_filter.element.tenant_id = f->input.tenant_id;
11850 cld_filter.element.queue_number = f->queue;
11851 rte_memcpy(cld_filter.general_fields,
11852 f->input.general_fields,
11853 sizeof(f->input.general_fields));
11854
11855 if (((f->input.flags &
11856 I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11857 I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11858 ((f->input.flags &
11859 I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11860 I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11861 ((f->input.flags &
11862 I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11863 I40E_AQC_ADD_CLOUD_FILTER_0X10))
11864 big_buffer = 1;
11865
11866 if (big_buffer)
11867 i40e_aq_add_cloud_filters_bb(hw,
11868 vsi->seid, &cld_filter, 1);
11869 else
11870 i40e_aq_add_cloud_filters(hw, vsi->seid,
11871 &cld_filter.element, 1);
11872 }
11873}
11874
11875static void
11876i40e_filter_restore(struct i40e_pf *pf)
11877{
11878 i40e_ethertype_filter_restore(pf);
11879 i40e_tunnel_filter_restore(pf);
11880 i40e_fdir_filter_restore(pf);
11881 (void)i40e_hash_filter_restore(pf);
11882}
11883
11884bool
11885is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11886{
11887 if (strcmp(dev->device->driver->name, drv->driver.name))
11888 return false;
11889
11890 return true;
11891}
11892
11893bool
11894is_i40e_supported(struct rte_eth_dev *dev)
11895{
11896 return is_device_supported(dev, &rte_i40e_pmd);
11897}
11898
11899struct i40e_customized_pctype*
11900i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11901{
11902 int i;
11903
11904 for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11905 if (pf->customized_pctype[i].index == index)
11906 return &pf->customized_pctype[i];
11907 }
11908 return NULL;
11909}
11910
11911static int
11912i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11913 uint32_t pkg_size, uint32_t proto_num,
11914 struct rte_pmd_i40e_proto_info *proto,
11915 enum rte_pmd_i40e_package_op op)
11916{
11917 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11918 uint32_t pctype_num;
11919 struct rte_pmd_i40e_ptype_info *pctype;
11920 uint32_t buff_size;
11921 struct i40e_customized_pctype *new_pctype = NULL;
11922 uint8_t proto_id;
11923 uint8_t pctype_value;
11924 char name[64];
11925 uint32_t i, j, n;
11926 int ret;
11927
11928 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11929 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11930 PMD_DRV_LOG(ERR, "Unsupported operation.");
11931 return -1;
11932 }
11933
11934 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11935 (uint8_t *)&pctype_num, sizeof(pctype_num),
11936 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11937 if (ret) {
11938 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11939 return -1;
11940 }
11941 if (!pctype_num) {
11942 PMD_DRV_LOG(INFO, "No new pctype added");
11943 return -1;
11944 }
11945
11946 buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11947 pctype = rte_zmalloc("new_pctype", buff_size, 0);
11948 if (!pctype) {
11949 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11950 return -1;
11951 }
11952
11953 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11954 (uint8_t *)pctype, buff_size,
11955 RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11956 if (ret) {
11957 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11958 rte_free(pctype);
11959 return -1;
11960 }
11961
11962
11963 for (i = 0; i < pctype_num; i++) {
11964 pctype_value = pctype[i].ptype_id;
11965 memset(name, 0, sizeof(name));
11966 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11967 proto_id = pctype[i].protocols[j];
11968 if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11969 continue;
11970 for (n = 0; n < proto_num; n++) {
11971 if (proto[n].proto_id != proto_id)
11972 continue;
11973 strlcat(name, proto[n].name, sizeof(name));
11974 strlcat(name, "_", sizeof(name));
11975 break;
11976 }
11977 }
11978 name[strlen(name) - 1] = '\0';
11979 PMD_DRV_LOG(INFO, "name = %s\n", name);
11980 if (!strcmp(name, "GTPC"))
11981 new_pctype =
11982 i40e_find_customized_pctype(pf,
11983 I40E_CUSTOMIZED_GTPC);
11984 else if (!strcmp(name, "GTPU_IPV4"))
11985 new_pctype =
11986 i40e_find_customized_pctype(pf,
11987 I40E_CUSTOMIZED_GTPU_IPV4);
11988 else if (!strcmp(name, "GTPU_IPV6"))
11989 new_pctype =
11990 i40e_find_customized_pctype(pf,
11991 I40E_CUSTOMIZED_GTPU_IPV6);
11992 else if (!strcmp(name, "GTPU"))
11993 new_pctype =
11994 i40e_find_customized_pctype(pf,
11995 I40E_CUSTOMIZED_GTPU);
11996 else if (!strcmp(name, "IPV4_L2TPV3"))
11997 new_pctype =
11998 i40e_find_customized_pctype(pf,
11999 I40E_CUSTOMIZED_IPV4_L2TPV3);
12000 else if (!strcmp(name, "IPV6_L2TPV3"))
12001 new_pctype =
12002 i40e_find_customized_pctype(pf,
12003 I40E_CUSTOMIZED_IPV6_L2TPV3);
12004 else if (!strcmp(name, "IPV4_ESP"))
12005 new_pctype =
12006 i40e_find_customized_pctype(pf,
12007 I40E_CUSTOMIZED_ESP_IPV4);
12008 else if (!strcmp(name, "IPV6_ESP"))
12009 new_pctype =
12010 i40e_find_customized_pctype(pf,
12011 I40E_CUSTOMIZED_ESP_IPV6);
12012 else if (!strcmp(name, "IPV4_UDP_ESP"))
12013 new_pctype =
12014 i40e_find_customized_pctype(pf,
12015 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12016 else if (!strcmp(name, "IPV6_UDP_ESP"))
12017 new_pctype =
12018 i40e_find_customized_pctype(pf,
12019 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12020 else if (!strcmp(name, "IPV4_AH"))
12021 new_pctype =
12022 i40e_find_customized_pctype(pf,
12023 I40E_CUSTOMIZED_AH_IPV4);
12024 else if (!strcmp(name, "IPV6_AH"))
12025 new_pctype =
12026 i40e_find_customized_pctype(pf,
12027 I40E_CUSTOMIZED_AH_IPV6);
12028 if (new_pctype) {
12029 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12030 new_pctype->pctype = pctype_value;
12031 new_pctype->valid = true;
12032 } else {
12033 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12034 new_pctype->valid = false;
12035 }
12036 }
12037 }
12038
12039 rte_free(pctype);
12040 return 0;
12041}
12042
12043static int
12044i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12045 uint32_t pkg_size, uint32_t proto_num,
12046 struct rte_pmd_i40e_proto_info *proto,
12047 enum rte_pmd_i40e_package_op op)
12048{
12049 struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12050 uint16_t port_id = dev->data->port_id;
12051 uint32_t ptype_num;
12052 struct rte_pmd_i40e_ptype_info *ptype;
12053 uint32_t buff_size;
12054 uint8_t proto_id;
12055 char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12056 uint32_t i, j, n;
12057 bool in_tunnel;
12058 int ret;
12059
12060 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12061 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12062 PMD_DRV_LOG(ERR, "Unsupported operation.");
12063 return -1;
12064 }
12065
12066 if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12067 rte_pmd_i40e_ptype_mapping_reset(port_id);
12068 return 0;
12069 }
12070
12071
12072 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12073 (uint8_t *)&ptype_num, sizeof(ptype_num),
12074 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12075 if (ret) {
12076 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12077 return ret;
12078 }
12079 if (!ptype_num) {
12080 PMD_DRV_LOG(INFO, "No new ptype added");
12081 return -1;
12082 }
12083
12084 buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12085 ptype = rte_zmalloc("new_ptype", buff_size, 0);
12086 if (!ptype) {
12087 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12088 return -1;
12089 }
12090
12091
12092 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12093 (uint8_t *)ptype, buff_size,
12094 RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12095 if (ret) {
12096 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12097 rte_free(ptype);
12098 return ret;
12099 }
12100
12101 buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12102 ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12103 if (!ptype_mapping) {
12104 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12105 rte_free(ptype);
12106 return -1;
12107 }
12108
12109
12110 for (i = 0; i < ptype_num; i++) {
12111 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12112 ptype_mapping[i].sw_ptype = 0;
12113 in_tunnel = false;
12114 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12115 proto_id = ptype[i].protocols[j];
12116 if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12117 continue;
12118 for (n = 0; n < proto_num; n++) {
12119 if (proto[n].proto_id != proto_id)
12120 continue;
12121 memset(name, 0, sizeof(name));
12122 strcpy(name, proto[n].name);
12123 PMD_DRV_LOG(INFO, "name = %s\n", name);
12124 if (!strncasecmp(name, "PPPOE", 5))
12125 ptype_mapping[i].sw_ptype |=
12126 RTE_PTYPE_L2_ETHER_PPPOE;
12127 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12128 !in_tunnel) {
12129 ptype_mapping[i].sw_ptype |=
12130 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12131 ptype_mapping[i].sw_ptype |=
12132 RTE_PTYPE_L4_FRAG;
12133 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12134 in_tunnel) {
12135 ptype_mapping[i].sw_ptype |=
12136 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12137 ptype_mapping[i].sw_ptype |=
12138 RTE_PTYPE_INNER_L4_FRAG;
12139 } else if (!strncasecmp(name, "OIPV4", 5)) {
12140 ptype_mapping[i].sw_ptype |=
12141 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12142 in_tunnel = true;
12143 } else if (!strncasecmp(name, "IPV4", 4) &&
12144 !in_tunnel)
12145 ptype_mapping[i].sw_ptype |=
12146 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12147 else if (!strncasecmp(name, "IPV4", 4) &&
12148 in_tunnel)
12149 ptype_mapping[i].sw_ptype |=
12150 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12151 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12152 !in_tunnel) {
12153 ptype_mapping[i].sw_ptype |=
12154 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12155 ptype_mapping[i].sw_ptype |=
12156 RTE_PTYPE_L4_FRAG;
12157 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12158 in_tunnel) {
12159 ptype_mapping[i].sw_ptype |=
12160 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12161 ptype_mapping[i].sw_ptype |=
12162 RTE_PTYPE_INNER_L4_FRAG;
12163 } else if (!strncasecmp(name, "OIPV6", 5)) {
12164 ptype_mapping[i].sw_ptype |=
12165 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12166 in_tunnel = true;
12167 } else if (!strncasecmp(name, "IPV6", 4) &&
12168 !in_tunnel)
12169 ptype_mapping[i].sw_ptype |=
12170 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12171 else if (!strncasecmp(name, "IPV6", 4) &&
12172 in_tunnel)
12173 ptype_mapping[i].sw_ptype |=
12174 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12175 else if (!strncasecmp(name, "UDP", 3) &&
12176 !in_tunnel)
12177 ptype_mapping[i].sw_ptype |=
12178 RTE_PTYPE_L4_UDP;
12179 else if (!strncasecmp(name, "UDP", 3) &&
12180 in_tunnel)
12181 ptype_mapping[i].sw_ptype |=
12182 RTE_PTYPE_INNER_L4_UDP;
12183 else if (!strncasecmp(name, "TCP", 3) &&
12184 !in_tunnel)
12185 ptype_mapping[i].sw_ptype |=
12186 RTE_PTYPE_L4_TCP;
12187 else if (!strncasecmp(name, "TCP", 3) &&
12188 in_tunnel)
12189 ptype_mapping[i].sw_ptype |=
12190 RTE_PTYPE_INNER_L4_TCP;
12191 else if (!strncasecmp(name, "SCTP", 4) &&
12192 !in_tunnel)
12193 ptype_mapping[i].sw_ptype |=
12194 RTE_PTYPE_L4_SCTP;
12195 else if (!strncasecmp(name, "SCTP", 4) &&
12196 in_tunnel)
12197 ptype_mapping[i].sw_ptype |=
12198 RTE_PTYPE_INNER_L4_SCTP;
12199 else if ((!strncasecmp(name, "ICMP", 4) ||
12200 !strncasecmp(name, "ICMPV6", 6)) &&
12201 !in_tunnel)
12202 ptype_mapping[i].sw_ptype |=
12203 RTE_PTYPE_L4_ICMP;
12204 else if ((!strncasecmp(name, "ICMP", 4) ||
12205 !strncasecmp(name, "ICMPV6", 6)) &&
12206 in_tunnel)
12207 ptype_mapping[i].sw_ptype |=
12208 RTE_PTYPE_INNER_L4_ICMP;
12209 else if (!strncasecmp(name, "GTPC", 4)) {
12210 ptype_mapping[i].sw_ptype |=
12211 RTE_PTYPE_TUNNEL_GTPC;
12212 in_tunnel = true;
12213 } else if (!strncasecmp(name, "GTPU", 4)) {
12214 ptype_mapping[i].sw_ptype |=
12215 RTE_PTYPE_TUNNEL_GTPU;
12216 in_tunnel = true;
12217 } else if (!strncasecmp(name, "ESP", 3)) {
12218 ptype_mapping[i].sw_ptype |=
12219 RTE_PTYPE_TUNNEL_ESP;
12220 in_tunnel = true;
12221 } else if (!strncasecmp(name, "GRENAT", 6)) {
12222 ptype_mapping[i].sw_ptype |=
12223 RTE_PTYPE_TUNNEL_GRENAT;
12224 in_tunnel = true;
12225 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12226 !strncasecmp(name, "L2TPV2", 6) ||
12227 !strncasecmp(name, "L2TPV3", 6)) {
12228 ptype_mapping[i].sw_ptype |=
12229 RTE_PTYPE_TUNNEL_L2TP;
12230 in_tunnel = true;
12231 }
12232
12233 break;
12234 }
12235 }
12236 }
12237
12238 ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12239 ptype_num, 0);
12240 if (ret)
12241 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12242
12243 rte_free(ptype_mapping);
12244 rte_free(ptype);
12245 return ret;
12246}
12247
12248void
12249i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12250 uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12251{
12252 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12253 uint32_t proto_num;
12254 struct rte_pmd_i40e_proto_info *proto;
12255 uint32_t buff_size;
12256 uint32_t i;
12257 int ret;
12258
12259 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12260 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12261 PMD_DRV_LOG(ERR, "Unsupported operation.");
12262 return;
12263 }
12264
12265
12266 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12267 (uint8_t *)&proto_num, sizeof(proto_num),
12268 RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12269 if (ret) {
12270 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12271 return;
12272 }
12273 if (!proto_num) {
12274 PMD_DRV_LOG(INFO, "No new protocol added");
12275 return;
12276 }
12277
12278 buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12279 proto = rte_zmalloc("new_proto", buff_size, 0);
12280 if (!proto) {
12281 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12282 return;
12283 }
12284
12285
12286 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12287 (uint8_t *)proto, buff_size,
12288 RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12289 if (ret) {
12290 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12291 rte_free(proto);
12292 return;
12293 }
12294
12295
12296 for (i = 0; i < proto_num; i++) {
12297 if (!strncmp(proto[i].name, "GTP", 3)) {
12298 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12299 pf->gtp_support = true;
12300 else
12301 pf->gtp_support = false;
12302 break;
12303 }
12304 }
12305
12306
12307 for (i = 0; i < proto_num; i++) {
12308 if (!strncmp(proto[i].name, "ESP", 3)) {
12309 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12310 pf->esp_support = true;
12311 else
12312 pf->esp_support = false;
12313 break;
12314 }
12315 }
12316
12317
12318 ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12319 proto_num, proto, op);
12320 if (ret)
12321 PMD_DRV_LOG(INFO, "No pctype is updated.");
12322
12323
12324 ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12325 proto_num, proto, op);
12326 if (ret)
12327 PMD_DRV_LOG(INFO, "No ptype is updated.");
12328
12329 rte_free(proto);
12330}
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
12364
12365
12366
12367
12368
12369
12370
12371
12372
12373
12374
12375static int
12376i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12377{
12378 int ret = -ENOTSUP;
12379 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
12380 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
12381 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12382 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
12383
12384 if (pf->support_multi_driver) {
12385 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12386 return ret;
12387 }
12388
12389
12390 memset(&filter_replace, 0,
12391 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12392 memset(&filter_replace_buf, 0,
12393 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12394
12395
12396 filter_replace.old_filter_type =
12397 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12398 filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12399 filter_replace.tr_bit = 0;
12400
12401
12402 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12403 filter_replace_buf.data[0] |=
12404 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12405
12406 filter_replace_buf.data[2] = 0xff;
12407 filter_replace_buf.data[3] = 0x0f;
12408 filter_replace_buf.data[4] =
12409 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12410 filter_replace_buf.data[4] |=
12411 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12412
12413 filter_replace_buf.data[6] = 0xff;
12414 filter_replace_buf.data[7] = 0x0f;
12415 ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12416 &filter_replace_buf);
12417 if (ret != I40E_SUCCESS)
12418 return ret;
12419
12420 if (filter_replace.old_filter_type !=
12421 filter_replace.new_filter_type)
12422 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12423 " original: 0x%x, new: 0x%x",
12424 dev->device->name,
12425 filter_replace.old_filter_type,
12426 filter_replace.new_filter_type);
12427
12428
12429 memset(&filter_replace, 0,
12430 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12431 memset(&filter_replace_buf, 0,
12432 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12433
12434
12435 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12436 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12437 filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12438
12439
12440 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12441 filter_replace_buf.data[0] |=
12442 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12443 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12444 filter_replace_buf.data[4] |=
12445 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12446 ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12447 &filter_replace_buf);
12448 if (!ret && (filter_replace.old_filter_type !=
12449 filter_replace.new_filter_type))
12450 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12451 " original: 0x%x, new: 0x%x",
12452 dev->device->name,
12453 filter_replace.old_filter_type,
12454 filter_replace.new_filter_type);
12455
12456 return ret;
12457}
12458
12459RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
12460RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
12461#ifdef RTE_ETHDEV_DEBUG_RX
12462RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
12463#endif
12464#ifdef RTE_ETHDEV_DEBUG_TX
12465RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
12466#endif
12467
12468RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12469 ETH_I40E_FLOATING_VEB_ARG "=1"
12470 ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12471 ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12472 ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
12473