1
2
3
4
5#include <stdio.h>
6#include <stdint.h>
7#include <stdarg.h>
8#include <errno.h>
9#include <sys/queue.h>
10
11#include <rte_interrupts.h>
12#include <rte_log.h>
13#include <rte_debug.h>
14#include <rte_pci.h>
15#include <rte_vxlan.h>
16#include <ethdev_driver.h>
17#include <rte_malloc.h>
18
19#include "ixgbe_logs.h"
20#include "base/ixgbe_api.h"
21#include "base/ixgbe_common.h"
22#include "ixgbe_ethdev.h"
23
24
25#define FDIRCTRL_PBALLOC_MASK 0x03
26
27
28#define PBALLOC_SIZE_SHIFT 15
29
30
31#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF
32#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF
33#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF
34#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF
35#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF
36#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF
37#define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12
38#define IXGBE_FDIR_MAX_FLEX_LEN 2
39#define IXGBE_MAX_FLX_SOURCE_OFF 62
40#define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
41#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10
42
43#define IXGBE_FDIR_FLOW_TYPES ( \
44 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
45 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
46 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
47 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
48 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
49 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
50 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
51 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
52
/*
 * Compress a 16-byte IPv6 address mask into a 16-bit per-byte mask:
 * bit i of ipv6m is set when byte i of ipaddr is 0xFF.  A byte that is
 * neither 0x00 nor 0xFF is invalid; in that case the macro logs an
 * error and RETURNS -EINVAL FROM THE ENCLOSING FUNCTION, so it may
 * only be used inside functions returning int.
 */
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)
67
/*
 * Expand a 16-bit per-byte mask back into a 16-byte IPv6 address mask:
 * bit i set -> byte i becomes 0xFF, otherwise 0x00.  Inverse of
 * IPV6_ADDR_TO_MASK above.
 */
#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
79
80#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
81
82static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
83static int fdir_set_input_mask(struct rte_eth_dev *dev,
84 const struct rte_eth_fdir_masks *input_mask);
85static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
86static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
87static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
88 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
89static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
90static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
91 uint32_t key);
92static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
93 enum rte_eth_fdir_pballoc_type pballoc);
94static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
95 enum rte_eth_fdir_pballoc_type pballoc);
96static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
97 union ixgbe_atr_input *input, uint8_t queue,
98 uint32_t fdircmd, uint32_t fdirhash,
99 enum rte_fdir_mode mode);
100static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
101 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
102 uint32_t fdirhash);
103static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
104
105
106
107
108
109
110
111
112
/*
 * Initialize Flow Director control registers.
 * Programs the bucket and signature hash keys, finalizes the FDIRCTRL
 * value and writes it, then polls until hardware reports the Flow
 * Director initialization is done.
 * Returns 0 on success, -ETIMEDOUT if INIT_DONE is never observed.
 */
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
	 * Write FDIRCTRL, flush, then poll for INIT_DONE in 1 ms steps
	 * up to IXGBE_FDIR_INIT_DONE_POLL attempts.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
			IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}
160
161
162
163
164
165static inline int
166configure_fdir_flags(const struct rte_eth_fdir_conf *conf, uint32_t *fdirctrl)
167{
168 *fdirctrl = 0;
169
170 switch (conf->pballoc) {
171 case RTE_ETH_FDIR_PBALLOC_64K:
172
173 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
174 break;
175 case RTE_ETH_FDIR_PBALLOC_128K:
176
177 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
178 break;
179 case RTE_ETH_FDIR_PBALLOC_256K:
180
181 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
182 break;
183 default:
184
185 PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
186 return -EINVAL;
187 };
188
189
190 switch (conf->status) {
191 case RTE_FDIR_NO_REPORT_STATUS:
192
193 break;
194 case RTE_FDIR_REPORT_STATUS:
195
196 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
197 break;
198 case RTE_FDIR_REPORT_STATUS_ALWAYS:
199
200 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
201 break;
202 default:
203
204 PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
205 return -EINVAL;
206 };
207
208 *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
209 IXGBE_FDIRCTRL_FLEX_SHIFT;
210
211 if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
212 conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
213 *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
214 *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
215 if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
216 *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
217 << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
218 else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
219 *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
220 << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
221 }
222
223 return 0;
224}
225
226
227
228
229
230
231
232
233
234
235
236
/*
 * Reverse the bit order within each 16-bit half of the 32-bit value
 * formed from the two port masks (bit i of each halfword maps to bit
 * 15-i of the same halfword), matching the FDIRTCPM/FDIRUDPM register
 * layout.
 *
 * Fix: hi_dword is cast to uint32_t before the shift.  A uint16_t is
 * promoted to signed int, and "hi_dword << 16" overflows into the sign
 * bit whenever the top bit of hi_dword is set - undefined behavior in C.
 */
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
	uint32_t mask = (uint32_t)hi_dword << 16;

	mask |= lo_dword;
	/* Swap adjacent bits, 2-bit pairs, nibbles, then bytes: after
	 * these four exchanges each 16-bit half is bit-reversed while
	 * the halves themselves stay in place.
	 */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
248
249
250
251
252
/*
 * Program the 82599 flow-director input mask registers from the masks
 * previously stored in the private FDIR info (signature/perfect modes).
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since they are currently not supported
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
	uint32_t fdirtcpm;	/* bit-reversed TCP/UDP port masks */
	uint32_t fdiripv6m;	/* compressed IPv6 src/dst masks */
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Build the FDIRM value.  A zero mask for a field means the
	 * field does not participate in matching, so the corresponding
	 * "mask out" bit is set in FDIRM.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
		/* mask the L4 protocol and ports */
		fdirm |= IXGBE_FDIRM_L4P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		/* anything other than the four patterns above is invalid */
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	/* flex byte mask */
	if (info->mask.flex_bytes_mask == 0)
		fdirm |= IXGBE_FDIRM_FLEX;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(info->mask.dst_port_mask),
			rte_be_to_cpu_16(info->mask.src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (registers hold the inverted mask)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);

	/* Store source and destination IPv4 masks; written through the
	 * raw PCI register address rather than IXGBE_WRITE_REG -
	 * presumably to avoid the byte swapping done by that macro
	 * (TODO confirm against base driver).
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(info->mask.src_ipv4_mask);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(info->mask.dst_ipv4_mask);

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks: dst in the
		 * upper 16 bits, src in the lower (inverted on write)
		 */
		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
			    info->mask.src_ipv6_mask;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
	}

	return IXGBE_SUCCESS;
}
332
333
334
335
336
/*
 * Program the x550-family flow-director input mask registers for the
 * MAC/VLAN and tunnel perfect-match modes, using the masks previously
 * stored in the private FDIR info.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/* mask VM pool and DIPv6 since they are currently not supported;
	 * mask FLEX byte as well
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
			 IXGBE_FDIRM_FLEX;
	uint32_t fdiripv6m;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	uint16_t mac_mask;

	PMD_INIT_FUNC_TRACE();

	/* set the default VxLAN UDP port for tunnel mode */
	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT);

	/* L3/L4 fields are always masked out in mac-vlan/tunnel modes */
	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		/* anything other than the four patterns above is invalid */
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* Build FDIRIP6M: start with destination IPv6 and the
	 * always-masked fields fully masked.
	 */
	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
	fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		/* mac-vlan mode never matches on tunnel type or TNI/VNI */
		fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
				IXGBE_FDIRIP6M_TNI_VNI;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* mask the inner MAC, then clear mask bits for each
		 * byte the user wants to match
		 */
		fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
		mac_mask = info->mask.mac_addr_byte_mask &
			(IXGBE_FDIRIP6M_INNER_MAC >>
			IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
		fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
				IXGBE_FDIRIP6M_INNER_MAC);

		switch (info->mask.tunnel_type_mask) {
		case 0:
			/* mask tunnel type */
			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			break;
		case 1:
			/* match on tunnel type */
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
			return -EINVAL;
		}

		switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
		case 0x0:
			/* mask the whole tunnel id (TNI/VNI) */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			/* match only the low 24 bits of the tunnel id */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			/* match the full tunnel id */
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
			return -EINVAL;
		}
	}

	/* mask out all L3/L4 fields; these registers hold inverted masks */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

	return IXGBE_SUCCESS;
}
428
/*
 * Record the 82599 (signature/perfect mode) input masks from the
 * generic rte_eth_fdir_masks in the driver's private FDIR info.
 * IPv6 masks are compressed to one bit per address byte; note that
 * IPV6_ADDR_TO_MASK returns -EINVAL from this function when a byte of
 * the supplied IPv6 mask is neither 0x00 nor 0xFF.
 */
static int
ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint16_t dst_ipv6m = 0;
	uint16_t src_ipv6m = 0;

	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.src_port_mask = input_mask->src_port_mask;
	info->mask.dst_port_mask = input_mask->dst_port_mask;
	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
	/* may return -EINVAL on an invalid IPv6 byte mask */
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
	info->mask.src_ipv6_mask = src_ipv6m;
	info->mask.dst_ipv6_mask = dst_ipv6m;

	return IXGBE_SUCCESS;
}
451
452static int
453ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
454 const struct rte_eth_fdir_masks *input_mask)
455{
456 struct ixgbe_hw_fdir_info *info =
457 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
458
459 memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
460 info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
461 info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
462 info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
463 info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
464
465 return IXGBE_SUCCESS;
466}
467
468static int
469ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
470 const struct rte_eth_fdir_masks *input_mask)
471{
472 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
473
474 if (mode >= RTE_FDIR_MODE_SIGNATURE &&
475 mode <= RTE_FDIR_MODE_PERFECT)
476 return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
477 else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
478 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
479 return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
480
481 PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
482 return -ENOTSUP;
483}
484
485int
486ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
487{
488 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
489
490 if (mode >= RTE_FDIR_MODE_SIGNATURE &&
491 mode <= RTE_FDIR_MODE_PERFECT)
492 return fdir_set_input_mask_82599(dev);
493 else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
494 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
495 return fdir_set_input_mask_x550(dev);
496
497 PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
498 return -ENOTSUP;
499}
500
/*
 * Move the two flexible match bytes to a new byte offset within the
 * packet.  A no-op when the offset is unchanged.  Otherwise the FDIR
 * hash table is cleared by pulsing the CLEARHT bit (entries hashed with
 * the old flex bytes are invalidated - NOTE(review): inferred from the
 * CLEARHT name, confirm against the 82599 datasheet), FDIRCTRL is
 * rewritten with the new word offset, and INIT_DONE is polled.
 * Returns 0 on success or -ETIMEDOUT.
 */
int
ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				uint16_t offset)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl;
	int i;

	/* nothing to do when the offset already matches */
	if (fdir_info->flex_bytes_offset == offset)
		return 0;

	/*
	 * Pulse the CLEARHT bit: set it, flush, clear it, flush again,
	 * so the flow-director hash table is emptied before the flex
	 * offset changes.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);

	/* replace the flex-offset field; offset is converted from bytes
	 * to 16-bit words
	 */
	fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
	fdirctrl |= ((offset >> 1)
		<< IXGBE_FDIRCTRL_FLEX_SHIFT);

	/* write back and poll for INIT_DONE in 1 ms steps */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
			IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_DRV_LOG(ERR, "Flow Director poll time exceeded!");
		return -ETIMEDOUT;
	}

	/* remember the new offset only after hardware accepted it */
	fdir_info->flex_bytes_offset = offset;

	return 0;
}
554
/*
 * Store the caller-supplied masks in the driver private data, then
 * program them into the hardware mask registers.
 */
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		    const struct rte_eth_fdir_masks *input_mask)
{
	int rc = ixgbe_fdir_store_input_mask(dev, input_mask);

	return rc != 0 ? rc : ixgbe_fdir_set_input_mask(dev);
}
567
568
569
570
571
/*
 * Validate the flexible-payload configuration and fold it into the
 * FDIRCTRL value and FDIRM register.  Only a single two-byte selection
 * from the RAW payload is supported: the two bytes must be adjacent,
 * start at an even offset, and lie within IXGBE_MAX_FLX_SOURCE_OFF.
 * The global flex mask must be either all-ones (match on flex bytes)
 * or zero (mask them out).  Returns 0 on success, -EINVAL otherwise.
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint32_t fdirm;
	uint16_t flexbytes = 0;
	uint16_t i;

	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		/* only RAW payload selection is supported */
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		/* the two flex bytes must be adjacent and start at an
		 * even offset no larger than IXGBE_MAX_FLX_SOURCE_OFF
		 */
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
			/* store the offset as a 16-bit word index */
			*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
			*fdirctrl |=
				(flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
					IXGBE_FDIRCTRL_FLEX_SHIFT;
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		/* the flex mask is global, not per flow type */
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
					((flex_mask->mask[1]) & 0xFF));
		if (flexbytes == UINT16_MAX)
			/* full mask: enable matching on the flex bytes */
			fdirm &= ~IXGBE_FDIRM_FLEX;
		else if (flexbytes != 0) {
			/* partial flex masks are not supported */
			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	/* record the result; note flexbytes holds the value from the
	 * last loop iteration only
	 */
	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)((*fdirctrl &
					     IXGBE_FDIRCTRL_FLEX_MASK) >>
					    IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}
634
/*
 * ixgbe_fdir_configure - Configure Flow Director hardware.
 *
 * Verifies the adapter supports the requested mode, builds the FDIRCTRL
 * value, carves flow-director memory out of RX packet buffer 0,
 * programs the input masks and flex configuration, and finally enables
 * the flow director.  Returns 0 on success or a negative errno.
 */
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	/* supported only on 82599, X540 and the X550 family */
	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 family supports mac-vlan and tunnel modes; other NICs
	 * only signature and perfect modes
	 */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer 0 size
	 * must be reduced by the flow-director memory usage, which is
	 * derived from the PBALLOC field just set in fdirctrl.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
			(IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}

	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719static uint32_t
720ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
721 uint32_t key)
722{
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763 __be32 common_hash_dword = 0;
764 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
765 u32 hash_result = 0;
766 u8 i;
767
768
769 flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
770
771
772 for (i = 1; i <= 13; i++)
773 common_hash_dword ^= atr_input->dword_stream[i];
774
775 hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
776
777
778 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
779
780
781 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
782
783
784 if (key & 0x0001)
785 hash_result ^= lo_hash_dword;
786 if (key & 0x00010000)
787 hash_result ^= hi_hash_dword;
788
789
790
791
792
793
794 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
795
796
797
798 for (i = 15; i; i--) {
799 if (key & (0x0001 << i))
800 hash_result ^= lo_hash_dword >> i;
801 if (key & (0x00010000 << i))
802 hash_result ^= hi_hash_dword >> i;
803 }
804
805 return hash_result;
806}
807
808static uint32_t
809atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
810 enum rte_eth_fdir_pballoc_type pballoc)
811{
812 if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
813 return ixgbe_atr_compute_hash_82599(input,
814 IXGBE_ATR_BUCKET_HASH_KEY) &
815 PERFECT_BUCKET_256KB_HASH_MASK;
816 else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
817 return ixgbe_atr_compute_hash_82599(input,
818 IXGBE_ATR_BUCKET_HASH_KEY) &
819 PERFECT_BUCKET_128KB_HASH_MASK;
820 else
821 return ixgbe_atr_compute_hash_82599(input,
822 IXGBE_ATR_BUCKET_HASH_KEY) &
823 PERFECT_BUCKET_64KB_HASH_MASK;
824}
825
826
827
828
829
830static inline int
831ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
832{
833 int i;
834
835 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
836 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
837 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
838 return 0;
839 rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
840 }
841
842 return -ETIMEDOUT;
843}
844
845
846
847
848
849
850
851static uint32_t
852atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
853 enum rte_eth_fdir_pballoc_type pballoc)
854{
855 uint32_t bucket_hash, sig_hash;
856
857 if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
858 bucket_hash = ixgbe_atr_compute_hash_82599(input,
859 IXGBE_ATR_BUCKET_HASH_KEY) &
860 SIG_BUCKET_256KB_HASH_MASK;
861 else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
862 bucket_hash = ixgbe_atr_compute_hash_82599(input,
863 IXGBE_ATR_BUCKET_HASH_KEY) &
864 SIG_BUCKET_128KB_HASH_MASK;
865 else
866 bucket_hash = ixgbe_atr_compute_hash_82599(input,
867 IXGBE_ATR_BUCKET_HASH_KEY) &
868 SIG_BUCKET_64KB_HASH_MASK;
869
870 sig_hash = ixgbe_atr_compute_hash_82599(input,
871 IXGBE_ATR_SIGNATURE_HASH_KEY);
872
873 return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
874}
875
876
877
878
879
880
881
/*
 * Program a perfect-match filter into hardware.  The filter fields are
 * written to their registers first, flushed, and only then is the ADD
 * command issued via FDIRCMD.  Supports the classic 5-tuple perfect
 * mode as well as the x550 mac-vlan and tunnel modes (which reuse the
 * FDIRSIPv6 registers for the inner MAC / tunnel id).
 * Returns 0 on success, negative errno on command timeout.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, uint8_t queue,
		uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 addresses; written through the raw
		 * PCI register address rather than IXGBE_WRITE_REG -
		 * presumably to skip that macro's byte swapping
		 * (TODO confirm against base driver)
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian) */
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* mac-vlan and tunnel modes: pack the inner MAC */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode: bit 31 flags a non-zero tunnel type */
			if (input->formatted.tunnel_type)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
		/* unused L3/L4 fields are zeroed in these modes */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
	}

	/* record vlan (little-endian) and flex_bytes (big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register and kick off the add */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985static int
986fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
987 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
988 uint32_t fdirhash)
989{
990 int err = 0;
991
992 PMD_INIT_FUNC_TRACE();
993
994
995 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
996 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
997 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
998 fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
999
1000 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1001 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1002
1003 PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
1004
1005 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1006 if (err < 0)
1007 PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
1008
1009 return err;
1010}
1011
1012
1013
1014
1015
1016
/*
 * Remove a flow-director filter, identified by its FDIRHASH value, from
 * the hardware table.  The entry is first queried; only if hardware
 * reports a valid filter is the remove command issued.
 * Returns 0 on success, negative errno if a command times out.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if the filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if the filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
	return err;

}
1050
1051static inline struct ixgbe_fdir_filter *
1052ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
1053 union ixgbe_atr_input *key)
1054{
1055 int ret;
1056
1057 ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
1058 if (ret < 0)
1059 return NULL;
1060
1061 return fdir_info->hash_map[ret];
1062}
1063
1064static inline int
1065ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1066 struct ixgbe_fdir_filter *fdir_filter)
1067{
1068 int ret;
1069
1070 ret = rte_hash_add_key(fdir_info->hash_handle,
1071 &fdir_filter->ixgbe_fdir);
1072
1073 if (ret < 0) {
1074 PMD_DRV_LOG(ERR,
1075 "Failed to insert fdir filter to hash table %d!",
1076 ret);
1077 return ret;
1078 }
1079
1080 fdir_info->hash_map[ret] = fdir_filter;
1081
1082 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
1083
1084 return 0;
1085}
1086
1087static inline int
1088ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1089 union ixgbe_atr_input *key)
1090{
1091 int ret;
1092 struct ixgbe_fdir_filter *fdir_filter;
1093
1094 ret = rte_hash_del_key(fdir_info->hash_handle, key);
1095
1096 if (ret < 0) {
1097 PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
1098 return ret;
1099 }
1100
1101 fdir_filter = fdir_info->hash_map[ret];
1102 fdir_info->hash_map[ret] = NULL;
1103
1104 TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
1105 rte_free(fdir_filter);
1106
1107 return 0;
1108}
1109
/*
 * ixgbe_fdir_filter_program - add, update or delete a flow-director
 * filter, keeping the software hash table/list and the hardware filter
 * table in sync.
 * @dev: ethdev being programmed
 * @rule: parsed rule (input tuple, mode, soft id, queue, flags)
 * @del: true to delete the rule, false to add it
 * @update: true to allow overwriting an existing filter with the
 *          same input tuple
 * Returns 0 on success or a negative errno.
 */
int
ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
			  struct ixgbe_fdir_rule *rule,
			  bool del,
			  bool update)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdircmd_flags;
	uint32_t fdirhash;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	struct ixgbe_fdir_filter *node;
	bool add_node = FALSE;

	/* the rule's mode must match the configured flow-director mode */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	/*
	 * Sanity check for the x550 family: when adding a raw IPv4/IPv6
	 * flow (no L4), the L4 protocol and ports must already be
	 * masked out, except in the mac-vlan and tunnel modes.
	 */
	if ((!del) &&
	    (hw->mac.type == ixgbe_mac_X550 ||
	     hw->mac.type == ixgbe_mac_X550EM_x ||
	     hw->mac.type == ixgbe_mac_X550EM_a) &&
	    (rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV4 ||
	     rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV6) &&
	    (info->mask.src_port_mask != 0 ||
	     info->mask.dst_port_mask != 0) &&
	    (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
	     rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
		PMD_DRV_LOG(ERR, "By this device,"
			    " IPv4 is not supported without"
			    " L4 protocol and ports masked!");
		return -ENOTSUP;
	}

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	/* compute the FDIRHASH value for this rule */
	if (is_perfect) {
		/* IPv6 tuples are rejected in perfect mode */
		if (rule->ixgbe_fdir.formatted.flow_type &
		    IXGBE_ATR_L4TYPE_IPV6_MASK) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		/* bucket hash plus the caller's soft id in the upper bits */
		fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
							  dev->data->dev_conf.fdir_conf.pballoc);
		fdirhash |= rule->soft_id <<
			IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	} else
		fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
						      dev->data->dev_conf.fdir_conf.pballoc);

	if (del) {
		/* remove from the software table first, then erase the
		 * hardware entry
		 */
		err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
		if (err < 0)
			return err;

		err = fdir_erase_filter_82599(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
		return err;
	}

	/* add or update: resolve the destination queue */
	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
	if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
		/* drop rules are only valid in perfect mode, where the
		 * configured drop queue is used
		 */
		if (is_perfect) {
			queue = dev->data->dev_conf.fdir_conf.drop_queue;
			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
		} else {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
	} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
		queue = (uint8_t)rule->queue;
	else
		return -EINVAL;

	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
	if (node) {
		if (update) {
			/* refresh the existing software node in place */
			node->fdirflags = fdircmd_flags;
			node->fdirhash = fdirhash;
			node->queue = queue;
		} else {
			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
			return -EINVAL;
		}
	} else {
		/* create and insert a new software node */
		add_node = TRUE;
		node = rte_zmalloc("ixgbe_fdir",
				   sizeof(struct ixgbe_fdir_filter),
				   0);
		if (!node)
			return -ENOMEM;
		rte_memcpy(&node->ixgbe_fdir,
			   &rule->ixgbe_fdir,
			   sizeof(union ixgbe_atr_input));
		node->fdirflags = fdircmd_flags;
		node->fdirhash = fdirhash;
		node->queue = queue;

		err = ixgbe_insert_fdir_filter(info, node);
		if (err < 0) {
			rte_free(node);
			return err;
		}
	}

	/* program the filter into hardware */
	if (is_perfect) {
		err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash, fdir_mode);
	} else {
		err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash);
	}
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
		/* roll back the software insert on hardware failure */
		if (add_node)
			(void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
	} else {
		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
	}

	return err;
}
1254
1255static int
1256ixgbe_fdir_flush(struct rte_eth_dev *dev)
1257{
1258 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1259 struct ixgbe_hw_fdir_info *info =
1260 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1261 int ret;
1262
1263 ret = ixgbe_reinit_fdir_tables_82599(hw);
1264 if (ret < 0) {
1265 PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
1266 return ret;
1267 }
1268
1269 info->f_add = 0;
1270 info->f_remove = 0;
1271 info->add = 0;
1272 info->remove = 0;
1273
1274 return ret;
1275}
1276
1277#define FDIRENTRIES_NUM_SHIFT 10
/*
 * Report flow-director capabilities and the currently configured global
 * masks to the application.
 *
 * @param dev        Ethdev to query; FDIRCTRL is read from its hardware.
 * @param fdir_info  Output structure filled with mode, guaranteed entry
 *                   count, masks and flex-payload configuration.
 */
void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl, max_num, i;
	uint8_t offset;

	/* Flex-byte offset is stored in FDIRCTRL in units of 2 bytes;
	 * convert it back to a byte offset. */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
		IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	/* Table capacity scales with the PBALLOC field: 1K entries shifted
	 * up by the allocated packet-buffer size. */
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	/* Signature filters are a quarter of the size of perfect filters,
	 * so four times as many fit in the same table. */
	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->guarant_spc = max_num;
	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_info->guarant_spc = max_num * 4;

	/* Copy the cached global masks out of the driver's private info. */
	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
	/* Expand the 16-bit per-byte IPv6 mask back to a 16-byte address
	 * mask (see IPV6_MASK_TO_ADDR in the header of this file). */
	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
			  fdir_info->mask.ipv6_mask.src_ip);
	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
			  fdir_info->mask.ipv6_mask.dst_ip);
	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
	fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

	/* MAC VLAN and tunnel modes do not match on L3/L4 flow types. */
	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->flow_types_mask[0] = 0ULL;
	else
		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
		fdir_info->flow_types_mask[i] = 0ULL;

	/* One 2-byte flex payload segment; expose its offset and the
	 * byte-wise mask currently programmed. */
	fdir_info->flex_payload_unit = sizeof(uint16_t);
	fdir_info->max_flex_payload_segment_num = 1;
	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
	fdir_info->flex_conf.nb_payloads = 1;
	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
	fdir_info->flex_conf.nb_flexmasks = 1;
	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	fdir_info->flex_conf.flex_mask[0].mask[0] =
		(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
	fdir_info->flex_conf.flex_mask[0].mask[1] =
		(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}
1336
/*
 * Collect flow-director statistics from hardware into the driver's
 * private info structure, then copy them out to the caller.
 *
 * @param dev         Ethdev whose FDIR statistics registers are read.
 * @param fdir_stats  Output structure receiving the accumulated stats.
 */
void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t reg, max_num;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Snapshot values: collision count and free filter entries. */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
				     IXGBE_FDIRFREE_COLL_SHIFT);
	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
				IXGBE_FDIRFREE_FREE_SHIFT);

	/* High-water marks: longest hash bucket and longest linked list. */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
				   IXGBE_FDIRLEN_MAXHASH_SHIFT);
	info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
				  IXGBE_FDIRLEN_MAXLEN_SHIFT);

	/* These counters are accumulated with "+=": the hardware fields
	 * are narrow and the software copy keeps the running total. */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
		IXGBE_FDIRUSTAT_REMOVE_SHIFT;
	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
		IXGBE_FDIRUSTAT_ADD_SHIFT;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
		IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
		IXGBE_FDIRFSTAT_FADD_SHIFT;

	/* Copy the accumulated software counters to the caller. */
	fdir_stats->collision = info->collision;
	fdir_stats->free = info->free;
	fdir_stats->maxhash = info->maxhash;
	fdir_stats->maxlen = info->maxlen;
	fdir_stats->remove = info->remove;
	fdir_stats->add = info->add;
	fdir_stats->f_remove = info->f_remove;
	fdir_stats->f_add = info->f_add;

	/* Guaranteed-entry usage = table capacity minus free entries;
	 * signature mode packs 4x as many entries in the same table. */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(reg & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_stats->guarant_cnt = max_num - fdir_stats->free;
	else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;

}
1391
1392
1393void
1394ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
1395{
1396 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1397 struct ixgbe_hw_fdir_info *fdir_info =
1398 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1399 struct ixgbe_fdir_filter *node;
1400 bool is_perfect = FALSE;
1401 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1402
1403 if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1404 fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1405 is_perfect = TRUE;
1406
1407 if (is_perfect) {
1408 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1409 (void)fdir_write_perfect_filter_82599(hw,
1410 &node->ixgbe_fdir,
1411 node->queue,
1412 node->fdirflags,
1413 node->fdirhash,
1414 fdir_mode);
1415 }
1416 } else {
1417 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1418 (void)fdir_add_signature_filter_82599(hw,
1419 &node->ixgbe_fdir,
1420 node->queue,
1421 node->fdirflags,
1422 node->fdirhash);
1423 }
1424 }
1425}
1426
1427
1428int
1429ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
1430{
1431 struct ixgbe_hw_fdir_info *fdir_info =
1432 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1433 struct ixgbe_fdir_filter *fdir_filter;
1434 struct ixgbe_fdir_filter *filter_flag;
1435 int ret = 0;
1436
1437
1438 rte_hash_reset(fdir_info->hash_handle);
1439 memset(fdir_info->hash_map, 0,
1440 sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
1441 filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
1442 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1443 TAILQ_REMOVE(&fdir_info->fdir_list,
1444 fdir_filter,
1445 entries);
1446 rte_free(fdir_filter);
1447 }
1448
1449 if (filter_flag != NULL)
1450 ret = ixgbe_fdir_flush(dev);
1451
1452 return ret;
1453}
1454