// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

#include "ice.h"
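
/**
 * ice_is_arfs_active - helper to check if aRFS is active
 * @vsi: VSI to check
 */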
static bool ice_is_arfs_active(struct ice_vsi *vsi)
{
	return !!vsi->arfs_fltr_list;
}
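
/**
 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
 * @hw: pointer to the HW structure
 * @flow_type: flow type as Flow Director understands it
 *
 * Flow Director can query this function to see if aRFS currently has any
 * active perfect (4-tuple) filters for the specified flow_type.
 */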
bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
{
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	struct ice_pf *pf = hw->back;
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;

	/* active filter counters are updated atomically from multiple CPUs */
	smp_mb__before_atomic();
	switch (flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
	default:
		return false;
	}
}
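
/**
 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
 * @vsi: VSI that aRFS is active on
 * @entry: aRFS entry used to change counters
 * @add: true to increment counter, false to decrement
 */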
static void
ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
				  struct ice_arfs_entry *entry, bool add)
{
	struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;

	switch (entry->fltr_info.flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		if (add)
			atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv4_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv4_cnt);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		if (add)
			atomic_inc(&fltr_cntrs->active_udpv6_cnt);
		else
			atomic_dec(&fltr_cntrs->active_udpv6_cnt);
		break;
	default:
		dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
			entry->fltr_info.flow_type);
	}
}
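
/**
 * ice_arfs_del_flow_rules - delete aRFS rules from HW
 * @vsi: VSI for the flow rules that need to be deleted
 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
 *
 * Loop through the delete list passed in and remove the rules from HW. After
 * each rule is deleted, disconnect and free the ice_arfs_entry because it is
 * no longer referenced by the aRFS hash table.
 */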
static void
ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, e, false);
		else
			dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, e->fltr_state, e->fltr_info.fltr_id,
				e->flow_id, e->fltr_info.q_index);

		/* The aRFS hash table is no longer referencing this entry */
		hlist_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}
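
/**
 * ice_arfs_add_flow_rules - add aRFS rules to HW
 * @vsi: VSI for the flow rules that need to be added
 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
 *
 * Loop through the add list passed in and program the rules in HW. After each
 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
 * the ice_arfs_entry(s) because they are still referenced by the aRFS hash
 * table.
 */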
static void
ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
{
	struct ice_arfs_entry_ptr *ep;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
		int result;

		result = ice_fdir_write_fltr(vsi->back,
					     &ep->arfs_entry->fltr_info, true,
					     false);
		if (!result)
			ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
							  true);
		else
			dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
				result, ep->arfs_entry->fltr_state,
				ep->arfs_entry->fltr_info.fltr_id,
				ep->arfs_entry->flow_id,
				ep->arfs_entry->fltr_info.q_index);

		hlist_del(&ep->list_entry);
		devm_kfree(dev, ep);
	}
}
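
/**
 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
 * @vsi: VSI containing the aRFS entry
 * @arfs_entry: aRFS entry that's being checked for expiration
 *
 * Return true if the flow has expired, else false. This function should be
 * used to determine whether or not an aRFS entry should be removed from the
 * hardware and software structures.
 */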
static bool
ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
{
#define ICE_ARFS_TIME_DELTA_EXPIRATION	msecs_to_jiffies(5000)
	if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
				arfs_entry->flow_id,
				arfs_entry->fltr_info.fltr_id))
		return true;

	/* expiration timer only used for UDP filters */
	if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
	    arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		return false;

	return time_in_range64(arfs_entry->time_activated +
			       ICE_ARFS_TIME_DELTA_EXPIRATION,
			       arfs_entry->time_activated, get_jiffies_64());
}
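
/**
 * ice_arfs_update_flow_rules - collect aRFS rules that need add/delete in HW
 * @vsi: the VSI to be forwarded to
 * @idx: index into the VSI's aRFS filter list, derived from the skb's hash
 * @add_list: list to populate with filters to be added to Flow Director
 * @del_list: list to populate with filters to be deleted from Flow Director
 *
 * Iterate over the hlist at the index given in the aRFS hash table and
 * determine if there are any aRFS entries that need to be either added or
 * deleted in HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the filter
 * needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and the
 * flow has expired the filter needs to be deleted from HW. The caller is
 * expected to program/remove the filters collected on add_list/del_list.
 */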
static void
ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
			   struct hlist_head *add_list,
			   struct hlist_head *del_list)
{
	struct ice_arfs_entry *e;
	struct hlist_node *n;
	struct device *dev;

	dev = ice_pf_to_dev(vsi->back);

	/* go through the aRFS hlist at this idx and check for needed updates */
	hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
		/* check if filter needs to be added to HW */
		if (e->fltr_state == ICE_ARFS_INACTIVE) {
			enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
			struct ice_arfs_entry_ptr *ep =
				devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);

			if (!ep)
				continue;
			INIT_HLIST_NODE(&ep->list_entry);
			ep->arfs_entry = e;
			hlist_add_head(&ep->list_entry, add_list);
			e->fltr_state = ICE_ARFS_ACTIVE;
			/* expiration timer only used for UDP flows */
			if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
				e->time_activated = get_jiffies_64();
		} else if (e->fltr_state == ICE_ARFS_ACTIVE) {
			/* check if filter needs to be removed from HW */
			if (ice_arfs_is_flow_expired(vsi, e)) {
				/* remove aRFS entry from hash table for
				 * delete and to prevent referencing it the
				 * next time through this VSI's aRFS list
				 */
				hlist_del(&e->list_entry);
				e->fltr_state = ICE_ARFS_TODEL;
				/* save reference to aRFS entry for delete */
				hlist_add_head(&e->list_entry, del_list);
			}
		}
}
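
/**
 * ice_sync_arfs_fltrs - update all aRFS filters
 * @pf: board private structure
 */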
void ice_sync_arfs_fltrs(struct ice_pf *pf)
{
	HLIST_HEAD(tmp_del_list);
	HLIST_HEAD(tmp_add_list);
	struct ice_vsi *pf_vsi;
	unsigned int i;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	if (!ice_is_arfs_active(pf_vsi))
		return;

	spin_lock_bh(&pf_vsi->arfs_lock);
	/* gather needed add/delete updates from every aRFS list */
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
					   &tmp_del_list);
	spin_unlock_bh(&pf_vsi->arfs_lock);

	/* use list of ice_arfs_entry(s) for delete */
	ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);

	/* use list of ice_arfs_entry_ptr(s) for add */
	ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
}
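
/**
 * ice_arfs_build_entry - builds an aRFS entry based on input
 * @vsi: destination VSI for this flow
 * @fk: flow dissector keys for creating the flow entry
 * @rxq_idx: Rx queue to steer this flow to
 * @flow_id: passed down from the stack and saved for flow expiration
 *
 * Returns an aRFS entry on success and NULL on failure.
 */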
static struct ice_arfs_entry *
ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
		     u16 rxq_idx, u32 flow_id)
{
	struct ice_arfs_entry *arfs_entry;
	struct ice_fdir_fltr *fltr_info;
	u8 ip_proto;

	arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
				  sizeof(*arfs_entry),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!arfs_entry)
		return NULL;

	fltr_info = &arfs_entry->fltr_info;
	fltr_info->q_index = rxq_idx;
	fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
	fltr_info->dest_vsi = vsi->idx;
	ip_proto = fk->basic.ip_proto;

	if (fk->basic.n_proto == htons(ETH_P_IP)) {
		fltr_info->ip.v4.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV4_TCP :
			ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
		fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
		fltr_info->ip.v4.src_port = fk->ports.src;
		fltr_info->ip.v4.dst_port = fk->ports.dst;
	} else { /* ETH_P_IPV6 */
		fltr_info->ip.v6.proto = ip_proto;
		fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
			ICE_FLTR_PTYPE_NONF_IPV6_TCP :
			ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
		fltr_info->ip.v6.src_port = fk->ports.src;
		fltr_info->ip.v6.dst_port = fk->ports.dst;
	}

	arfs_entry->flow_id = flow_id;
	fltr_info->fltr_id =
		atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;

	return arfs_entry;
}
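
/**
 * ice_arfs_is_perfect_flow_set - check if a perfect filter is set for flow
 * @hw: pointer to HW structure
 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
 *
 * We only support perfect (4-tuple) filters for aRFS. This function allows
 * aRFS to check if perfect (4-tuple) flow rules are currently in place by
 * Flow Director.
 */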
static bool
ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
{
	unsigned long *perfect_fltr = hw->fdir_perfect_fltr;

	/* no perfect filter bitmap means all flow types are treated as set */
	if (!perfect_fltr)
		return true;

	if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
	else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
		return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);

	return false;
}
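
/**
 * ice_rx_flow_steer - steer the Rx flow to where application is being run
 * @netdev: ptr to the netdev being adjusted
 * @skb: buffer with required header information
 * @rxq_idx: queue to which the flow needs to move
 * @flow_id: flow identifier provided by the netdev
 *
 * Based on the skb, rxq_idx, and flow_id passed in, add or update an entry
 * in the aRFS hash table for this flow. If an entry for this flow_id already
 * exists and its Rx queue has changed, mark it ICE_ARFS_INACTIVE so the
 * updated filter gets reprogrammed in HW; otherwise allocate a new entry,
 * which is ICE_ARFS_INACTIVE by default so it gets added to HW. Returns the
 * filter ID on success or a negative errno on failure.
 */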
int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
		  u16 rxq_idx, u32 flow_id)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_arfs_entry *arfs_entry;
	struct ice_vsi *vsi = np->vsi;
	struct flow_keys fk;
	struct ice_pf *pf;
	__be16 n_proto;
	u8 ip_proto;
	u16 idx;
	int ret;

	/* aRFS was not initialized for this VSI, so bail out */
	if (unlikely(!vsi->arfs_fltr_list))
		return -ENODEV;

	pf = vsi->back;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	n_proto = fk.basic.n_proto;
	/* Support only IPV4 and IPV6 */
	if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
	    n_proto == htons(ETH_P_IPV6))
		ip_proto = fk.basic.ip_proto;
	else
		return -EPROTONOSUPPORT;

	/* Support only TCP and UDP */
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	/* only support 4-tuple filters for aRFS */
	if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
		return -EOPNOTSUPP;

	/* choose the aRFS list from the skb's hash */
	idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;

	spin_lock_bh(&vsi->arfs_lock);
	hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
			     list_entry) {
		struct ice_fdir_fltr *fltr_info;

		/* keep searching for the already existing arfs_entry flow */
		if (arfs_entry->flow_id != flow_id)
			continue;

		fltr_info = &arfs_entry->fltr_info;
		ret = fltr_info->fltr_id;

		if (fltr_info->q_index == rxq_idx ||
		    arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
			goto out;

		/* update the queue to forward to on an already existing flow */
		fltr_info->q_index = rxq_idx;
		arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
		ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
		goto out_schedule_service_task;
	}

	arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
	if (!arfs_entry) {
		ret = -ENOMEM;
		goto out;
	}

	ret = arfs_entry->fltr_info.fltr_id;
	INIT_HLIST_NODE(&arfs_entry->list_entry);
	hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
out_schedule_service_task:
	ice_service_task_schedule(pf);
out:
	spin_unlock_bh(&vsi->arfs_lock);
	return ret;
}
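
/**
 * ice_init_arfs_cntrs - initialize aRFS counter values
 * @vsi: VSI that aRFS counters need to be initialized on
 */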
static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
{
	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
				       GFP_KERNEL);
	if (!vsi->arfs_fltr_cntrs)
		return -ENOMEM;

	vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
					 GFP_KERNEL);
	if (!vsi->arfs_last_fltr_id) {
		kfree(vsi->arfs_fltr_cntrs);
		vsi->arfs_fltr_cntrs = NULL;
		return -ENOMEM;
	}

	return 0;
}
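
/**
 * ice_init_arfs - initialize aRFS resources
 * @vsi: the VSI to be forwarded to
 */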
void ice_init_arfs(struct ice_vsi *vsi)
{
	struct hlist_head *arfs_fltr_list;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return;

	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
				 GFP_KERNEL);
	if (!arfs_fltr_list)
		return;

	if (ice_init_arfs_cntrs(vsi))
		goto free_arfs_fltr_list;

	for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
		INIT_HLIST_HEAD(&arfs_fltr_list[i]);

	spin_lock_init(&vsi->arfs_lock);

	vsi->arfs_fltr_list = arfs_fltr_list;

	return;

free_arfs_fltr_list:
	kfree(arfs_fltr_list);
}
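
/**
 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
 * @vsi: the VSI to be forwarded to
 */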
void ice_clear_arfs(struct ice_vsi *vsi)
{
	struct device *dev;
	unsigned int i;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
	    !vsi->arfs_fltr_list)
		return;

	dev = ice_pf_to_dev(vsi->back);
	for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
		struct ice_arfs_entry *r;
		struct hlist_node *n;

		spin_lock_bh(&vsi->arfs_lock);
		hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
					  list_entry) {
			hlist_del(&r->list_entry);
			devm_kfree(dev, r);
		}
		spin_unlock_bh(&vsi->arfs_lock);
	}

	kfree(vsi->arfs_fltr_list);
	vsi->arfs_fltr_list = NULL;
	kfree(vsi->arfs_last_fltr_id);
	vsi->arfs_last_fltr_id = NULL;
	kfree(vsi->arfs_fltr_cntrs);
	vsi->arfs_fltr_cntrs = NULL;
}
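
/**
 * ice_free_cpu_rx_rmap - free setup CPU reverse map
 * @vsi: the VSI to be forwarded to
 */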
void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;

	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
		return;

	netdev = vsi->netdev;
	if (!netdev || !netdev->rx_cpu_rmap ||
	    netdev->reg_state != NETREG_REGISTERED)
		return;

	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
}
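
/**
 * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
 * @vsi: the VSI to be forwarded to
 */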
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
	struct net_device *netdev;
	struct ice_pf *pf;
	int base_idx, i;

	if (!vsi || vsi->type != ICE_VSI_PF)
		return -EINVAL;

	pf = vsi->back;
	netdev = vsi->netdev;
	if (!pf || !netdev || !vsi->num_q_vectors ||
	    vsi->netdev->reg_state != NETREG_REGISTERED)
		return -EINVAL;

	netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
		   vsi->type, netdev->name, vsi->num_q_vectors);

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
	if (unlikely(!netdev->rx_cpu_rmap))
		return -EINVAL;

	base_idx = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++)
		if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				     pf->msix_entries[base_idx + i].vector)) {
			ice_free_cpu_rx_rmap(vsi);
			return -EINVAL;
		}

	return 0;
}
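
/**
 * ice_remove_arfs - clear the aRFS configuration
 * @pf: device private structure
 */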
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_free_cpu_rx_rmap(pf_vsi);
	ice_clear_arfs(pf_vsi);
}
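
/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */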
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_remove_arfs(pf);
	if (ice_set_cpu_rx_rmap(pf_vsi)) {
		dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
		return;
	}
	ice_init_arfs(pf_vsi);
}