// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

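/* Program a catch-all Rx Parser entry: no match bits are enabled, so every
 * frame matches, and the accept flag (af) is set. tc_init() installs this
 * entry in the last slot so unmatched traffic still passes.
 */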
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}

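/* Look up a Rx Parser entry. With @free == false, return the entry already
 * bound to the u32 knode handle; with @free == true, claim the first unused
 * slot, bind it to the handle and clear its HW values.
 */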
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}

static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

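	/* The parser matches 32-bit words. A key whose offset is not word
	 * aligned straddles two words, so it is split across two chained
	 * entries: this one at @real_off and a fragment entry at the
	 * following word.
	 */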
	if (rem) {
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		return ret;

	return 0;
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	if (!priv->plat->fpe_cfg) {
		priv->plat->fpe_cfg = devm_kzalloc(priv->device,
						   sizeof(*priv->plat->fpe_cfg),
						   GFP_KERNEL);
		if (!priv->plat->fpe_cfg)
			return -ENOMEM;
	} else {
		memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all frames pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
					count, sizeof(*priv->tc_entries),
					GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);

	return 0;
}

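/* Offload a Credit Based Shaper (IEEE 802.1Qav) request onto a TX queue:
 * the queue is switched between DCB and AVB mode as needed and the slope
 * and credit values are programmed through stmmac_config_cbs().
 */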
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	/* Port Transmit Rate and Speed Divider */
	switch (priv->speed) {
	case SPEED_10000:
		ptr = 32;
		speed_div = 10000000;
		break;
	case SPEED_5000:
		ptr = 32;
		speed_div = 5000000;
		break;
	case SPEED_2500:
		ptr = 8;
		speed_div = 2500000;
		break;
	case SPEED_1000:
		ptr = 8;
		speed_div = 1000000;
		break;
	case SPEED_100:
		ptr = 4;
		speed_div = 100000;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
	}

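	/* Scale the qdisc slope and credit values into the form expected by
	 * stmmac_config_cbs(): slopes are multiplied by 1024 and the port
	 * transmit rate factor, then divided by the speed divider; credits
	 * are multiplied by 1024 * 8.
	 */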
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}

static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Bail out if the rule carries no basic (ip_proto) match */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);
	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Bail out if the rule carries no IPv4 address match */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);
	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Bail out if the rule carries no L4 port match */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

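/* Flower parsers tried in order by tc_add_flow(); each one handles the match
 * keys it understands (recording them or programming the L3/L4 filters) and
 * returns -EINVAL when the rule carries none of them.
 */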
static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret) {
			entry->in_use = true;
			continue;
		}
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

#define VLAN_PRIO_FULL_MASK (0x07)

static int tc_add_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_vlan match;

	/* Bail out if the rule carries no VLAN match */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_vlan(rule, &match);

	if (match.mask->vlan_priority) {
		u32 prio;

		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for VLAN priority\n");
			return -EINVAL;
		}

		prio = BIT(match.key->vlan_priority);
		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
	}

	return 0;
}

static int tc_del_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);

	/* Bail out if the rule carries no VLAN match */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	stmmac_rx_queue_prio(priv, priv->hw, 0, tc);

	return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_add_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_del_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* The L3/L4 filters cannot be used while RSS is enabled */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow_cls(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow_cls(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

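/* Offload a taprio (IEEE 802.1Qbv) schedule onto the EST block: each gate
 * control list entry packs the interval into the low @wid bits and the gate
 * mask above it. Set-And-Hold/Set-And-Release commands additionally request
 * frame preemption (FPE), which triggers the FPE handshake.
 */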
static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time, current_time;
	ktime_t current_time_ns;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->base_time)
		return -ERANGE;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

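	/* Adjust for real system time: if the requested base time is already
	 * in the past, advance it by a whole number of cycles so the
	 * schedule starts at the next cycle boundary after "now".
	 */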
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	if (ktime_after(qopt->base_time, current_time_ns)) {
		time = ktime_to_timespec64(qopt->base_time);
	} else {
		ktime_t base_time;
		s64 n;

		n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
			      qopt->cycle_time);
		base_time = ktime_add_ns(qopt->base_time,
					 (n + 1) * qopt->cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel)
		return -EOPNOTSUPP;

	/* The actual FPE register configuration is done once the FPE
	 * handshake succeeds.
	 */
	priv->plat->fpe_cfg->enable = fpe;

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");

	if (fpe) {
		stmmac_fpe_handshake(priv, true);
		netdev_info(priv->dev, "start FPE handshake\n");
	}

	return 0;

disable:
	if (priv->plat->est) {
		priv->plat->est->enable = false;
		stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				     priv->plat->clk_ptp_rate);
	}

	priv->plat->fpe_cfg->enable = false;
	stmmac_fpe_configure(priv, priv->ioaddr,
			     priv->plat->tx_queues_to_use,
			     priv->plat->rx_queues_to_use,
			     false);
	netdev_info(priv->dev, "disabled FPE\n");

	stmmac_fpe_handshake(priv, false);
	netdev_info(priv->dev, "stop FPE handshake\n");

	return ret;
}

static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};