// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}

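/* With @free set, claim and return the first unused entry; otherwise
 * return the entry already bound to the u32 handle of @cls, skipping
 * fragment continuations.
 */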
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}

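/* The Frame Parser matches 32 bits per entry against a 4-byte-aligned
 * window of the frame.  A key at an unaligned offset is therefore split
 * across two chained entries covering consecutive words, with the second
 * ("fragment") entry carrying the spill-over bytes.
 */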
static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				 priv->tc_entries_max);
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	if (!priv->plat->fpe_cfg) {
		priv->plat->fpe_cfg = devm_kzalloc(priv->device,
						   sizeof(*priv->plat->fpe_cfg),
						   GFP_KERNEL);
		if (!priv->plat->fpe_cfg)
			return -ENOMEM;
	} else {
		memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
					count, sizeof(*priv->tc_entries),
					GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);

	return 0;
}

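/* Credit-Based Shaper: tc hands over the slopes in kbit/s; they are
 * rescaled against the link speed (speed_div, in kbit/s) and the
 * per-speed port transmit rate factor (ptr) before being programmed
 * into the hardware.
 */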
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	/* Port Transmit Rate and Speed Divider */
	switch (priv->speed) {
	case SPEED_10000:
		ptr = 32;
		speed_div = 10000000;
		break;
	case SPEED_5000:
		ptr = 32;
		speed_div = 5000000;
		break;
	case SPEED_2500:
		ptr = 8;
		speed_div = 2500000;
		break;
	case SPEED_1000:
		ptr = 8;
		speed_div = 1000000;
		break;
	case SPEED_100:
		ptr = 4;
		speed_div = 100000;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
	}

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}

static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);
	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);
	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

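/* Flow parsers are tried in order; the entry is marked in use as soon as
 * any of them programs a filter successfully.
 */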
static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

#define VLAN_PRIO_FULL_MASK (0x07)

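/* Steer a VLAN priority (full 3-bit mask only) to the RX queue that maps
 * to the requested traffic class.
 */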
static int tc_add_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
	struct flow_match_vlan match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	flow_rule_match_vlan(rule, &match);

	if (match.mask->vlan_priority) {
		u32 prio;

		if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
			netdev_err(priv->dev, "Only full mask is supported for VLAN priority\n");
			return -EINVAL;
		}

		prio = BIT(match.key->vlan_priority);
		stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
	}

	return 0;
}

static int tc_del_vlan_flow(struct stmmac_priv *priv,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	int tc = tc_classid_to_hwtc(priv->dev, cls->classid);

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	if (tc < 0) {
		netdev_err(priv->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	stmmac_rx_queue_prio(priv, priv->hw, 0, tc);

	return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_add_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls)
{
	int ret;

	ret = tc_del_flow(priv, cls);
	if (!ret)
		return ret;

	return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow_cls(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow_cls(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

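/* Keep the requested base time if it is still in the future; otherwise
 * advance it by enough whole cycles to land just past the current time,
 * so the schedule starts cleanly on a cycle boundary.
 */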
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
					   ktime_t current_time,
					   u64 cycle_time)
{
	struct timespec64 time;

	if (ktime_after(old_base_time, current_time)) {
		time = ktime_to_timespec64(old_base_time);
	} else {
		s64 n;
		ktime_t base_time;

		n = div64_s64(ktime_sub_ns(current_time, old_base_time),
			      cycle_time);
		base_time = ktime_add_ns(old_base_time,
					 (n + 1) * cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	return time;
}

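/* Offload a taprio schedule via EST: each gate control list entry packs
 * the interval (in ns) into the low 'wid' bits and the gate mask into
 * the bits above; set-and-hold/set-and-release commands additionally
 * force gate 0 on/off and request frame preemption (FPE).
 */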
static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time, current_time, qopt_time;
	ktime_t current_time_ns;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->base_time)
		return -ERANGE;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;

		mutex_init(&priv->plat->est->lock);
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	mutex_lock(&priv->plat->est->lock);
	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;
	mutex_unlock(&priv->plat->est->lock);

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

	mutex_lock(&priv->plat->est->lock);
	/* Adjust for real system time */
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
					qopt->cycle_time);

	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	qopt_time = ktime_to_timespec64(qopt->base_time);
	priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
	priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel) {
		mutex_unlock(&priv->plat->est->lock);
		return -EOPNOTSUPP;
	}

	/* Actual FPE register configuration will be done after the FPE
	 * handshake succeeds.
	 */
	priv->plat->fpe_cfg->enable = fpe;

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	mutex_unlock(&priv->plat->est->lock);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");

	if (fpe) {
		stmmac_fpe_handshake(priv, true);
		netdev_info(priv->dev, "start FPE handshake\n");
	}

	return 0;

disable:
	if (priv->plat->est) {
		mutex_lock(&priv->plat->est->lock);
		priv->plat->est->enable = false;
		stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				     priv->plat->clk_ptp_rate);
		mutex_unlock(&priv->plat->est->lock);
	}

	priv->plat->fpe_cfg->enable = false;
	stmmac_fpe_configure(priv, priv->ioaddr,
			     priv->plat->tx_queues_to_use,
			     priv->plat->rx_queues_to_use,
			     false);
	netdev_info(priv->dev, "disabled FPE\n");

	stmmac_fpe_handshake(priv, false);
	netdev_info(priv->dev, "stop FPE handshake\n");

	return ret;
}

static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};