1
2
3
4#include "sja1105.h"
5
6#define SJA1105_TAS_CLKSRC_DISABLED 0
7#define SJA1105_TAS_CLKSRC_STANDALONE 1
8#define SJA1105_TAS_CLKSRC_AS6802 2
9#define SJA1105_TAS_CLKSRC_PTP 3
10#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
11
12#define work_to_sja1105_tas(d) \
13 container_of((d), struct sja1105_tas_data, tas_work)
14#define tas_to_sja1105(d) \
15 container_of((d), struct sja1105_private, tas_data)
16
17static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
18{
19 struct sja1105_tas_data *tas_data = &priv->tas_data;
20 struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
21 struct dsa_switch *ds = priv->ds;
22 s64 earliest_base_time = S64_MAX;
23 s64 latest_base_time = 0;
24 s64 its_cycle_time = 0;
25 s64 max_cycle_time = 0;
26 int port;
27
28 tas_data->enabled = false;
29
30 for (port = 0; port < ds->num_ports; port++) {
31 const struct tc_taprio_qopt_offload *offload;
32
33 offload = tas_data->offload[port];
34 if (!offload)
35 continue;
36
37 tas_data->enabled = true;
38
39 if (max_cycle_time < offload->cycle_time)
40 max_cycle_time = offload->cycle_time;
41 if (latest_base_time < offload->base_time)
42 latest_base_time = offload->base_time;
43 if (earliest_base_time > offload->base_time) {
44 earliest_base_time = offload->base_time;
45 its_cycle_time = offload->cycle_time;
46 }
47 }
48
49 if (!list_empty(&gating_cfg->entries)) {
50 tas_data->enabled = true;
51
52 if (max_cycle_time < gating_cfg->cycle_time)
53 max_cycle_time = gating_cfg->cycle_time;
54 if (latest_base_time < gating_cfg->base_time)
55 latest_base_time = gating_cfg->base_time;
56 if (earliest_base_time > gating_cfg->base_time) {
57 earliest_base_time = gating_cfg->base_time;
58 its_cycle_time = gating_cfg->cycle_time;
59 }
60 }
61
62 if (!tas_data->enabled)
63 return 0;
64
65
66
67
68
69
70 earliest_base_time = future_base_time(earliest_base_time,
71 its_cycle_time,
72 latest_base_time);
73 while (earliest_base_time > latest_base_time)
74 earliest_base_time -= its_cycle_time;
75 if (latest_base_time - earliest_base_time >
76 sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
77 dev_err(ds->dev,
78 "Base times too far apart: min %llu max %llu\n",
79 earliest_base_time, latest_base_time);
80 return -ERANGE;
81 }
82
83 tas_data->earliest_base_time = earliest_base_time;
84 tas_data->max_cycle_time = max_cycle_time;
85
86 dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
87 dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
88 dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
89
90 return 0;
91}
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
/* Rebuild the four static-config tables that describe the TAS schedule
 * (Schedule, Schedule Entry Points, and their respective Parameters tables)
 * from the currently offloaded per-port taprio schedules and the tc-gate
 * configuration. Each offloaded port and the gating config each become one
 * hardware "cycle" (subschedule).
 *
 * Returns 0 on success or a negative error code (from parameter computation
 * or -ENOMEM on allocation failure). On allocation failure, tables already
 * (re)allocated here remain owned by the static config and are freed by its
 * teardown path - presumably sja1105_static_config_free(); confirm there.
 */
int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
		*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
	struct sja1105_schedule_entry *schedule;
	struct dsa_switch *ds = priv->ds;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port, rc;

	rc = sja1105_tas_set_runtime_params(priv);
	if (rc < 0)
		return rc;

	/* Discard the old Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the old Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the old Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the old Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Count the total number of gate control list entries, and one cycle
	 * (subschedule) per port that has an offload installed.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	/* The tc-gate configuration, if present, is one more cycle */
	if (!list_empty(&gating_cfg->entries)) {
		num_entries += gating_cfg->num_entries;
		num_cycles++;
	}

	/* Nothing to do: leave all schedule tables deleted */
	if (!num_cycles)
		return 0;

	/* Pre-allocate space in the static config tables; entry pointers
	 * below alias table->entries and stay valid because no further
	 * reallocation happens in this function.
	 */

	/* Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* The tables already allocated above are not freed here;
		 * they are owned by the static config and reclaimed when it
		 * is torn down or rebuilt.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table: one entry point per cycle */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* The schedule is synchronized to the PTP clock */
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < ds->num_ports; port++) {
		const struct tc_taprio_qopt_offload *offload;
		/* Relative base time, in nanoseconds */
		s64 rbt;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;

		/* Express this port's base time relative to the global
		 * earliest base time, rolled forward by an integer number of
		 * this port's cycles so the phase is preserved.
		 */
		rbt = future_base_time(offload->base_time,
				       offload->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;

		/* +1 because a delta of zero is not representable by the
		 * hardware entry point - presumably the entry point fires
		 * one clock tick after the schedule start; confirm against
		 * the UM10944 datasheet.
		 */
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* The subscheind (subschedule end index) fields for this and
		 * all higher-numbered subschedules point at our last entry;
		 * later cycles overwrite the tail of this range.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		/* Translate each gate control list entry: gates open in the
		 * offload's gate_mask become 0 bits in resmedia (the SJA1105
		 * blocks the queues whose resmedia bit is set).
		 */
		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	/* The tc-gate configuration occupies the final cycle, built the same
	 * way but driving the time-triggered VL gates instead of queues.
	 */
	if (!list_empty(&gating_cfg->entries)) {
		struct sja1105_gate_entry *e;

		/* Relative base time, in nanoseconds */
		s64 rbt;

		schedule_start_idx = k;
		schedule_end_idx = k + gating_cfg->num_entries - 1;
		rbt = future_base_time(gating_cfg->base_time,
				       gating_cfg->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		list_for_each_entry(e, &gating_cfg->entries, list) {
			schedule[k].delta = ns_to_sja1105_delta(e->interval);
			schedule[k].destports = e->rule->vl.destports;
			schedule[k].setvalid = true;
			schedule[k].txen = true;
			schedule[k].vlindex = e->rule->vl.sharindx;
			schedule[k].winstindex = e->rule->vl.sharindx;
			/* Gate open => window start, else window end */
			if (e->gate_state)
				schedule[k].winst = true;
			else
				schedule[k].winend = true;
			k++;
		}
	}

	return 0;
}
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
/* Check whether the candidate schedule @admin for @port would ever fire a
 * gate event at the exact same time as the schedule already offloaded on
 * @port. The SJA1105 executes all subschedules from a single linear gate
 * control list, so two entries from different subschedules must never be
 * scheduled for the same time slot.
 *
 * Returns true if a conflict exists (or if the cycle times are not integer
 * multiples of one another, which the check below cannot reason about),
 * false otherwise.
 */
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
			    const struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	const struct tc_taprio_qopt_offload *offload;
	s64 max_cycle_time, min_cycle_time;
	s64 delta1, delta2;
	s64 rbt1, rbt2;
	s64 stop_time;
	s64 t1, t2;
	int i, j;
	s32 rem;

	offload = tas_data->offload[port];
	if (!offload)
		return false;

	/* Only schedules whose cycle times divide evenly into one another
	 * have a bounded common period we can exhaustively check. Reject
	 * anything else as a (potential) conflict.
	 */
	max_cycle_time = max(offload->cycle_time, admin->cycle_time);
	min_cycle_time = min(offload->cycle_time, admin->cycle_time);
	div_s64_rem(max_cycle_time, min_cycle_time, &rem);
	if (rem)
		return true;

	/* Reduce each base time modulo its own cycle time: only the phase
	 * within a cycle matters for collision purposes.
	 */
	div_s64_rem(offload->base_time, offload->cycle_time, &rem);
	rbt1 = rem;

	div_s64_rem(admin->base_time, admin->cycle_time, &rem);
	rbt2 = rem;

	/* One full hyperperiod past the later phase is enough to see every
	 * relative alignment of the two schedules.
	 */
	stop_time = max_cycle_time + max(rbt1, rbt2);

	/* Walk every pair of gate events of both schedules across the common
	 * period, and compare their absolute firing times.
	 */
	for (i = 0, delta1 = 0;
	     i < offload->num_entries;
	     delta1 += offload->entries[i].interval, i++) {
		/* delta1 is the offset of existing entry i within its cycle */

		for (j = 0, delta2 = 0;
		     j < admin->num_entries;
		     delta2 += admin->entries[j].interval, j++) {
			/* delta2 is the offset of candidate entry j within
			 * its cycle
			 */

			/* t1 iterates over every firing time of existing
			 * entry i up to the common period
			 */
			for (t1 = rbt1 + delta1;
			     t1 <= stop_time;
			     t1 += offload->cycle_time) {
				/* t2 iterates over every firing time of
				 * candidate entry j; equality means both
				 * events occupy the same time slot
				 */
				for (t2 = rbt2 + delta2;
				     t2 <= stop_time;
				     t2 += admin->cycle_time) {
					if (t1 == t2) {
						dev_warn(priv->ds->dev,
							 "GCL entry %d collides with entry %d of port %d\n",
							 j, i, port);
						return true;
					}
				}
			}
		}
	}

	return false;
}
460
461
462
463
464
465
466bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
467 struct netlink_ext_ack *extack)
468{
469 struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
470 size_t num_entries = gating_cfg->num_entries;
471 struct tc_taprio_qopt_offload *dummy;
472 struct dsa_switch *ds = priv->ds;
473 struct sja1105_gate_entry *e;
474 bool conflict;
475 int i = 0;
476
477 if (list_empty(&gating_cfg->entries))
478 return false;
479
480 dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
481 if (!dummy) {
482 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
483 return true;
484 }
485
486 dummy->num_entries = num_entries;
487 dummy->base_time = gating_cfg->base_time;
488 dummy->cycle_time = gating_cfg->cycle_time;
489
490 list_for_each_entry(e, &gating_cfg->entries, list)
491 dummy->entries[i++].interval = e->interval;
492
493 if (port != -1) {
494 conflict = sja1105_tas_check_conflicts(priv, port, dummy);
495 } else {
496 for (port = 0; port < ds->num_ports; port++) {
497 conflict = sja1105_tas_check_conflicts(priv, port,
498 dummy);
499 if (conflict)
500 break;
501 }
502 }
503
504 kfree(dummy);
505
506 return conflict;
507}
508
/* DSA hook for installing or removing a tc-taprio schedule on @port.
 *
 * Validates the request (interval bounds, no cycle extension, no collision
 * with other ports' schedules or with the tc-gate config), takes a reference
 * on the offload, rebuilds the static-config schedule tables, and reloads
 * the switch configuration.
 *
 * Returns 0 on success, -EINVAL on a redundant enable/disable, -ENOTSUPP for
 * unsupported features, -ERANGE on out-of-range intervals or conflicts, or
 * the error of the scheduling/reload step.
 */
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	int other_port, rc, i;

	/* Can't enable a schedule when one is already installed, and can't
	 * disable when there is none.
	 */
	if (!!tas_data->offload[port] == admin->enable)
		return -EINVAL;

	if (!admin->enable) {
		/* Drop our reference to the old schedule and rebuild the
		 * tables without this port's cycle.
		 */
		taprio_offload_free(tas_data->offload[port]);
		tas_data->offload[port] = NULL;

		rc = sja1105_init_scheduling(priv);
		if (rc < 0)
			return rc;

		return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
	}

	/* Cycle time extension is not supported by this hardware model */
	if (admin->cycle_time_extension)
		return -ENOTSUPP;

	/* Every interval must be representable as a non-zero delta below the
	 * hardware maximum once converted to switch clock ticks.
	 */
	for (i = 0; i < admin->num_entries; i++) {
		s64 delta_ns = admin->entries[i].interval;
		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
		bool too_long, too_short;

		too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
		too_short = (delta_cycles == 0);
		if (too_long || too_short) {
			dev_err(priv->ds->dev,
				"Interval %llu too %s for GCL entry %d\n",
				delta_ns, too_long ? "long" : "short", i);
			return -ERANGE;
		}
	}

	/* The new schedule must not fire at the same instant as any entry of
	 * any other port's already-installed schedule.
	 */
	for (other_port = 0; other_port < ds->num_ports; other_port++) {
		if (other_port == port)
			continue;

		if (sja1105_tas_check_conflicts(priv, other_port, admin))
			return -ERANGE;
	}

	/* Likewise for the tc-gate configuration, if any */
	if (sja1105_gating_check_conflicts(priv, port, NULL)) {
		dev_err(ds->dev, "Conflict with tc-gate schedule\n");
		return -ERANGE;
	}

	tas_data->offload[port] = taprio_offload_get(admin);

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
580
581static int sja1105_tas_check_running(struct sja1105_private *priv)
582{
583 struct sja1105_tas_data *tas_data = &priv->tas_data;
584 struct dsa_switch *ds = priv->ds;
585 struct sja1105_ptp_cmd cmd = {0};
586 int rc;
587
588 rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
589 if (rc < 0)
590 return rc;
591
592 if (cmd.ptpstrtsch == 1)
593
594 tas_data->state = SJA1105_TAS_STATE_RUNNING;
595 else if (cmd.ptpstopsch == 1)
596
597 tas_data->state = SJA1105_TAS_STATE_DISABLED;
598 else
599
600 rc = -EINVAL;
601
602 return rc;
603}
604
605
606static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
607 u64 correction)
608{
609 const struct sja1105_regs *regs = priv->info->regs;
610 u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
611
612 return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
613 &ptpclkcorp, NULL);
614}
615
616
617static int sja1105_tas_set_base_time(struct sja1105_private *priv,
618 u64 base_time)
619{
620 const struct sja1105_regs *regs = priv->info->regs;
621 u64 ptpschtm = ns_to_sja1105_ticks(base_time);
622
623 return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
624 &ptpschtm, NULL);
625}
626
627static int sja1105_tas_start(struct sja1105_private *priv)
628{
629 struct sja1105_tas_data *tas_data = &priv->tas_data;
630 struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
631 struct dsa_switch *ds = priv->ds;
632 int rc;
633
634 dev_dbg(ds->dev, "Starting the TAS\n");
635
636 if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
637 tas_data->state == SJA1105_TAS_STATE_RUNNING) {
638 dev_err(ds->dev, "TAS already started\n");
639 return -EINVAL;
640 }
641
642 cmd->ptpstrtsch = 1;
643 cmd->ptpstopsch = 0;
644
645 rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
646 if (rc < 0)
647 return rc;
648
649 tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
650
651 return 0;
652}
653
654static int sja1105_tas_stop(struct sja1105_private *priv)
655{
656 struct sja1105_tas_data *tas_data = &priv->tas_data;
657 struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
658 struct dsa_switch *ds = priv->ds;
659 int rc;
660
661 dev_dbg(ds->dev, "Stopping the TAS\n");
662
663 if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
664 dev_err(ds->dev, "TAS already disabled\n");
665 return -EINVAL;
666 }
667
668 cmd->ptpstopsch = 1;
669 cmd->ptpstrtsch = 0;
670
671 rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
672 if (rc < 0)
673 return rc;
674
675 tas_data->state = SJA1105_TAS_STATE_DISABLED;
676
677 return 0;
678}
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
/* Workqueue handler driving the TAS state machine. Scheduled from the PTP
 * notification hooks below (clockstep/adjfreq); tas_data->last_op records
 * which PTP operation triggered us. Runs under the PTP mutex so it cannot
 * race with timestamping operations.
 */
static void sja1105_tas_state_machine(struct work_struct *work)
{
	struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
	struct sja1105_private *priv = tas_to_sja1105(tas_data);
	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
	struct timespec64 base_time_ts, now_ts;
	struct dsa_switch *ds = priv->ds;
	struct timespec64 diff;
	s64 base_time, now;
	int rc = 0;

	mutex_lock(&ptp_data->lock);

	switch (tas_data->state) {
	case SJA1105_TAS_STATE_DISABLED:
		/* Can't do anything at all if the clock is still being
		 * stepped; only act once it is merely being slewed.
		 */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
			break;

		rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
		if (rc < 0)
			break;

		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		/* Plan to start the earliest schedule first. Aim at least
		 * one second into the future to leave room for the SPI
		 * writes below to land before the deadline. Subtract one
		 * hardware delta because the entry point deltas were
		 * programmed with a +1 offset - presumably so a
		 * relative-base-time of zero stays representable; confirm
		 * against the entry point setup in
		 * sja1105_init_scheduling().
		 */
		base_time = future_base_time(tas_data->earliest_base_time,
					     tas_data->max_cycle_time,
					     now + 1ull * NSEC_PER_SEC);
		base_time -= sja1105_delta_to_ns(1);

		rc = sja1105_tas_set_base_time(priv, base_time);
		if (rc < 0)
			break;

		tas_data->oper_base_time = base_time;

		rc = sja1105_tas_start(priv);
		if (rc < 0)
			break;

		base_time_ts = ns_to_timespec64(base_time);
		now_ts = ns_to_timespec64(now);

		dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
			base_time_ts.tv_sec, base_time_ts.tv_nsec,
			now_ts.tv_sec, now_ts.tv_nsec);

		break;

	case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			/* Clock was stepped, the armed base time is no
			 * longer meaningful: disarm and wait for the next
			 * slew-only adjustment to re-plan.
			 */
			sja1105_tas_stop(priv);
			break;
		}

		/* Check if the schedule deadline has passed and the hardware
		 * should therefore have started by now.
		 */
		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		if (now < tas_data->oper_base_time) {
			/* Deadline not yet reached: keep waiting */
			diff = ns_to_timespec64(tas_data->oper_base_time - now);
			dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
				diff.tv_sec, diff.tv_nsec);
			break;
		}

		/* Deadline has passed: the hardware should report RUNNING */
		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			/* Hardware missed its deadline; nothing to recover
			 * here beyond flagging it.
			 */
			dev_err(ds->dev,
				"TAS not started despite time elapsed\n");

		break;

	case SJA1105_TAS_STATE_RUNNING:
		/* A clock step while running invalidates the schedule */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			sja1105_tas_stop(priv);
			break;
		}

		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			dev_err(ds->dev, "TAS surprisingly stopped\n");

		break;

	default:
		if (net_ratelimit())
			dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
	}

	if (rc && net_ratelimit())
		dev_err(ds->dev, "An operation returned %d\n", rc);

	mutex_unlock(&ptp_data->lock);
}
841
842void sja1105_tas_clockstep(struct dsa_switch *ds)
843{
844 struct sja1105_private *priv = ds->priv;
845 struct sja1105_tas_data *tas_data = &priv->tas_data;
846
847 if (!tas_data->enabled)
848 return;
849
850 tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
851 schedule_work(&tas_data->tas_work);
852}
853
854void sja1105_tas_adjfreq(struct dsa_switch *ds)
855{
856 struct sja1105_private *priv = ds->priv;
857 struct sja1105_tas_data *tas_data = &priv->tas_data;
858
859 if (!tas_data->enabled)
860 return;
861
862
863 if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
864 return;
865
866 tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
867 schedule_work(&tas_data->tas_work);
868}
869
870void sja1105_tas_setup(struct dsa_switch *ds)
871{
872 struct sja1105_private *priv = ds->priv;
873 struct sja1105_tas_data *tas_data = &priv->tas_data;
874
875 INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
876 tas_data->state = SJA1105_TAS_STATE_DISABLED;
877 tas_data->last_op = SJA1105_PTP_NONE;
878
879 INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
880}
881
882void sja1105_tas_teardown(struct dsa_switch *ds)
883{
884 struct sja1105_private *priv = ds->priv;
885 struct tc_taprio_qopt_offload *offload;
886 int port;
887
888 cancel_work_sync(&priv->tas_data.tas_work);
889
890 for (port = 0; port < ds->num_ports; port++) {
891 offload = priv->tas_data.offload[port];
892 if (!offload)
893 continue;
894
895 taprio_offload_free(offload);
896 }
897}
898