1
2
3
4#include "sja1105.h"
5
6#define SJA1105_TAS_CLKSRC_DISABLED 0
7#define SJA1105_TAS_CLKSRC_STANDALONE 1
8#define SJA1105_TAS_CLKSRC_AS6802 2
9#define SJA1105_TAS_CLKSRC_PTP 3
10#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
11
12#define work_to_sja1105_tas(d) \
13 container_of((d), struct sja1105_tas_data, tas_work)
14#define tas_to_sja1105(d) \
15 container_of((d), struct sja1105_private, tas_data)
16
17static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
18{
19 struct sja1105_tas_data *tas_data = &priv->tas_data;
20 struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
21 struct dsa_switch *ds = priv->ds;
22 s64 earliest_base_time = S64_MAX;
23 s64 latest_base_time = 0;
24 s64 its_cycle_time = 0;
25 s64 max_cycle_time = 0;
26 int port;
27
28 tas_data->enabled = false;
29
30 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
31 const struct tc_taprio_qopt_offload *offload;
32
33 offload = tas_data->offload[port];
34 if (!offload)
35 continue;
36
37 tas_data->enabled = true;
38
39 if (max_cycle_time < offload->cycle_time)
40 max_cycle_time = offload->cycle_time;
41 if (latest_base_time < offload->base_time)
42 latest_base_time = offload->base_time;
43 if (earliest_base_time > offload->base_time) {
44 earliest_base_time = offload->base_time;
45 its_cycle_time = offload->cycle_time;
46 }
47 }
48
49 if (!list_empty(&gating_cfg->entries)) {
50 tas_data->enabled = true;
51
52 if (max_cycle_time < gating_cfg->cycle_time)
53 max_cycle_time = gating_cfg->cycle_time;
54 if (latest_base_time < gating_cfg->base_time)
55 latest_base_time = gating_cfg->base_time;
56 if (earliest_base_time > gating_cfg->base_time) {
57 earliest_base_time = gating_cfg->base_time;
58 its_cycle_time = gating_cfg->cycle_time;
59 }
60 }
61
62 if (!tas_data->enabled)
63 return 0;
64
65
66
67
68
69
70 earliest_base_time = future_base_time(earliest_base_time,
71 its_cycle_time,
72 latest_base_time);
73 while (earliest_base_time > latest_base_time)
74 earliest_base_time -= its_cycle_time;
75 if (latest_base_time - earliest_base_time >
76 sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
77 dev_err(ds->dev,
78 "Base times too far apart: min %llu max %llu\n",
79 earliest_base_time, latest_base_time);
80 return -ERANGE;
81 }
82
83 tas_data->earliest_base_time = earliest_base_time;
84 tas_data->max_cycle_time = max_cycle_time;
85
86 dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
87 dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
88 dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
89
90 return 0;
91}
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
/* Rebuild the four scheduling-related static config tables (Schedule,
 * Schedule Entry Points, and their two Parameters tables) from the currently
 * offloaded per-port taprio schedules plus the tc-gate gating schedule.
 *
 * Each offloaded port and the gating config each become one "cycle"
 * (subschedule). Returns 0 on success, -ENOMEM on allocation failure, or a
 * negative error from sja1105_tas_set_runtime_params().
 */
int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
					*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
	struct sja1105_schedule_entry *schedule;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port, rc;

	rc = sja1105_tas_set_runtime_params(priv);
	if (rc < 0)
		return rc;

	/* Discard the previous Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the previous Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the previous Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard the previous Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Dimension the problem: one subschedule per offloaded port... */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	/* ...plus one subschedule for the tc-gate gating config */
	if (!list_empty(&gating_cfg->entries)) {
		num_entries += gating_cfg->num_entries;
		num_cycles++;
	}

	/* Nothing is offloaded: the discard above already cleaned up */
	if (!num_cycles)
		return 0;

	/* Pre-allocate the new tables. One Schedule Table entry per GCL
	 * entry across all subschedules.
	 */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Entry Points Parameters Table (fixed size) */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* NOTE(review): tables allocated earlier in this function are
		 * not freed on this (or the following) error paths; presumably
		 * they are reclaimed by the discard pass of the next call or
		 * by static config teardown — confirm.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table (fixed size) */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table: one entry point per subschedule */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* All subschedules are driven by the PTP clock; actsubsch holds the
	 * index of the last active subschedule.
	 */
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		const struct tc_taprio_qopt_offload *offload;
		/* Relative base time: this subschedule's start, expressed as
		 * an offset from the global earliest base time.
		 */
		s64 rbt;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;

		/* Roll this port's base time forward by whole cycles until it
		 * is at or past the global earliest base time, then make it
		 * relative to that.
		 */
		rbt = future_base_time(offload->base_time,
				       offload->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;

		/* +1 tick: presumably because the hardware disallows an entry
		 * point delta of zero — the state machine compensates by
		 * programming the base time one delta tick earlier (see
		 * base_time -= sja1105_delta_to_ns(1) in
		 * sja1105_tas_state_machine()). Confirm against UM10944.
		 */
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* Fill this and all later subscheind slots with this
		 * subschedule's end index; later subschedules overwrite their
		 * own slots, keeping the array monotonically non-decreasing.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		/* Emit one Schedule Table entry per GCL entry */
		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			/* Inverted gate mask: resmedia apparently encodes the
			 * traffic classes that are blocked (gates closed) in
			 * this interval — confirm against UM10944.
			 */
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	/* Same construction for the tc-gate gating subschedule */
	if (!list_empty(&gating_cfg->entries)) {
		struct sja1105_gate_entry *e;

		/* Relative base time, as above */
		s64 rbt;

		schedule_start_idx = k;
		schedule_end_idx = k + gating_cfg->num_entries - 1;
		rbt = future_base_time(gating_cfg->base_time,
				       gating_cfg->cycle_time,
				       tas_data->earliest_base_time);
		rbt -= tas_data->earliest_base_time;
		entry_point_delta = ns_to_sja1105_delta(rbt) + 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

		list_for_each_entry(e, &gating_cfg->entries, list) {
			schedule[k].delta = ns_to_sja1105_delta(e->interval);
			schedule[k].destports = e->rule->vl.destports;
			schedule[k].setvalid = true;
			schedule[k].txen = true;
			schedule[k].vlindex = e->rule->vl.sharindx;
			schedule[k].winstindex = e->rule->vl.sharindx;
			if (e->gate_state)
				/* Gate open: window start */
				schedule[k].winst = true;
			else
				/* Gate closed: window end */
				schedule[k].winend = true;
			k++;
		}
	}

	return 0;
}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381static bool
382sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
383 const struct tc_taprio_qopt_offload *admin)
384{
385 struct sja1105_tas_data *tas_data = &priv->tas_data;
386 const struct tc_taprio_qopt_offload *offload;
387 s64 max_cycle_time, min_cycle_time;
388 s64 delta1, delta2;
389 s64 rbt1, rbt2;
390 s64 stop_time;
391 s64 t1, t2;
392 int i, j;
393 s32 rem;
394
395 offload = tas_data->offload[port];
396 if (!offload)
397 return false;
398
399
400
401
402 max_cycle_time = max(offload->cycle_time, admin->cycle_time);
403 min_cycle_time = min(offload->cycle_time, admin->cycle_time);
404 div_s64_rem(max_cycle_time, min_cycle_time, &rem);
405 if (rem)
406 return true;
407
408
409
410
411
412 div_s64_rem(offload->base_time, offload->cycle_time, &rem);
413 rbt1 = rem;
414
415 div_s64_rem(admin->base_time, admin->cycle_time, &rem);
416 rbt2 = rem;
417
418 stop_time = max_cycle_time + max(rbt1, rbt2);
419
420
421
422
423 for (i = 0, delta1 = 0;
424 i < offload->num_entries;
425 delta1 += offload->entries[i].interval, i++) {
426
427
428
429 for (j = 0, delta2 = 0;
430 j < admin->num_entries;
431 delta2 += admin->entries[j].interval, j++) {
432
433
434
435
436 for (t1 = rbt1 + delta1;
437 t1 <= stop_time;
438 t1 += offload->cycle_time) {
439
440
441
442
443 for (t2 = rbt2 + delta2;
444 t2 <= stop_time;
445 t2 += admin->cycle_time) {
446 if (t1 == t2) {
447 dev_warn(priv->ds->dev,
448 "GCL entry %d collides with entry %d of port %d\n",
449 j, i, port);
450 return true;
451 }
452 }
453 }
454 }
455 }
456
457 return false;
458}
459
460
461
462
463
464
465bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
466 struct netlink_ext_ack *extack)
467{
468 struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
469 size_t num_entries = gating_cfg->num_entries;
470 struct tc_taprio_qopt_offload *dummy;
471 struct sja1105_gate_entry *e;
472 bool conflict;
473 int i = 0;
474
475 if (list_empty(&gating_cfg->entries))
476 return false;
477
478 dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
479 if (!dummy) {
480 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
481 return true;
482 }
483
484 dummy->num_entries = num_entries;
485 dummy->base_time = gating_cfg->base_time;
486 dummy->cycle_time = gating_cfg->cycle_time;
487
488 list_for_each_entry(e, &gating_cfg->entries, list)
489 dummy->entries[i++].interval = e->interval;
490
491 if (port != -1) {
492 conflict = sja1105_tas_check_conflicts(priv, port, dummy);
493 } else {
494 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
495 conflict = sja1105_tas_check_conflicts(priv, port,
496 dummy);
497 if (conflict)
498 break;
499 }
500 }
501
502 kfree(dummy);
503
504 return conflict;
505}
506
507int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
508 struct tc_taprio_qopt_offload *admin)
509{
510 struct sja1105_private *priv = ds->priv;
511 struct sja1105_tas_data *tas_data = &priv->tas_data;
512 int other_port, rc, i;
513
514
515
516
517 if (!!tas_data->offload[port] == admin->enable)
518 return -EINVAL;
519
520 if (!admin->enable) {
521 taprio_offload_free(tas_data->offload[port]);
522 tas_data->offload[port] = NULL;
523
524 rc = sja1105_init_scheduling(priv);
525 if (rc < 0)
526 return rc;
527
528 return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
529 }
530
531
532
533
534
535
536
537
538
539 if (admin->cycle_time_extension)
540 return -ENOTSUPP;
541
542 for (i = 0; i < admin->num_entries; i++) {
543 s64 delta_ns = admin->entries[i].interval;
544 s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
545 bool too_long, too_short;
546
547 too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
548 too_short = (delta_cycles == 0);
549 if (too_long || too_short) {
550 dev_err(priv->ds->dev,
551 "Interval %llu too %s for GCL entry %d\n",
552 delta_ns, too_long ? "long" : "short", i);
553 return -ERANGE;
554 }
555 }
556
557 for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
558 if (other_port == port)
559 continue;
560
561 if (sja1105_tas_check_conflicts(priv, other_port, admin))
562 return -ERANGE;
563 }
564
565 if (sja1105_gating_check_conflicts(priv, port, NULL)) {
566 dev_err(ds->dev, "Conflict with tc-gate schedule\n");
567 return -ERANGE;
568 }
569
570 tas_data->offload[port] = taprio_offload_get(admin);
571
572 rc = sja1105_init_scheduling(priv);
573 if (rc < 0)
574 return rc;
575
576 return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
577}
578
579static int sja1105_tas_check_running(struct sja1105_private *priv)
580{
581 struct sja1105_tas_data *tas_data = &priv->tas_data;
582 struct dsa_switch *ds = priv->ds;
583 struct sja1105_ptp_cmd cmd = {0};
584 int rc;
585
586 rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
587 if (rc < 0)
588 return rc;
589
590 if (cmd.ptpstrtsch == 1)
591
592 tas_data->state = SJA1105_TAS_STATE_RUNNING;
593 else if (cmd.ptpstopsch == 1)
594
595 tas_data->state = SJA1105_TAS_STATE_DISABLED;
596 else
597
598 rc = -EINVAL;
599
600 return rc;
601}
602
603
604static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
605 u64 correction)
606{
607 const struct sja1105_regs *regs = priv->info->regs;
608 u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
609
610 return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
611 &ptpclkcorp, NULL);
612}
613
614
615static int sja1105_tas_set_base_time(struct sja1105_private *priv,
616 u64 base_time)
617{
618 const struct sja1105_regs *regs = priv->info->regs;
619 u64 ptpschtm = ns_to_sja1105_ticks(base_time);
620
621 return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
622 &ptpschtm, NULL);
623}
624
625static int sja1105_tas_start(struct sja1105_private *priv)
626{
627 struct sja1105_tas_data *tas_data = &priv->tas_data;
628 struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
629 struct dsa_switch *ds = priv->ds;
630 int rc;
631
632 dev_dbg(ds->dev, "Starting the TAS\n");
633
634 if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
635 tas_data->state == SJA1105_TAS_STATE_RUNNING) {
636 dev_err(ds->dev, "TAS already started\n");
637 return -EINVAL;
638 }
639
640 cmd->ptpstrtsch = 1;
641 cmd->ptpstopsch = 0;
642
643 rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
644 if (rc < 0)
645 return rc;
646
647 tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
648
649 return 0;
650}
651
652static int sja1105_tas_stop(struct sja1105_private *priv)
653{
654 struct sja1105_tas_data *tas_data = &priv->tas_data;
655 struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
656 struct dsa_switch *ds = priv->ds;
657 int rc;
658
659 dev_dbg(ds->dev, "Stopping the TAS\n");
660
661 if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
662 dev_err(ds->dev, "TAS already disabled\n");
663 return -EINVAL;
664 }
665
666 cmd->ptpstopsch = 1;
667 cmd->ptpstrtsch = 0;
668
669 rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
670 if (rc < 0)
671 return rc;
672
673 tas_data->state = SJA1105_TAS_STATE_DISABLED;
674
675 return 0;
676}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
/* Deferred work that reconciles the TAS state with PTP clock events
 * (clock steps via sja1105_tas_clockstep(), frequency adjustments via
 * sja1105_tas_adjfreq()). Runs under the PTP data mutex.
 */
static void sja1105_tas_state_machine(struct work_struct *work)
{
	struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
	struct sja1105_private *priv = tas_to_sja1105(tas_data);
	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
	struct timespec64 base_time_ts, now_ts;
	struct dsa_switch *ds = priv->ds;
	struct timespec64 diff;
	s64 base_time, now;
	int rc = 0;

	mutex_lock(&ptp_data->lock);

	switch (tas_data->state) {
	case SJA1105_TAS_STATE_DISABLED:
		/* Only attempt to start after a frequency adjustment, i.e.
		 * when the PTP clock is being steered rather than stepped.
		 */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
			break;

		/* Program the clock correction period to the longest cycle
		 * time — presumably so corrections are applied once per
		 * cycle; confirm against UM10944.
		 */
		rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
		if (rc < 0)
			break;

		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		/* Pick an operational base time at least one second in the
		 * future, aligned to the earliest base time rolled forward
		 * by whole (longest) cycles. Subtract one delta tick to
		 * compensate for the +1 added to the entry point deltas in
		 * sja1105_init_scheduling().
		 */
		base_time = future_base_time(tas_data->earliest_base_time,
					     tas_data->max_cycle_time,
					     now + 1ull * NSEC_PER_SEC);
		base_time -= sja1105_delta_to_ns(1);

		rc = sja1105_tas_set_base_time(priv, base_time);
		if (rc < 0)
			break;

		tas_data->oper_base_time = base_time;

		rc = sja1105_tas_start(priv);
		if (rc < 0)
			break;

		base_time_ts = ns_to_timespec64(base_time);
		now_ts = ns_to_timespec64(now);

		dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
			base_time_ts.tv_sec, base_time_ts.tv_nsec,
			now_ts.tv_sec, now_ts.tv_nsec);

		break;

	case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			/* The clock was stepped, so the programmed base time
			 * no longer matches PTP time: stop and retry later.
			 */
			sja1105_tas_stop(priv);
			break;
		}

		/* Has the programmed base time arrived yet, according to
		 * the PTP clock?
		 */
		rc = __sja1105_ptp_gettimex(ds, &now, NULL);
		if (rc < 0)
			break;

		if (now < tas_data->oper_base_time) {
			/* Not yet — just log the remaining time */
			diff = ns_to_timespec64(tas_data->oper_base_time - now);
			dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
				diff.tv_sec, diff.tv_nsec);
			break;
		}

		/* Base time has elapsed: ask the hardware what happened */
		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			/* Hardware did not confirm the start */
			dev_err(ds->dev,
				"TAS not started despite time elapsed\n");

		break;

	case SJA1105_TAS_STATE_RUNNING:
		/* A clock step invalidates the running schedule's alignment */
		if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
			sja1105_tas_stop(priv);
			break;
		}

		/* Re-confirm the hardware is still running the schedule */
		rc = sja1105_tas_check_running(priv);
		if (rc < 0)
			break;

		if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			dev_err(ds->dev, "TAS surprisingly stopped\n");

		break;

	default:
		if (net_ratelimit())
			dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
	}

	if (rc && net_ratelimit())
		dev_err(ds->dev, "An operation returned %d\n", rc);

	mutex_unlock(&ptp_data->lock);
}
839
840void sja1105_tas_clockstep(struct dsa_switch *ds)
841{
842 struct sja1105_private *priv = ds->priv;
843 struct sja1105_tas_data *tas_data = &priv->tas_data;
844
845 if (!tas_data->enabled)
846 return;
847
848 tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
849 schedule_work(&tas_data->tas_work);
850}
851
852void sja1105_tas_adjfreq(struct dsa_switch *ds)
853{
854 struct sja1105_private *priv = ds->priv;
855 struct sja1105_tas_data *tas_data = &priv->tas_data;
856
857 if (!tas_data->enabled)
858 return;
859
860
861 if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
862 return;
863
864 tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
865 schedule_work(&tas_data->tas_work);
866}
867
868void sja1105_tas_setup(struct dsa_switch *ds)
869{
870 struct sja1105_private *priv = ds->priv;
871 struct sja1105_tas_data *tas_data = &priv->tas_data;
872
873 INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
874 tas_data->state = SJA1105_TAS_STATE_DISABLED;
875 tas_data->last_op = SJA1105_PTP_NONE;
876
877 INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
878}
879
880void sja1105_tas_teardown(struct dsa_switch *ds)
881{
882 struct sja1105_private *priv = ds->priv;
883 struct tc_taprio_qopt_offload *offload;
884 int port;
885
886 cancel_work_sync(&priv->tas_data.tas_work);
887
888 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
889 offload = priv->tas_data.offload[port];
890 if (!offload)
891 continue;
892
893 taprio_offload_free(offload);
894 }
895}
896