1
2
3
4
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
14#include <linux/phylink.h>
15#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
23#include <linux/dsa/8021q.h>
24#include "sja1105.h"
25#include "sja1105_sgmii.h"
26#include "sja1105_tas.h"
27
28static const struct dsa_switch_ops sja1105_switch_ops;
29
/* Pulse the hardware reset line and wait for the switch to come back up.
 * @gpio: reset GPIO descriptor (asserted by driving it to 1)
 * @pulse_len: time, in ms, to keep reset asserted
 * @startup_delay: time, in ms, to wait after deasserting reset
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	/* Assert reset for the requested duration */
	gpiod_set_value_cansleep(gpio, 1);
	msleep(pulse_len);

	/* Release reset and give the switch time to boot */
	gpiod_set_value_cansleep(gpio, 0);
	msleep(startup_delay);
}
40
41static void
42sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
43 int from, int to, bool allow)
44{
45 if (allow) {
46 l2_fwd[from].bc_domain |= BIT(to);
47 l2_fwd[from].reach_port |= BIT(to);
48 l2_fwd[from].fl_domain |= BIT(to);
49 } else {
50 l2_fwd[from].bc_domain &= ~BIT(to);
51 l2_fwd[from].reach_port &= ~BIT(to);
52 l2_fwd[from].fl_domain &= ~BIT(to);
53 }
54}
55
56
57
58
/* Per-port settings collected from the device tree by sja1105_parse_dt() */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* from the "phy-mode" property */
	sja1105_mii_role_t role;	/* XMII_MAC or XMII_PHY wiring role */
};
63
/* Populate the static MAC Configuration Table with one entry per port.
 * All ports start out with I/O and address learning disabled, except the
 * upstream-facing (CPU/DSA) ports. Returns 0 or -ENOMEM.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on each port, with
		 * non-overlapping [base, top] buffer ranges.
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep the standard inter-frame gap */
		.ifg = 0,
		/* Leave the speed in automatic mode; it is adjusted at
		 * runtime by sja1105_adjust_port_config().
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static ingress/egress delay correction */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Maximum frame age before dropping */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) applied to untagged ingress traffic */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop frames with EtherType other than 802.1CB */
		.drpnona664 = false,
		/* Don't drop double-tagged frames */
		.drpdtag = false,
		/* Don't drop untagged frames */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard any stale configuration */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP is not run on the CPU/DSA port, so its I/O
			 * parameters must be enabled statically here.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
140
141static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
142{
143 if (priv->info->part_no != SJA1105R_PART_NO &&
144 priv->info->part_no != SJA1105S_PART_NO)
145 return false;
146
147 if (port != SJA1105_SGMII_PORT)
148 return false;
149
150 if (dsa_is_unused_port(priv->ds, port))
151 return false;
152
153 return true;
154}
155
156static int sja1105_init_mii_settings(struct sja1105_private *priv,
157 struct sja1105_dt_port *ports)
158{
159 struct device *dev = &priv->spidev->dev;
160 struct sja1105_xmii_params_entry *mii;
161 struct sja1105_table *table;
162 int i;
163
164 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
165
166
167 if (table->entry_count) {
168 kfree(table->entries);
169 table->entry_count = 0;
170 }
171
172 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
173 table->ops->unpacked_entry_size, GFP_KERNEL);
174 if (!table->entries)
175 return -ENOMEM;
176
177
178 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
179
180 mii = table->entries;
181
182 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
183 if (dsa_is_unused_port(priv->ds, i))
184 continue;
185
186 switch (ports[i].phy_mode) {
187 case PHY_INTERFACE_MODE_MII:
188 mii->xmii_mode[i] = XMII_MODE_MII;
189 break;
190 case PHY_INTERFACE_MODE_RMII:
191 mii->xmii_mode[i] = XMII_MODE_RMII;
192 break;
193 case PHY_INTERFACE_MODE_RGMII:
194 case PHY_INTERFACE_MODE_RGMII_ID:
195 case PHY_INTERFACE_MODE_RGMII_RXID:
196 case PHY_INTERFACE_MODE_RGMII_TXID:
197 mii->xmii_mode[i] = XMII_MODE_RGMII;
198 break;
199 case PHY_INTERFACE_MODE_SGMII:
200 if (!sja1105_supports_sgmii(priv, i))
201 return -EINVAL;
202 mii->xmii_mode[i] = XMII_MODE_SGMII;
203 break;
204 default:
205 dev_err(dev, "Unsupported PHY mode %s!\n",
206 phy_modes(ports[i].phy_mode));
207 }
208
209
210
211
212
213 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
214 mii->phy_mac[i] = XMII_MAC;
215 else
216 mii->phy_mac[i] = ports[i].role;
217 }
218 return 0;
219}
220
221static int sja1105_init_static_fdb(struct sja1105_private *priv)
222{
223 struct sja1105_table *table;
224
225 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
226
227
228
229
230 if (table->entry_count) {
231 kfree(table->entries);
232 table->entry_count = 0;
233 }
234 return 0;
235}
236
/* Populate the static L2 Lookup Parameters Table (one entry), which
 * governs address learning behavior. Returns 0 or -ENOMEM.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	/* Cap per-port learned addresses to an equal share of the table */
	u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* Start of the dynamic learning space */
		.start_dynspc = 0,
		.maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
			     max_fdb_entries, max_fdb_entries, },
		/* FDB hash polynomial */
		.poly = 0x97,
		/* Shared VLAN Learning (as opposed to Independent VLAN
		 * Learning) - learned addresses are visible across VLANs.
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * no source address enforcement is performed.
		 */
		.no_enf_hostprt = false,
		/* Don't learn the SMAC of link-local (management) traffic
		 * matching mac_fltres1 and mac_fltres0.
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries.
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
294
/* Populate the static VLAN Lookup Table with a single entry for the
 * default pvid (VLAN 1), of which all used ports are untagged members,
 * and mirror that state into priv->dsa_8021q_vlans.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 1,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 1.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN 1: all used ports are members, traffic is broadcast to all of
	 * them, and is transmitted untagged (tag_port bit cleared).
	 */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		/* Track this VLAN in the dsa_8021q list as well; the CPU
		 * port gets it as its pvid.
		 */
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		v->port = port;
		v->vid = 1;
		v->untagged = true;
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
358
/* Populate the static L2 Forwarding Table: each user port may only talk
 * to its upstream (CPU/DSA) port, and priorities map one-to-one.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First SJA1105_NUM_PORTS entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		/* Identity mapping of ingress priority to egress queue */
		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		if (i == upstream)
			continue;

		/* Allow traffic in both directions between this port and
		 * its upstream port only.
		 */
		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next SJA1105_NUM_TC entries define the VLAN PCP mapping from
	 * ingress to egress. Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
403
404static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
405{
406 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
407
408 .max_dynp = 0,
409
410 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
411 };
412 struct sja1105_table *table;
413
414 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
415
416 if (table->entry_count) {
417 kfree(table->entries);
418 table->entry_count = 0;
419 }
420
421 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
422 table->ops->unpacked_entry_size, GFP_KERNEL);
423 if (!table->entries)
424 return -ENOMEM;
425
426 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
427
428
429 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
430 default_l2fwd_params;
431
432 return 0;
433}
434
/* Redistribute the switch's frame buffer memory between the best-effort
 * partition (L2 Forwarding Parameters) and, if present, the virtual-link
 * partition (VL Forwarding Parameters). Operates on the static config
 * tables in memory only.
 */
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	struct sja1105_table *table;
	int max_mem;

	/* VLAN retagging consumes frame buffers, which leaves fewer for
	 * regular traffic, hence the reduced budget in best-effort mode.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
	else
		max_mem = SJA1105_MAX_FRAME_MEMORY;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, reserve a fixed
	 * amount of frame memory (SJA1105_VL_FRAME_MEMORY) for them, taken
	 * out of the best-effort partition.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
469
/* Populate the single-entry General Parameters Table.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		/* Link-local filter A and its mask */
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		/* Link-local filter B and its mask */
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port.
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Default the mirror port to an invalid value (disabled) */
		.mirr_port = SJA1105_NUM_PORTS,
		/* Link-local traffic received on casc_port would be
		 * forwarded to host_port without source port tagging
		 * (for cascaded setups). Default to an invalid port to
		 * disable the feature.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* Virtual link lookup format */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Use the dsa_8021q EtherType for both inner and outer tags,
		 * so the switch recognizes the tagging protocol's frames.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
538
539static int sja1105_init_avb_params(struct sja1105_private *priv)
540{
541 struct sja1105_avb_params_entry *avb;
542 struct sja1105_table *table;
543
544 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
545
546
547 if (table->entry_count) {
548 kfree(table->entries);
549 table->entry_count = 0;
550 }
551
552 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
553 table->ops->unpacked_entry_size, GFP_KERNEL);
554 if (!table->entries)
555 return -ENOMEM;
556
557 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
558
559 avb = table->entries;
560
561
562 avb->destmeta = SJA1105_META_DMAC;
563 avb->srcmeta = SJA1105_META_SMAC;
564
565
566
567
568
569
570
571 avb->cas_master = false;
572
573 return 0;
574}
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
/* Convert a rate in Mbps to the value expected by the RATE field of the
 * L2 Policing Table (empirically, Mbps * 64 — confirm against UM10944).
 */
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
620
/* Populate the static L2 Policing Table with permissive (non-limiting)
 * matchall policers, one shared per port. Returns 0 or -ENOMEM.
 */
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int port, tc;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard any stale configuration */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* Setup shared indices: all per-TC policers of a port, plus its
	 * broadcast policer, share the policer at index 'port'.
	 */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			policing[port * SJA1105_NUM_TC + tc].sharindx = port;

		policing[bcast].sharindx = port;
	}

	/* Setup the matchall policer parameters */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		/* The CPU port carries an extra dsa_8021q tag */
		if (dsa_is_cpu_port(priv->ds, port))
			mtu += VLAN_HLEN;

		policing[port].smax = 65535; /* Burst size in bytes */
		policing[port].rate = SJA1105_RATE_MBPS(1000);
		policing[port].maxlen = mtu;
		policing[port].partition = 0;
	}

	return 0;
}
669
/* Build the entire static configuration from scratch (discarding any
 * previous one) and upload it to the switch over SPI.
 * Returns 0 or a negative error code from any of the init steps.
 */
static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build initial static configuration, to be fixed up during runtime */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_avb_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}
717
718static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
719 const struct sja1105_dt_port *ports)
720{
721 int i;
722
723 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
724 if (ports[i].role == XMII_MAC)
725 continue;
726
727 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
728 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
729 priv->rgmii_rx_delay[i] = true;
730
731 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
732 ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
733 priv->rgmii_tx_delay[i] = true;
734
735 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
736 !priv->info->setup_rgmii_delay)
737 return -EINVAL;
738 }
739 return 0;
740}
741
/* Walk the "ports" OF node and fill in @ports with each port's phy-mode
 * and MAC/PHY role. Returns 0 or -ENODEV on malformed bindings.
 */
static int sja1105_parse_ports_node(struct sja1105_private *priv,
				    struct sja1105_dt_port *ports,
				    struct device_node *ports_node)
{
	struct device *dev = &priv->spidev->dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		struct device_node *phy_node;
		phy_interface_t phy_mode;
		u32 index;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &index) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				index);
			of_node_put(child);
			return -ENODEV;
		}
		ports[index].phy_mode = phy_mode;

		phy_node = of_parse_phandle(child, "phy-handle", 0);
		if (!phy_node) {
			if (!of_phy_is_fixed_link(child)) {
				dev_err(dev, "phy-handle or fixed-link "
					"properties missing!\n");
				of_node_put(child);
				return -ENODEV;
			}
			/* phy-handle is missing, but fixed-link isn't.
			 * So it's a fixed link. Default to PHY role.
			 */
			ports[index].role = XMII_PHY;
		} else {
			/* phy-handle present => put port in MAC role */
			ports[index].role = XMII_MAC;
			of_node_put(phy_node);
		}

		/* The MAC/PHY role can be overridden with explicit bindings */
		if (of_property_read_bool(child, "sja1105,role-mac"))
			ports[index].role = XMII_MAC;
		else if (of_property_read_bool(child, "sja1105,role-phy"))
			ports[index].role = XMII_PHY;
	}

	return 0;
}
801
802static int sja1105_parse_dt(struct sja1105_private *priv,
803 struct sja1105_dt_port *ports)
804{
805 struct device *dev = &priv->spidev->dev;
806 struct device_node *switch_node = dev->of_node;
807 struct device_node *ports_node;
808 int rc;
809
810 ports_node = of_get_child_by_name(switch_node, "ports");
811 if (!ports_node) {
812 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
813 return -ENODEV;
814 }
815
816 rc = sja1105_parse_ports_node(priv, ports, ports_node);
817 of_node_put(ports_node);
818
819 return rc;
820}
821
822static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
823{
824 const struct sja1105_regs *regs = priv->info->regs;
825 u32 val;
826 int rc;
827
828 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
829 NULL);
830 if (rc < 0)
831 return rc;
832
833 return val;
834}
835
836static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
837 u16 pcs_val)
838{
839 const struct sja1105_regs *regs = priv->info->regs;
840 u32 val = pcs_val;
841 int rc;
842
843 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
844 NULL);
845 if (rc < 0)
846 return rc;
847
848 return val;
849}
850
/* Program the SGMII PCS registers.
 * @an_enabled: start in-band autonegotiation
 * @an_master: act as the SGMII link partner in PHY mode
 * Write errors from the SPI layer are not checked here.
 */
static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: enable vendor-specific MMD1, allow clock stop,
	 * let the MAC reconfigure autonomously after PCS autoneg (MAC_AUTO_SW)
	 * and trigger initialization (INIT).
	 */
	sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
			    SJA1105_DC1_CLOCK_STOP_EN |
			    SJA1105_DC1_MAC_AUTO_SW |
			    SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: no polarity inversion on the TX lane */
	sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: SGMII autoneg mode; optionally take the PHY side
	 * of the link and force the link up.
	 */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, SJA1105_AC, ac);
	/* BASIC_CONTROL: kick off in-band AN now if requested. Otherwise,
	 * the speed must later be forced via
	 * sja1105_sgmii_pcs_force_speed().
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, MII_BMCR,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}
878
879static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
880 int speed)
881{
882 int pcs_speed;
883
884 switch (speed) {
885 case SPEED_1000:
886 pcs_speed = BMCR_SPEED1000;
887 break;
888 case SPEED_100:
889 pcs_speed = BMCR_SPEED100;
890 break;
891 case SPEED_10:
892 pcs_speed = BMCR_SPEED10;
893 break;
894 default:
895 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
896 return;
897 }
898 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
899}
900
901
/* Translate the hardware's sja1105_speed_t encoding to ethtool SPEED_* */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO]		= SPEED_UNKNOWN,
	[SJA1105_SPEED_10MBPS]		= SPEED_10,
	[SJA1105_SPEED_100MBPS]		= SPEED_100,
	[SJA1105_SPEED_1000MBPS]	= SPEED_1000,
};
908
909
/* Set the port's MAC speed through dynamic reconfiguration, then redo the
 * port clocking setup if it is an RGMII port (the CGU PLLs depend on the
 * speed). Returns 0 or a negative error code.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* The static config tables serve as the source of truth for the
	 * current MAC/xMII settings (rather than reading them back from
	 * hardware).
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK has informed us of the interface but the speed is
		 * not yet valid (e.g. AN not completed). Setting
		 * SJA1105_SPEED_AUTO at runtime leaves the port idle —
		 * NOTE(review): presumably disables it until a valid speed
		 * arrives; confirm against UM10944.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* For the SGMII port the MAC side is fixed at 1 Gbps; the actual
	 * line rate is negotiated (or forced) at the PCS. All other ports
	 * take the requested speed directly.
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Only the RGMII modes need the clocking (CGU) setup to be redone
	 * for the new speed; MII/RMII/SGMII do not.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
986
987
988
989
990
991
992
993
994static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
995 phy_interface_t interface)
996{
997 struct sja1105_xmii_params_entry *mii;
998 sja1105_phy_interface_t phy_mode;
999
1000 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1001 phy_mode = mii->xmii_mode[port];
1002
1003 switch (interface) {
1004 case PHY_INTERFACE_MODE_MII:
1005 return (phy_mode != XMII_MODE_MII);
1006 case PHY_INTERFACE_MODE_RMII:
1007 return (phy_mode != XMII_MODE_RMII);
1008 case PHY_INTERFACE_MODE_RGMII:
1009 case PHY_INTERFACE_MODE_RGMII_ID:
1010 case PHY_INTERFACE_MODE_RGMII_RXID:
1011 case PHY_INTERFACE_MODE_RGMII_TXID:
1012 return (phy_mode != XMII_MODE_RGMII);
1013 case PHY_INTERFACE_MODE_SGMII:
1014 return (phy_mode != XMII_MODE_SGMII);
1015 default:
1016 return true;
1017 }
1018}
1019
/* phylink .mac_config callback: reject configurations the hardware cannot
 * do (xMII mode changes, in-band AN on non-SGMII ports) and program the
 * SGMII PCS where applicable.
 */
static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	bool is_sgmii = sja1105_supports_sgmii(priv, port);

	/* The xMII mode is fixed in the static config; refuse changes */
	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
			phy_modes(state->interface));
		return;
	}

	/* In-band AN is only possible through the SGMII PCS */
	if (phylink_autoneg_inband(mode) && !is_sgmii) {
		dev_err(ds->dev, "In-band AN not supported!\n");
		return;
	}

	if (is_sgmii)
		sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
					 false);
}
1042
/* phylink .mac_link_down callback: stop the port from transmitting */
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}
1049
/* phylink .mac_link_up callback: apply the resolved speed to the MAC (and
 * to the SGMII PCS when not using in-band AN), then re-enable transmission.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_adjust_port_config(priv, port, speed);

	/* With in-band AN, the PCS negotiates its own speed */
	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	sja1105_inhibit_tx(priv, BIT(port), false);
}
1066
/* phylink .phylink_validate callback: restrict the advertised link modes
 * to what the MAC supports in its committed xMII mode.
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what
	 * will be advertised.
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* For PHY_INTERFACE_MODE_NA, phylink expects all supported modes to
	 * be returned; for any other interface, a mismatch with the static
	 * config means nothing is supported.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Only full-duplex modes are advertised; 1000 Mbps only on modes
	 * capable of it (RGMII, SGMII).
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
1107
/* phylink PCS state readout: decode speed, duplex, AN-complete and link
 * status from the SGMII AUTONEG_INTR_STATUS register.
 * Returns 0, or a negative error from the SPI read.
 */
static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
				     struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	int ais;

	/* Read the vendor-specific AUTONEG_INTR_STATUS register */
	ais = sja1105_sgmii_read(priv, SJA1105_AIS);
	if (ais < 0)
		return ais;

	switch (SJA1105_AIS_SPEED(ais)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case 1:
		state->speed = SPEED_100;
		break;
	case 2:
		state->speed = SPEED_1000;
		break;
	default:
		/* state->speed is left untouched in this case */
		dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
			SJA1105_AIS_SPEED(ais));
	}
	state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
	state->an_complete = SJA1105_AIS_COMPLETE(ais);
	state->link = SJA1105_AIS_LINK_STATUS(ais);

	return 0;
}
1139
1140static int
1141sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1142 const struct sja1105_l2_lookup_entry *requested)
1143{
1144 struct sja1105_l2_lookup_entry *l2_lookup;
1145 struct sja1105_table *table;
1146 int i;
1147
1148 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1149 l2_lookup = table->entries;
1150
1151 for (i = 0; i < table->entry_count; i++)
1152 if (l2_lookup[i].macaddr == requested->macaddr &&
1153 l2_lookup[i].vlanid == requested->vlanid &&
1154 l2_lookup[i].destports & BIT(port))
1155 return i;
1156
1157 return -1;
1158}
1159
1160
1161
1162
1163
1164
/* Mirror an FDB add (@keep = true) or delete (@keep = false) into the
 * static config's L2 Lookup table, so it survives a static config reload.
 * Returns 0 or a negative error from the table resize.
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int rc, match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	match = sja1105_find_static_fdb_entry(priv, port, requested);
	if (match < 0) {
		/* Can't delete a missing entry */
		if (!keep)
			return 0;

		/* No match => new entry; grow the table by one */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		match = table->entry_count - 1;
	}

	/* Assign the pointer only after the potential resize above, which
	 * may have reallocated table->entries.
	 */
	l2_lookup = table->entries;

	/* We have a match.
	 * If the job was to add this FDB entry, it's already done (mostly
	 * anyway, since the port forwarding mask may have changed, case in
	 * which we update it).
	 * Otherwise we have to delete it.
	 */
	if (keep) {
		l2_lookup[match] = *requested;
		return 0;
	}

	/* To remove, the strategy is to overwrite the element with
	 * the last one, and then reduce the array size by 1.
	 */
	l2_lookup[match] = l2_lookup[table->entry_count - 1];
	return sja1105_table_resize(table, table->entry_count - 1);
}
1210
1211
1212
1213
1214
1215
1216
1217static int sja1105et_fdb_index(int bin, int way)
1218{
1219 return bin * SJA1105ET_FDB_BIN_SIZE + way;
1220}
1221
/* Scan all ways of FDB @bin for an entry matching @addr/@vid.
 * On a hit, optionally copy it to @match and return the way number.
 * While scanning, optionally record the last unused way in @last_unused.
 * Returns -1 if no matching entry is found.
 */
static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally noting them in the
		 * return argument.
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Not found */
	return -1;
}
1253
1254int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1255 const unsigned char *addr, u16 vid)
1256{
1257 struct sja1105_l2_lookup_entry l2_lookup = {0};
1258 struct sja1105_private *priv = ds->priv;
1259 struct device *dev = ds->dev;
1260 int last_unused = -1;
1261 int bin, way, rc;
1262
1263 bin = sja1105et_fdb_hash(priv, addr, vid);
1264
1265 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1266 &l2_lookup, &last_unused);
1267 if (way >= 0) {
1268
1269
1270
1271
1272 if (l2_lookup.destports & BIT(port))
1273 return 0;
1274 l2_lookup.destports |= BIT(port);
1275 } else {
1276 int index = sja1105et_fdb_index(bin, way);
1277
1278
1279
1280
1281 l2_lookup.macaddr = ether_addr_to_u64(addr);
1282 l2_lookup.destports = BIT(port);
1283 l2_lookup.vlanid = vid;
1284
1285 if (last_unused >= 0) {
1286 way = last_unused;
1287 } else {
1288
1289
1290
1291
1292
1293
1294 get_random_bytes(&way, sizeof(u8));
1295 way %= SJA1105ET_FDB_BIN_SIZE;
1296 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1297 bin, addr, way);
1298
1299 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1300 index, NULL, false);
1301 }
1302 }
1303 l2_lookup.index = sja1105et_fdb_index(bin, way);
1304
1305 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1306 l2_lookup.index, &l2_lookup,
1307 true);
1308 if (rc < 0)
1309 return rc;
1310
1311 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1312}
1313
/* Remove @port from an FDB entry on the first-generation (E/T) switches.
 * If the resulting destination mask becomes empty, the entire entry is
 * invalidated. Returns 0 (also when the entry doesn't exist) or a
 * negative error code.
 */
int sja1105et_fdb_del(struct dsa_switch *ds, int port,
		      const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int index, bin, way, rc;
	bool keep;

	bin = sja1105et_fdb_hash(priv, addr, vid);
	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
					    &l2_lookup, NULL);
	if (way < 0)
		return 0;
	index = sja1105et_fdb_index(bin, way);

	/* We have an FDB entry. Is our port in the destination mask? If yes,
	 * we need to remove it. If the resulting port mask becomes empty, we
	 * need to completely evict the FDB entry.
	 * Otherwise we just write it back.
	 */
	l2_lookup.destports &= ~BIT(port);

	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	/* Mirror the change into the static config as well */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}
1348
/* Add an FDB entry on the second-generation (P/Q/R/S) switches, which
 * support a search operation (SJA1105_SEARCH) by MAC/VLAN masks.
 * Returns 0 or a negative error code.
 */
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	/* Only match on VLAN ID and the I/O tag when the switch is
	 * VLAN-aware; otherwise the MAC address alone decides.
	 */
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found. If this port is already in the entry's port mask,
		 * the job is done.
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;

		/* l2_lookup.index was populated by the search; just add our
		 * port to the mask and write it back.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found: probe every position from 0 to the table size for a
	 * free slot (a read failure means the slot is unused).
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	/* Mark the entry as static so it is not aged out */
	l2_lookup.lockeds = true;
	l2_lookup.index = i;

skip_finding_an_index:
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	/* Mirror the change into the static config as well */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
1411
/* Remove @port from an FDB entry on the second-generation (P/Q/R/S)
 * switches. If the resulting destination mask becomes empty, the entry is
 * invalidated. Returns 0 (also when the entry doesn't exist) or a
 * negative error code.
 */
int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	/* Build the search key, same matching rules as in fdb_add */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* If we have other ports left as destinations, keep the entry,
	 * otherwise delete it entirely.
	 */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup, keep);
	if (rc < 0)
		return rc;

	/* Mirror the change into the static config as well */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}
1455
1456static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1457 const unsigned char *addr, u16 vid)
1458{
1459 struct sja1105_private *priv = ds->priv;
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1471 vid = 0;
1472
1473 return priv->info->fdb_add_cmd(ds, port, addr, vid);
1474}
1475
1476static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1477 const unsigned char *addr, u16 vid)
1478{
1479 struct sja1105_private *priv = ds->priv;
1480
1481 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1482 vid = 0;
1483
1484 return priv->info->fdb_del_cmd(ds, port, addr, vid);
1485}
1486
1487static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1488 dsa_fdb_dump_cb_t *cb, void *data)
1489{
1490 struct sja1105_private *priv = ds->priv;
1491 struct device *dev = ds->dev;
1492 int i;
1493
1494 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1495 struct sja1105_l2_lookup_entry l2_lookup = {0};
1496 u8 macaddr[ETH_ALEN];
1497 int rc;
1498
1499 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1500 i, &l2_lookup);
1501
1502 if (rc == -ENOENT)
1503 continue;
1504 if (rc) {
1505 dev_err(dev, "Failed to dump FDB: %d\n", rc);
1506 return rc;
1507 }
1508
1509
1510
1511
1512
1513
1514
1515 if (!(l2_lookup.destports & BIT(port)))
1516 continue;
1517 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1518
1519
1520 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1521 l2_lookup.vlanid = 0;
1522 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1523 }
1524 return 0;
1525}
1526
1527
/* DSA .port_mdb_prepare hook: nothing to validate ahead of time; capacity
 * is only checked when the entry is actually committed.
 */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	return 0;
}
1533
/* Multicast database entries share the L2 lookup table with unicast ones,
 * so MDB add is routed through the FDB helper.
 * NOTE(review): the return value of sja1105_fdb_add() is dropped because
 * this DSA hook returns void in this kernel version.
 */
static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
1539
/* MDB deletion also goes through the shared FDB path. */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
1545
1546static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1547 struct net_device *br, bool member)
1548{
1549 struct sja1105_l2_forwarding_entry *l2_fwd;
1550 struct sja1105_private *priv = ds->priv;
1551 int i, rc;
1552
1553 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1554
1555 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1556
1557
1558
1559 if (!dsa_is_user_port(ds, i))
1560 continue;
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570 if (i == port)
1571 continue;
1572 if (dsa_to_port(ds, i)->bridge_dev != br)
1573 continue;
1574 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1575 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1576
1577 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1578 i, &l2_fwd[i], true);
1579 if (rc < 0)
1580 return rc;
1581 }
1582
1583 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1584 port, &l2_fwd[port], true);
1585}
1586
1587static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1588 u8 state)
1589{
1590 struct sja1105_private *priv = ds->priv;
1591 struct sja1105_mac_config_entry *mac;
1592
1593 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1594
1595 switch (state) {
1596 case BR_STATE_DISABLED:
1597 case BR_STATE_BLOCKING:
1598
1599
1600
1601
1602
1603 mac[port].ingress = false;
1604 mac[port].egress = false;
1605 mac[port].dyn_learn = false;
1606 break;
1607 case BR_STATE_LISTENING:
1608 mac[port].ingress = true;
1609 mac[port].egress = false;
1610 mac[port].dyn_learn = false;
1611 break;
1612 case BR_STATE_LEARNING:
1613 mac[port].ingress = true;
1614 mac[port].egress = false;
1615 mac[port].dyn_learn = true;
1616 break;
1617 case BR_STATE_FORWARDING:
1618 mac[port].ingress = true;
1619 mac[port].egress = true;
1620 mac[port].dyn_learn = true;
1621 break;
1622 default:
1623 dev_err(ds->dev, "invalid STP state: %d\n", state);
1624 return;
1625 }
1626
1627 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1628 &mac[port], true);
1629}
1630
/* DSA .port_bridge_join hook: open forwarding towards bridge members. */
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}
1636
/* DSA .port_bridge_leave hook: close forwarding towards former members. */
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}
1642
1643#define BYTES_PER_KBIT (1000LL / 8)
1644
1645static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1646{
1647 int i;
1648
1649 for (i = 0; i < priv->info->num_cbs_shapers; i++)
1650 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1651 return i;
1652
1653 return -1;
1654}
1655
1656static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1657 int prio)
1658{
1659 int i;
1660
1661 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1662 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1663
1664 if (cbs->port == port && cbs->prio == prio) {
1665 memset(cbs, 0, sizeof(*cbs));
1666 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1667 i, cbs, true);
1668 }
1669 }
1670
1671 return 0;
1672}
1673
1674static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1675 struct tc_cbs_qopt_offload *offload)
1676{
1677 struct sja1105_private *priv = ds->priv;
1678 struct sja1105_cbs_entry *cbs;
1679 int index;
1680
1681 if (!offload->enable)
1682 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1683
1684 index = sja1105_find_unused_cbs_shaper(priv);
1685 if (index < 0)
1686 return -ENOSPC;
1687
1688 cbs = &priv->cbs[index];
1689 cbs->port = port;
1690 cbs->prio = offload->queue;
1691
1692
1693
1694 cbs->credit_hi = offload->hicredit;
1695 cbs->credit_lo = abs(offload->locredit);
1696
1697 cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1698 cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1699
1700
1701
1702
1703 cbs->credit_lo &= GENMASK_ULL(31, 0);
1704 cbs->send_slope &= GENMASK_ULL(31, 0);
1705
1706 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1707 true);
1708}
1709
1710static int sja1105_reload_cbs(struct sja1105_private *priv)
1711{
1712 int rc = 0, i;
1713
1714 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1715 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1716
1717 if (!cbs->idle_slope && !cbs->send_slope)
1718 continue;
1719
1720 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1721 true);
1722 if (rc)
1723 break;
1724 }
1725
1726 return rc;
1727}
1728
/* Human-readable labels for the reset reason, printed by
 * sja1105_static_config_reload() after reprogramming the switch.
 */
static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
1737
1738
1739
1740
1741
1742
1743
1744int sja1105_static_config_reload(struct sja1105_private *priv,
1745 enum sja1105_reset_reason reason)
1746{
1747 struct ptp_system_timestamp ptp_sts_before;
1748 struct ptp_system_timestamp ptp_sts_after;
1749 struct sja1105_mac_config_entry *mac;
1750 int speed_mbps[SJA1105_NUM_PORTS];
1751 struct dsa_switch *ds = priv->ds;
1752 s64 t1, t2, t3, t4;
1753 s64 t12, t34;
1754 u16 bmcr = 0;
1755 int rc, i;
1756 s64 now;
1757
1758 mutex_lock(&priv->mgmt_lock);
1759
1760 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1761
1762
1763
1764
1765
1766
1767 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1768 speed_mbps[i] = sja1105_speed[mac[i].speed];
1769 mac[i].speed = SJA1105_SPEED_AUTO;
1770 }
1771
1772 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
1773 bmcr = sja1105_sgmii_read(priv, MII_BMCR);
1774
1775
1776 mutex_lock(&priv->ptp_data.lock);
1777
1778 rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1779 if (rc < 0)
1780 goto out_unlock_ptp;
1781
1782
1783 rc = sja1105_static_config_upload(priv);
1784 if (rc < 0)
1785 goto out_unlock_ptp;
1786
1787 rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
1788 if (rc < 0)
1789 goto out_unlock_ptp;
1790
1791 t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
1792 t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
1793 t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
1794 t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
1795
1796 t12 = t1 + (t2 - t1) / 2;
1797
1798 t34 = t3 + (t4 - t3) / 2;
1799
1800 now += (t34 - t12);
1801
1802 __sja1105_ptp_adjtime(ds, now);
1803
1804out_unlock_ptp:
1805 mutex_unlock(&priv->ptp_data.lock);
1806
1807 dev_info(priv->ds->dev,
1808 "Reset switch and programmed static config. Reason: %s\n",
1809 sja1105_reset_reasons[reason]);
1810
1811
1812
1813
1814
1815 rc = sja1105_clocking_setup(priv);
1816 if (rc < 0)
1817 goto out;
1818
1819 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1820 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
1821 if (rc < 0)
1822 goto out;
1823 }
1824
1825 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
1826 bool an_enabled = !!(bmcr & BMCR_ANENABLE);
1827
1828 sja1105_sgmii_pcs_config(priv, an_enabled, false);
1829
1830 if (!an_enabled) {
1831 int speed = SPEED_UNKNOWN;
1832
1833 if (bmcr & BMCR_SPEED1000)
1834 speed = SPEED_1000;
1835 else if (bmcr & BMCR_SPEED100)
1836 speed = SPEED_100;
1837 else if (bmcr & BMCR_SPEED10)
1838 speed = SPEED_10;
1839
1840 sja1105_sgmii_pcs_force_speed(priv, speed);
1841 }
1842 }
1843
1844 rc = sja1105_reload_cbs(priv);
1845 if (rc < 0)
1846 goto out;
1847out:
1848 mutex_unlock(&priv->mgmt_lock);
1849
1850 return rc;
1851}
1852
1853static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1854{
1855 struct sja1105_mac_config_entry *mac;
1856
1857 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1858
1859 mac[port].vlanid = pvid;
1860
1861 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1862 &mac[port], true);
1863}
1864
1865static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1866 int tree_index, int sw_index,
1867 int other_port, struct net_device *br)
1868{
1869 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1870 struct sja1105_private *other_priv = other_ds->priv;
1871 struct sja1105_private *priv = ds->priv;
1872 int port, rc;
1873
1874 if (other_ds->ops != &sja1105_switch_ops)
1875 return 0;
1876
1877 for (port = 0; port < ds->num_ports; port++) {
1878 if (!dsa_is_user_port(ds, port))
1879 continue;
1880 if (dsa_to_port(ds, port)->bridge_dev != br)
1881 continue;
1882
1883 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
1884 port,
1885 other_priv->dsa_8021q_ctx,
1886 other_port);
1887 if (rc)
1888 return rc;
1889
1890 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
1891 other_port,
1892 priv->dsa_8021q_ctx,
1893 port);
1894 if (rc)
1895 return rc;
1896 }
1897
1898 return 0;
1899}
1900
1901static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
1902 int tree_index, int sw_index,
1903 int other_port,
1904 struct net_device *br)
1905{
1906 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1907 struct sja1105_private *other_priv = other_ds->priv;
1908 struct sja1105_private *priv = ds->priv;
1909 int port;
1910
1911 if (other_ds->ops != &sja1105_switch_ops)
1912 return;
1913
1914 for (port = 0; port < ds->num_ports; port++) {
1915 if (!dsa_is_user_port(ds, port))
1916 continue;
1917 if (dsa_to_port(ds, port)->bridge_dev != br)
1918 continue;
1919
1920 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
1921 other_priv->dsa_8021q_ctx,
1922 other_port);
1923
1924 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
1925 other_port,
1926 priv->dsa_8021q_ctx, port);
1927 }
1928}
1929
1930static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1931{
1932 struct sja1105_private *priv = ds->priv;
1933 int rc;
1934
1935 rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
1936 if (rc)
1937 return rc;
1938
1939 dev_info(ds->dev, "%s switch tagging\n",
1940 enabled ? "Enabled" : "Disabled");
1941 return 0;
1942}
1943
/* Advertise the tagging protocol to the DSA core; sja1105 always uses its
 * own (802.1Q-based) tagger regardless of port.
 */
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_SJA1105;
}
1950
1951static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
1952{
1953 int subvlan;
1954
1955 if (pvid)
1956 return 0;
1957
1958 for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1959 if (subvlan_map[subvlan] == VLAN_N_VID)
1960 return subvlan;
1961
1962 return -1;
1963}
1964
1965static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
1966{
1967 int subvlan;
1968
1969 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1970 if (subvlan_map[subvlan] == vid)
1971 return subvlan;
1972
1973 return -1;
1974}
1975
/* Look up @vid in the subvlan map that was last committed to hardware for
 * @port; returns the subvlan index, or -1 if @vid has no subvlan there.
 */
static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
					  int port, u16 vid)
{
	struct sja1105_port *sp = &priv->ports[port];

	return sja1105_find_subvlan(sp->subvlan_map, vid);
}
1983
1984static void sja1105_init_subvlan_map(u16 *subvlan_map)
1985{
1986 int subvlan;
1987
1988 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1989 subvlan_map[subvlan] = VLAN_N_VID;
1990}
1991
1992static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
1993 u16 *subvlan_map)
1994{
1995 struct sja1105_port *sp = &priv->ports[port];
1996 int subvlan;
1997
1998 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1999 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2000}
2001
2002static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2003{
2004 struct sja1105_vlan_lookup_entry *vlan;
2005 int count, i;
2006
2007 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2008 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2009
2010 for (i = 0; i < count; i++)
2011 if (vlan[i].vlanid == vid)
2012 return i;
2013
2014
2015 return -1;
2016}
2017
2018static int
2019sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2020 int count, int from_port, u16 from_vid,
2021 u16 to_vid)
2022{
2023 int i;
2024
2025 for (i = 0; i < count; i++)
2026 if (retagging[i].ing_port == BIT(from_port) &&
2027 retagging[i].vlan_ing == from_vid &&
2028 retagging[i].vlan_egr == to_vid)
2029 return i;
2030
2031
2032 return -1;
2033}
2034
2035static int sja1105_commit_vlans(struct sja1105_private *priv,
2036 struct sja1105_vlan_lookup_entry *new_vlan,
2037 struct sja1105_retagging_entry *new_retagging,
2038 int num_retagging)
2039{
2040 struct sja1105_retagging_entry *retagging;
2041 struct sja1105_vlan_lookup_entry *vlan;
2042 struct sja1105_table *table;
2043 int num_vlans = 0;
2044 int rc, i, k = 0;
2045
2046
2047 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2048 vlan = table->entries;
2049
2050 for (i = 0; i < VLAN_N_VID; i++) {
2051 int match = sja1105_is_vlan_configured(priv, i);
2052
2053 if (new_vlan[i].vlanid != VLAN_N_VID)
2054 num_vlans++;
2055
2056 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2057
2058 dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2059 rc = sja1105_dynamic_config_write(priv,
2060 BLK_IDX_VLAN_LOOKUP,
2061 i, &vlan[match], false);
2062 if (rc < 0)
2063 return rc;
2064 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2065
2066 if (match >= 0 &&
2067 vlan[match].vlanid == new_vlan[i].vlanid &&
2068 vlan[match].tag_port == new_vlan[i].tag_port &&
2069 vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2070 vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2071 continue;
2072
2073 dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2074 rc = sja1105_dynamic_config_write(priv,
2075 BLK_IDX_VLAN_LOOKUP,
2076 i, &new_vlan[i],
2077 true);
2078 if (rc < 0)
2079 return rc;
2080 }
2081 }
2082
2083 if (table->entry_count)
2084 kfree(table->entries);
2085
2086 table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2087 GFP_KERNEL);
2088 if (!table->entries)
2089 return -ENOMEM;
2090
2091 table->entry_count = num_vlans;
2092 vlan = table->entries;
2093
2094 for (i = 0; i < VLAN_N_VID; i++) {
2095 if (new_vlan[i].vlanid == VLAN_N_VID)
2096 continue;
2097 vlan[k++] = new_vlan[i];
2098 }
2099
2100
2101 table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2102 retagging = table->entries;
2103
2104 for (i = 0; i < table->entry_count; i++) {
2105 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2106 i, &retagging[i], false);
2107 if (rc)
2108 return rc;
2109 }
2110
2111 if (table->entry_count)
2112 kfree(table->entries);
2113
2114 table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2115 GFP_KERNEL);
2116 if (!table->entries)
2117 return -ENOMEM;
2118
2119 table->entry_count = num_retagging;
2120 retagging = table->entries;
2121
2122 for (i = 0; i < num_retagging; i++) {
2123 retagging[i] = new_retagging[i];
2124
2125
2126 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2127 i, &retagging[i], true);
2128 if (rc < 0)
2129 return rc;
2130 }
2131
2132 return 0;
2133}
2134
/* Scratch list element describing a bridge VLAN that spans a cross-chip
 * link and therefore needs a retagging rule.
 */
struct sja1105_crosschip_vlan {
	struct list_head list;
	u16 vid;		/* VLAN ID present on both ends */
	bool untagged;		/* remote port egresses it untagged */
	int port;		/* local port */
	int other_port;		/* port on the other switch */
	struct dsa_8021q_context *other_ctx;	/* other switch's dsa_8021q context */
};
2143
/* Scratch list element: one neighbouring switch to notify of VLAN table
 * changes (deduplicated from the cross-chip link list).
 */
struct sja1105_crosschip_switch {
	struct list_head list;
	struct dsa_8021q_context *other_ctx;	/* neighbour's dsa_8021q context */
};
2148
2149static int sja1105_commit_pvid(struct sja1105_private *priv)
2150{
2151 struct sja1105_bridge_vlan *v;
2152 struct list_head *vlan_list;
2153 int rc = 0;
2154
2155 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2156 vlan_list = &priv->bridge_vlans;
2157 else
2158 vlan_list = &priv->dsa_8021q_vlans;
2159
2160 list_for_each_entry(v, vlan_list, list) {
2161 if (v->pvid) {
2162 rc = sja1105_pvid_apply(priv, v->port, v->vid);
2163 if (rc)
2164 break;
2165 }
2166 }
2167
2168 return rc;
2169}
2170
2171static int
2172sja1105_build_bridge_vlans(struct sja1105_private *priv,
2173 struct sja1105_vlan_lookup_entry *new_vlan)
2174{
2175 struct sja1105_bridge_vlan *v;
2176
2177 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2178 return 0;
2179
2180 list_for_each_entry(v, &priv->bridge_vlans, list) {
2181 int match = v->vid;
2182
2183 new_vlan[match].vlanid = v->vid;
2184 new_vlan[match].vmemb_port |= BIT(v->port);
2185 new_vlan[match].vlan_bc |= BIT(v->port);
2186 if (!v->untagged)
2187 new_vlan[match].tag_port |= BIT(v->port);
2188 }
2189
2190 return 0;
2191}
2192
2193static int
2194sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2195 struct sja1105_vlan_lookup_entry *new_vlan)
2196{
2197 struct sja1105_bridge_vlan *v;
2198
2199 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2200 return 0;
2201
2202 list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2203 int match = v->vid;
2204
2205 new_vlan[match].vlanid = v->vid;
2206 new_vlan[match].vmemb_port |= BIT(v->port);
2207 new_vlan[match].vlan_bc |= BIT(v->port);
2208 if (!v->untagged)
2209 new_vlan[match].tag_port |= BIT(v->port);
2210 }
2211
2212 return 0;
2213}
2214
2215static int sja1105_build_subvlans(struct sja1105_private *priv,
2216 u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2217 struct sja1105_vlan_lookup_entry *new_vlan,
2218 struct sja1105_retagging_entry *new_retagging,
2219 int *num_retagging)
2220{
2221 struct sja1105_bridge_vlan *v;
2222 int k = *num_retagging;
2223
2224 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2225 return 0;
2226
2227 list_for_each_entry(v, &priv->bridge_vlans, list) {
2228 int upstream = dsa_upstream_port(priv->ds, v->port);
2229 int match, subvlan;
2230 u16 rx_vid;
2231
2232
2233
2234
2235
2236 if (!dsa_is_user_port(priv->ds, v->port))
2237 continue;
2238
2239 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2240 v->vid);
2241 if (subvlan < 0) {
2242 subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2243 v->pvid);
2244 if (subvlan < 0) {
2245 dev_err(priv->ds->dev, "No more free subvlans\n");
2246 return -ENOSPC;
2247 }
2248 }
2249
2250 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2251
2252
2253
2254
2255
2256
2257 match = rx_vid;
2258 new_vlan[match].vlanid = rx_vid;
2259 new_vlan[match].vmemb_port |= BIT(v->port);
2260 new_vlan[match].vmemb_port |= BIT(upstream);
2261 new_vlan[match].vlan_bc |= BIT(v->port);
2262 new_vlan[match].vlan_bc |= BIT(upstream);
2263
2264
2265
2266 if (!v->untagged)
2267 new_vlan[match].tag_port |= BIT(v->port);
2268
2269 new_vlan[match].tag_port |= BIT(upstream);
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279 match = v->vid;
2280 new_vlan[match].vlan_bc &= ~BIT(upstream);
2281
2282
2283 new_retagging[k].vlan_ing = v->vid;
2284 new_retagging[k].vlan_egr = rx_vid;
2285 new_retagging[k].ing_port = BIT(v->port);
2286 new_retagging[k].egr_port = BIT(upstream);
2287 if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
2288 dev_err(priv->ds->dev, "No more retagging rules\n");
2289 return -ENOSPC;
2290 }
2291
2292 subvlan_map[v->port][subvlan] = v->vid;
2293 }
2294
2295 *num_retagging = k;
2296
2297 return 0;
2298}
2299
2300
2301
2302
2303
/* In best-effort mode, collect the VLANs that span a cross-chip link
 * (present on a remote sja1105 port and on our local port in the same
 * bridge), and install on our side the remote RX VLANs plus the retagging
 * rules that translate them back to the original bridge VID.
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
				 struct sja1105_vlan_lookup_entry *new_vlan,
				 struct sja1105_retagging_entry *new_retagging,
				 int *num_retagging)
{
	struct sja1105_crosschip_vlan *tmp, *pos;
	struct dsa_8021q_crosschip_link *c;
	struct sja1105_bridge_vlan *v, *w;
	struct list_head crosschip_vlans;
	int k = *num_retagging;
	int rc = 0;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	INIT_LIST_HEAD(&crosschip_vlans);

	/* Pass 1: build the deduplicated list of cross-chip VLANs */
	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		struct sja1105_private *other_priv = c->other_ctx->ds->priv;

		if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
			continue;

		/* Subvlan retagging is only meaningful between user ports */
		if (!dsa_is_user_port(priv->ds, c->port))
			continue;
		if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
			continue;

		/* Look at the remote port's bridge VLANs */
		list_for_each_entry(v, &other_priv->bridge_vlans, list) {
			bool already_added = false;
			bool we_have_it = false;

			if (v->port != c->other_port)
				continue;

			/* The remote pvid travels untagged and needs no
			 * retagging here.
			 */
			if (v->pvid)
				continue;

			/* Only VLANs we also have on our own port matter */
			list_for_each_entry(w, &priv->bridge_vlans, list) {
				if (w->port == c->port && w->vid == v->vid) {
					we_have_it = true;
					break;
				}
			}

			if (!we_have_it)
				continue;

			/* Deduplicate against what we already collected */
			list_for_each_entry(tmp, &crosschip_vlans, list) {
				if (tmp->vid == v->vid &&
				    tmp->untagged == v->untagged &&
				    tmp->port == c->port &&
				    tmp->other_port == v->port &&
				    tmp->other_ctx == c->other_ctx) {
					already_added = true;
					break;
				}
			}

			if (already_added)
				continue;

			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				dev_err(priv->ds->dev, "Failed to allocate memory\n");
				rc = -ENOMEM;
				goto out;
			}
			tmp->vid = v->vid;
			tmp->port = c->port;
			tmp->other_port = v->port;
			tmp->other_ctx = c->other_ctx;
			tmp->untagged = v->untagged;
			list_add(&tmp->list, &crosschip_vlans);
		}
	}

	/* Pass 2: install VLANs and retagging rules for each collected VID */
	list_for_each_entry(tmp, &crosschip_vlans, list) {
		struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
		int upstream = dsa_upstream_port(priv->ds, tmp->port);
		int match, subvlan;
		u16 rx_vid;

		subvlan = sja1105_find_committed_subvlan(other_priv,
							 tmp->other_port,
							 tmp->vid);
		/* If this happens it's a bug: the remote switch committed no
		 * subvlan for a VLAN it advertises on this link.
		 */
		if (WARN_ON(subvlan < 0)) {
			rc = -EINVAL;
			goto out;
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
						  tmp->other_port,
						  subvlan);

		/* Accept the remote subvlan-coded RX VLAN on our port and on
		 * the upstream (CPU-facing) port.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(tmp->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		/* Egress tagging mirrors the remote VLAN's tagged/untagged
		 * state on our port; always tagged towards the CPU.
		 */
		if (!tmp->untagged)
			new_vlan[match].tag_port |= BIT(tmp->port);
		new_vlan[match].tag_port |= BIT(upstream);
		/* The raw RX VID itself must not be broadcast out of our
		 * port; only the retagged (original VID) copy leaves.
		 */
		new_vlan[match].vlan_bc &= ~BIT(tmp->port);

		/* Reuse an existing retagging rule for this translation if
		 * one was already created, otherwise allocate a new one.
		 */
		k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
						 upstream, rx_vid, tmp->vid);
		if (k < 0) {
			if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
				dev_err(priv->ds->dev, "No more retagging rules\n");
				rc = -ENOSPC;
				goto out;
			}
			k = (*num_retagging)++;
		}

		new_retagging[k].vlan_ing = rx_vid;
		new_retagging[k].vlan_egr = tmp->vid;
		new_retagging[k].ing_port = BIT(upstream);
		new_retagging[k].egr_port |= BIT(tmp->port);
	}

out:
	/* Free the scratch list in all cases */
	list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
		list_del(&tmp->list);
		kfree(tmp);
	}

	return rc;
}
2473
2474static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2475
2476static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2477{
2478 struct sja1105_crosschip_switch *s, *pos;
2479 struct list_head crosschip_switches;
2480 struct dsa_8021q_crosschip_link *c;
2481 int rc = 0;
2482
2483 INIT_LIST_HEAD(&crosschip_switches);
2484
2485 list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2486 bool already_added = false;
2487
2488 list_for_each_entry(s, &crosschip_switches, list) {
2489 if (s->other_ctx == c->other_ctx) {
2490 already_added = true;
2491 break;
2492 }
2493 }
2494
2495 if (already_added)
2496 continue;
2497
2498 s = kzalloc(sizeof(*s), GFP_KERNEL);
2499 if (!s) {
2500 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2501 rc = -ENOMEM;
2502 goto out;
2503 }
2504 s->other_ctx = c->other_ctx;
2505 list_add(&s->list, &crosschip_switches);
2506 }
2507
2508 list_for_each_entry(s, &crosschip_switches, list) {
2509 struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2510
2511 rc = sja1105_build_vlan_table(other_priv, false);
2512 if (rc)
2513 goto out;
2514 }
2515
2516out:
2517 list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2518 list_del(&s->list);
2519 kfree(s);
2520 }
2521
2522 return rc;
2523}
2524
2525static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2526{
2527 u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2528 struct sja1105_retagging_entry *new_retagging;
2529 struct sja1105_vlan_lookup_entry *new_vlan;
2530 struct sja1105_table *table;
2531 int i, num_retagging = 0;
2532 int rc;
2533
2534 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2535 new_vlan = kcalloc(VLAN_N_VID,
2536 table->ops->unpacked_entry_size, GFP_KERNEL);
2537 if (!new_vlan)
2538 return -ENOMEM;
2539
2540 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2541 new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2542 table->ops->unpacked_entry_size, GFP_KERNEL);
2543 if (!new_retagging) {
2544 kfree(new_vlan);
2545 return -ENOMEM;
2546 }
2547
2548 for (i = 0; i < VLAN_N_VID; i++)
2549 new_vlan[i].vlanid = VLAN_N_VID;
2550
2551 for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2552 new_retagging[i].vlan_ing = VLAN_N_VID;
2553
2554 for (i = 0; i < priv->ds->num_ports; i++)
2555 sja1105_init_subvlan_map(subvlan_map[i]);
2556
2557
2558 rc = sja1105_build_bridge_vlans(priv, new_vlan);
2559 if (rc)
2560 goto out;
2561
2562
2563
2564
2565
2566
2567 rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2568 if (rc)
2569 goto out;
2570
2571
2572
2573
2574
2575
2576 rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2577 &num_retagging);
2578 if (rc)
2579 goto out;
2580
2581 rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2582 &num_retagging);
2583 if (rc)
2584 goto out;
2585
2586 rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2587 if (rc)
2588 goto out;
2589
2590 rc = sja1105_commit_pvid(priv);
2591 if (rc)
2592 goto out;
2593
2594 for (i = 0; i < priv->ds->num_ports; i++)
2595 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2596
2597 if (notify) {
2598 rc = sja1105_notify_crosschip_switches(priv);
2599 if (rc)
2600 goto out;
2601 }
2602
2603out:
2604 kfree(new_vlan);
2605 kfree(new_retagging);
2606
2607 return rc;
2608}
2609
2610static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
2611 const struct switchdev_obj_port_vlan *vlan)
2612{
2613 struct sja1105_private *priv = ds->priv;
2614 u16 vid;
2615
2616 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2617 return 0;
2618
2619
2620
2621
2622
2623 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2624 if (vid_is_dsa_8021q(vid)) {
2625 dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
2626 return -EBUSY;
2627 }
2628 }
2629
2630 return 0;
2631}
2632
2633
2634
2635
2636
/* DSA .port_vlan_filtering hook: switch the whole chip between
 * VLAN-unaware, best-effort and full VLAN filtering modes. This is a
 * global (not per-port) setting on sja1105 and requires a static config
 * reload to change the recognized TPIDs.
 */
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
			   struct switchdev_trans *trans)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	enum sja1105_vlan_state state;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	bool want_tagging;
	u16 tpid, tpid2;
	int rc;

	/* Prepare phase: refuse the change while virtual-link rules are
	 * installed, since those depend on the current VLAN awareness.
	 */
	if (switchdev_trans_ph_prepare(trans)) {
		list_for_each_entry(rule, &priv->flow_block.rules, list) {
			if (rule->type == SJA1105_RULE_VL) {
				dev_err(ds->dev,
					"Cannot change VLAN filtering with active VL rules\n");
				return -EBUSY;
			}
		}

		return 0;
	}

	if (enabled) {
		/* Enable VLAN filtering: recognize standard TPIDs */
		tpid = ETH_P_8021Q;
		tpid2 = ETH_P_8021AD;
	} else {
		/* Disable VLAN filtering: only the private TPID matches */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	/* NOTE(review): the @port parameter is repurposed as a loop counter
	 * here; this hook applies to the whole switch anyway.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (enabled)
			sp->xmit_tpid = priv->info->qinq_tpid;
		else
			sp->xmit_tpid = ETH_P_SJA1105;
	}

	if (!enabled)
		state = SJA1105_VLAN_UNAWARE;
	else if (priv->best_effort_vlan_filtering)
		state = SJA1105_VLAN_BEST_EFFORT;
	else
		state = SJA1105_VLAN_FILTERING_FULL;

	if (priv->vlan_state == state)
		return 0;

	priv->vlan_state = state;
	want_tagging = (state == SJA1105_VLAN_UNAWARE ||
			state == SJA1105_VLAN_BEST_EFFORT);

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN-aware, the source port is conveyed in the VLAN tag
	 * rather than in the destination MAC address.
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	want_tagging = priv->best_effort_vlan_filtering || !enabled;

	/* Shared VLAN learning is used while dsa_8021q tagging is active,
	 * so that addresses learned on the tagger's RX VLANs are visible
	 * to forwarding decisions on other VLANs as well.
	 */
	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;
	l2_lookup_params->shared_learn = want_tagging;

	sja1105_frame_memory_partitioning(priv);

	rc = sja1105_build_vlan_table(priv, false);
	if (rc)
		return rc;

	/* Changing the TPIDs requires a full static config reload */
	rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Finally, enable/disable the dsa_8021q tagging VLANs to match the
	 * new awareness state.
	 */
	return sja1105_setup_8021q_tagging(ds, want_tagging);
}
2748
2749
2750
2751
2752static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2753 u16 flags, struct list_head *vlan_list)
2754{
2755 bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2756 bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2757 struct sja1105_bridge_vlan *v;
2758
2759 list_for_each_entry(v, vlan_list, list)
2760 if (v->port == port && v->vid == vid &&
2761 v->untagged == untagged && v->pvid == pvid)
2762
2763 return 0;
2764
2765 v = kzalloc(sizeof(*v), GFP_KERNEL);
2766 if (!v) {
2767 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2768 return -ENOMEM;
2769 }
2770
2771 v->port = port;
2772 v->vid = vid;
2773 v->untagged = untagged;
2774 v->pvid = pvid;
2775 list_add(&v->list, vlan_list);
2776
2777 return 1;
2778}
2779
2780
2781static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2782 struct list_head *vlan_list)
2783{
2784 struct sja1105_bridge_vlan *v, *n;
2785
2786 list_for_each_entry_safe(v, n, vlan_list, list) {
2787 if (v->port == port && v->vid == vid) {
2788 list_del(&v->list);
2789 kfree(v);
2790 return 1;
2791 }
2792 }
2793
2794 return 0;
2795}
2796
2797static void sja1105_vlan_add(struct dsa_switch *ds, int port,
2798 const struct switchdev_obj_port_vlan *vlan)
2799{
2800 struct sja1105_private *priv = ds->priv;
2801 bool vlan_table_changed = false;
2802 u16 vid;
2803 int rc;
2804
2805 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2806 rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
2807 &priv->bridge_vlans);
2808 if (rc < 0)
2809 return;
2810 if (rc > 0)
2811 vlan_table_changed = true;
2812 }
2813
2814 if (!vlan_table_changed)
2815 return;
2816
2817 rc = sja1105_build_vlan_table(priv, true);
2818 if (rc)
2819 dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
2820}
2821
2822static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2823 const struct switchdev_obj_port_vlan *vlan)
2824{
2825 struct sja1105_private *priv = ds->priv;
2826 bool vlan_table_changed = false;
2827 u16 vid;
2828 int rc;
2829
2830 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2831 rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
2832 if (rc > 0)
2833 vlan_table_changed = true;
2834 }
2835
2836 if (!vlan_table_changed)
2837 return 0;
2838
2839 return sja1105_build_vlan_table(priv, true);
2840}
2841
2842static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2843 u16 flags)
2844{
2845 struct sja1105_private *priv = ds->priv;
2846 int rc;
2847
2848 rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2849 if (rc <= 0)
2850 return rc;
2851
2852 return sja1105_build_vlan_table(priv, true);
2853}
2854
2855static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2856{
2857 struct sja1105_private *priv = ds->priv;
2858 int rc;
2859
2860 rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2861 if (!rc)
2862 return 0;
2863
2864 return sja1105_build_vlan_table(priv, true);
2865}
2866
/* Callbacks used by the dsa_8021q core to install/remove the VLANs it
 * needs for its tagging scheme.
 */
static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
	.vlan_add	= sja1105_dsa_8021q_vlan_add,
	.vlan_del	= sja1105_dsa_8021q_vlan_del,
};
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884static int sja1105_setup(struct dsa_switch *ds)
2885{
2886 struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2887 struct sja1105_private *priv = ds->priv;
2888 int rc;
2889
2890 rc = sja1105_parse_dt(priv, ports);
2891 if (rc < 0) {
2892 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2893 return rc;
2894 }
2895
2896
2897
2898
2899 rc = sja1105_parse_rgmii_delays(priv, ports);
2900 if (rc < 0) {
2901 dev_err(ds->dev, "RGMII delay not supported\n");
2902 return rc;
2903 }
2904
2905 rc = sja1105_ptp_clock_register(ds);
2906 if (rc < 0) {
2907 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
2908 return rc;
2909 }
2910
2911 rc = sja1105_static_config_load(priv, ports);
2912 if (rc < 0) {
2913 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
2914 return rc;
2915 }
2916
2917 rc = sja1105_clocking_setup(priv);
2918 if (rc < 0) {
2919 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
2920 return rc;
2921 }
2922
2923
2924
2925
2926
2927
2928
2929
2930 ds->vlan_filtering_is_global = true;
2931
2932
2933 ds->num_tx_queues = SJA1105_NUM_TC;
2934
2935 ds->mtu_enforcement_ingress = true;
2936
2937 ds->configure_vlan_while_not_filtering = true;
2938
2939 rc = sja1105_devlink_setup(ds);
2940 if (rc < 0)
2941 return rc;
2942
2943
2944
2945
2946
2947 rtnl_lock();
2948 rc = sja1105_setup_8021q_tagging(ds, true);
2949 rtnl_unlock();
2950
2951 return rc;
2952}
2953
/* Release everything acquired by sja1105_setup() and sja1105_probe()'s
 * per-port init: stop the deferred xmit workers, tear down the subsystems
 * in reverse order of setup, and free the cached VLAN lists.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_bridge_vlan *v, *n;
	int port;

	/* Destroy the deferred xmit kthread workers created at probe time.
	 * sja1105_port_disable() has already cancelled their pending work.
	 */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		if (sp->xmit_worker)
			kthread_destroy_worker(sp->xmit_worker);
	}

	sja1105_devlink_teardown(ds);
	sja1105_flower_teardown(ds);
	sja1105_tas_teardown(ds);
	sja1105_ptp_clock_unregister(ds);
	sja1105_static_config_free(&priv->static_config);

	/* Free the cached dsa_8021q and bridge VLAN entries */
	list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}

	list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
		list_del(&v->list);
		kfree(v);
	}
}
2986
2987static int sja1105_port_enable(struct dsa_switch *ds, int port,
2988 struct phy_device *phy)
2989{
2990 struct net_device *slave;
2991
2992 if (!dsa_is_user_port(ds, port))
2993 return 0;
2994
2995 slave = dsa_to_port(ds, port)->slave;
2996
2997 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2998
2999 return 0;
3000}
3001
3002static void sja1105_port_disable(struct dsa_switch *ds, int port)
3003{
3004 struct sja1105_private *priv = ds->priv;
3005 struct sja1105_port *sp = &priv->ports[port];
3006
3007 if (!dsa_is_user_port(ds, port))
3008 return;
3009
3010 kthread_cancel_work_sync(&sp->xmit_work);
3011 skb_queue_purge(&sp->xmit_queue);
3012}
3013
/* Transmit one management frame through egress @port by installing a
 * one-shot "management route" in dynamic-config @slot that matches on the
 * frame's destination MAC, then enqueueing the skb. Polls until the switch
 * consumes the route (or gives up after 10 tries). @takets requests a TX
 * timestamp from the hardware.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* The switch clears the ENFPORT flag of the management route
		 * once it has matched a frame on it, so a cleared flag acts
		 * as the acknowledgment we are polling for.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* The frame was never matched. Invalidate the management
		 * route (valid=false) so a later, unrelated frame cannot
		 * accidentally match on this stale entry.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
3071
/* Recover the enclosing structures from their embedded members */
#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)
3076
3077
3078
3079
3080
/* kthread worker function: drain the port's deferred xmit queue, sending
 * each frame via a management route. Runs in process context, which is
 * required because programming the route involves sleepable SPI transfers.
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;	/* index into priv->ports[] */
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;

		/* Serialize access to the single management route slot (0) */
		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* A clone exists when a hardware TX timestamp was requested;
		 * hand it to the PTP code for timestamp retrieval.
		 */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}
3103
3104
3105
3106
3107static int sja1105_set_ageing_time(struct dsa_switch *ds,
3108 unsigned int ageing_time)
3109{
3110 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3111 struct sja1105_private *priv = ds->priv;
3112 struct sja1105_table *table;
3113 unsigned int maxage;
3114
3115 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3116 l2_lookup_params = table->entries;
3117
3118 maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3119
3120 if (l2_lookup_params->maxage == maxage)
3121 return 0;
3122
3123 l2_lookup_params->maxage = maxage;
3124
3125 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3126}
3127
3128static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3129{
3130 struct sja1105_l2_policing_entry *policing;
3131 struct sja1105_private *priv = ds->priv;
3132
3133 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3134
3135 if (dsa_is_cpu_port(ds, port))
3136 new_mtu += VLAN_HLEN;
3137
3138 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3139
3140 if (policing[port].maxlen == new_mtu)
3141 return 0;
3142
3143 policing[port].maxlen = new_mtu;
3144
3145 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3146}
3147
static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
{
	/* 2043 bytes is the largest frame length the port policer accepts
	 * (cf. the MAXLEN computation in sja1105_change_mtu); convert it to
	 * an L2 payload MTU by subtracting the VLAN-tagged Ethernet header
	 * and the FCS.
	 */
	return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
3152
3153static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3154 enum tc_setup_type type,
3155 void *type_data)
3156{
3157 switch (type) {
3158 case TC_SETUP_QDISC_TAPRIO:
3159 return sja1105_setup_tc_taprio(ds, port, type_data);
3160 case TC_SETUP_QDISC_CBS:
3161 return sja1105_setup_tc_cbs(ds, port, type_data);
3162 default:
3163 return -EOPNOTSUPP;
3164 }
3165}
3166
3167
3168
3169
3170
3171
3172
/* Enable/disable mirroring of @from's ingress or egress traffic towards the
 * single mirror capture port @to. The hardware has only one mirr_port, so
 * rules towards a different capture port are rejected with -EBUSY while any
 * rule is active; mirr_port is parked at the invalid value SJA1105_NUM_PORTS
 * once no port references it anymore.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* mirr_port == SJA1105_NUM_PORTS means "mirroring disabled" */
	already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Keep mirr_port while any port still references it.
		 * NOTE(review): mac[from]'s own mirror flag is only cleared
		 * below, so this scan still sees the rule being deleted and
		 * mirr_port is not reset on the last deletion — confirm this
		 * is intended.
		 */
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}

		if (!keep)
			new_mirr_port = SJA1105_NUM_PORTS;
	}
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	/* Flag the requested direction on the mirrored port */
	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}
3229
3230static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3231 struct dsa_mall_mirror_tc_entry *mirror,
3232 bool ingress)
3233{
3234 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3235 ingress, true);
3236}
3237
3238static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3239 struct dsa_mall_mirror_tc_entry *mirror)
3240{
3241 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3242 mirror->ingress, false);
3243}
3244
/* DSA .port_policer_add: program the port's L2 policer with the requested
 * rate and burst size.
 */
static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
				    struct dsa_mall_policer_tc_entry *policer)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_private *priv = ds->priv;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* rate_bytes_per_sec * 512 / 10^6 converts bytes/s into the
	 * hardware RATE unit — apparently 64 steps per Mbps (i.e. one step
	 * per 15.625 kbps); cf. the SJA1105_RATE_MBPS() default used in
	 * sja1105_port_policer_del(). TODO confirm against UM10944.
	 */
	policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
				      1000000);
	policing[port].smax = policer->burst;

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
3263
3264static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3265{
3266 struct sja1105_l2_policing_entry *policing;
3267 struct sja1105_private *priv = ds->priv;
3268
3269 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3270
3271 policing[port].rate = SJA1105_RATE_MBPS(1000);
3272 policing[port].smax = 65535;
3273
3274 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3275}
3276
/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	/* Lifecycle */
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	/* phylink integration */
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	/* ethtool statistics and timestamping info */
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_enable		= sja1105_port_enable,
	.port_disable		= sja1105_port_disable,
	/* FDB / bridge / VLAN offload */
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	/* PTP hardware timestamping */
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	/* tc offloads: qdiscs, mirroring, policing, flower */
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	/* Cross-chip bridging over dsa_8021q */
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	/* devlink */
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};
3326
3327static const struct of_device_id sja1105_dt_ids[];
3328
/* Read the device ID and part number over SPI and cross-check them against
 * the compatible-string match table. If the probed silicon differs from what
 * the device tree claims, warn and switch priv->info to the detected chip.
 * Returns 0 on a recognized chip, -ENODEV otherwise, or a negative SPI
 * transfer error.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	const struct of_device_id *match;
	u32 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
			      NULL);
	if (rc < 0)
		return rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
			      SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Part number is a bit field of the product ID register */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
		const struct sja1105_info *info = match->data;

		/* Is what's been probed in our match table at all? */
		if (info->device_id != device_id || info->part_no != part_no)
			continue;

		/* But is it what's in the device tree? */
		if (priv->info->device_id != device_id ||
		    priv->info->part_no != part_no) {
			dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
				 priv->info->name, info->name);
			/* It isn't. No problem, pick that up. */
			priv->info = info;
		}

		return 0;
	}

	dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
		device_id, part_no);

	return -ENODEV;
}
3375
3376static int sja1105_probe(struct spi_device *spi)
3377{
3378 struct sja1105_tagger_data *tagger_data;
3379 struct device *dev = &spi->dev;
3380 struct sja1105_private *priv;
3381 struct dsa_switch *ds;
3382 int rc, port;
3383
3384 if (!dev->of_node) {
3385 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3386 return -EINVAL;
3387 }
3388
3389 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3390 if (!priv)
3391 return -ENOMEM;
3392
3393
3394 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3395 if (IS_ERR(priv->reset_gpio))
3396 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
3397 else
3398 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3399
3400
3401
3402
3403 priv->spidev = spi;
3404 spi_set_drvdata(spi, priv);
3405
3406
3407 spi->bits_per_word = 8;
3408 rc = spi_setup(spi);
3409 if (rc < 0) {
3410 dev_err(dev, "Could not init SPI\n");
3411 return rc;
3412 }
3413
3414 priv->info = of_device_get_match_data(dev);
3415
3416
3417 rc = sja1105_check_device_id(priv);
3418 if (rc < 0) {
3419 dev_err(dev, "Device ID check failed: %d\n", rc);
3420 return rc;
3421 }
3422
3423 dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3424
3425 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3426 if (!ds)
3427 return -ENOMEM;
3428
3429 ds->dev = dev;
3430 ds->num_ports = SJA1105_NUM_PORTS;
3431 ds->ops = &sja1105_switch_ops;
3432 ds->priv = priv;
3433 priv->ds = ds;
3434
3435 tagger_data = &priv->tagger_data;
3436
3437 mutex_init(&priv->ptp_data.lock);
3438 mutex_init(&priv->mgmt_lock);
3439
3440 priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
3441 GFP_KERNEL);
3442 if (!priv->dsa_8021q_ctx)
3443 return -ENOMEM;
3444
3445 priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
3446 priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
3447 priv->dsa_8021q_ctx->ds = ds;
3448
3449 INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
3450 INIT_LIST_HEAD(&priv->bridge_vlans);
3451 INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3452
3453 sja1105_tas_setup(ds);
3454 sja1105_flower_setup(ds);
3455
3456 rc = dsa_register_switch(priv->ds);
3457 if (rc)
3458 return rc;
3459
3460 if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3461 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3462 sizeof(struct sja1105_cbs_entry),
3463 GFP_KERNEL);
3464 if (!priv->cbs)
3465 return -ENOMEM;
3466 }
3467
3468
3469 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3470 struct sja1105_port *sp = &priv->ports[port];
3471 struct dsa_port *dp = dsa_to_port(ds, port);
3472 struct net_device *slave;
3473 int subvlan;
3474
3475 if (!dsa_is_user_port(ds, port))
3476 continue;
3477
3478 dp->priv = sp;
3479 sp->dp = dp;
3480 sp->data = tagger_data;
3481 slave = dp->slave;
3482 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3483 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3484 slave->name);
3485 if (IS_ERR(sp->xmit_worker)) {
3486 rc = PTR_ERR(sp->xmit_worker);
3487 dev_err(ds->dev,
3488 "failed to create deferred xmit thread: %d\n",
3489 rc);
3490 goto out;
3491 }
3492 skb_queue_head_init(&sp->xmit_queue);
3493 sp->xmit_tpid = ETH_P_SJA1105;
3494
3495 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3496 sp->subvlan_map[subvlan] = VLAN_N_VID;
3497 }
3498
3499 return 0;
3500out:
3501 while (port-- > 0) {
3502 struct sja1105_port *sp = &priv->ports[port];
3503
3504 if (!dsa_is_user_port(ds, port))
3505 continue;
3506
3507 kthread_destroy_worker(sp->xmit_worker);
3508 }
3509 return rc;
3510}
3511
/* SPI remove: unregister the DSA switch; this runs the DSA teardown path,
 * including our sja1105_teardown() callback. Memory is devm-managed.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}
3519
/* Supported chip variants; .data points at the per-chip sja1105_info used
 * by sja1105_check_device_id() to validate (and possibly correct) the match.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3530
/* SPI driver glue; registered via module_spi_driver() below */
static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};
3540
3541module_spi_driver(sja1105_driver);
3542
3543MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3544MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3545MODULE_DESCRIPTION("SJA1105 Driver");
3546MODULE_LICENSE("GPL v2");
3547