// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */
#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1
static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}

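/* Each standalone port uses a private FDB, while ports under the same bridge
 * share one. Pick the FDB for @port_priv based on the bridge it is joining
 * (or leaving, when @bridge_dev is NULL).
 */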
static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others being standalone.
		 * We can just keep the FDB that we already have.
		 */
		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The call to netdev_for_each_lower_dev() below requires the RTNL
	 * lock to be held. Assert on it so that it's easier to catch new
	 * usages of this function.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch
	 * interface already present in that bridge.
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}

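/* Build the egress flooding domain for @fdb_id: all ports in the same
 * bridging domain that have the requested flood type enabled, plus the
 * control interface.
 */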
static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain.
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}

static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}

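/* Translate an IOVA handed to us by the hardware into a kernel virtual
 * address, going through the IOMMU domain when one is attached.
 */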
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}

static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}

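/* Program a new PVID on the port. The TCI can only be changed while the
 * interface is disabled, so the port is toggled if it was up.
 */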
static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}

static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the VLAN configuration below.
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

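/* Map bridge STP states onto their DPSW firmware counterparts */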
static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		if (ppriv_local)
			ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard the error when deleting the same entry twice */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard the error when adding the same entry twice */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard the error when deleting the same entry twice */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}

static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_switch_port_is_type_phy(port_priv))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts.
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */
static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* A new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* If there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(netdev);
	}

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		dpaa2_mac_start(port_priv->mac);
		phylink_start(port_priv->mac->phylink);
	}

	return 0;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		phylink_stop(port_priv->mac->phylink);
		dpaa2_mac_stop(port_priv->mac);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

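/* Emit one dumped FDB entry as an RTM_NEWNEIGH netlink message */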
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

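/* Retrieve the whole FDB table over DMA from the firmware and invoke @cb on
 * each dumped entry.
 */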
static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		eth_hw_addr_set(net_dev, mac_addr);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one.
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for
		 * all practical purposes, this will be our "permanent" MAC
		 * address, at least until the next reboot. This also allows
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

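/* Build a single-buffer frame descriptor (FD) around @skb. The buffer starts
 * with the software annotation area (where a backpointer to the skb is
 * stashed so the Tx confirmation path can free it), followed by the hardware
 * annotation area and the frame data itself.
 */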
static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area.
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm.
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

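/* Transmit path: ensure the skb has enough headroom, is private and linear,
 * wrap it in a frame descriptor and enqueue it to the Tx queuing destination,
 * retrying while the software portal is busy.
 */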
static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;
	return 0;
}

static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_filter_block *block)
{
	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
	int err;

	/* Offload all the mirror entries found in the block on this new port
	 * joining it.
	 */
	err = dpaa2_switch_block_offload_mirror(block, port_priv);
	if (err)
		return err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a tc block.
	 */
	if (port_priv->filter_block == block)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_block->ports == 0)
		old_block->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}

static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *new_block;
	int err;

	/* Unoffload all the mirror entries found in the block from the
	 * port leaving it.
	 */
	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
	if (err)
		return err;

	/* We are the last port that leaves a block (an ACL table).
	 * We'll continue to use this table.
	 */
	if (block->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
	if (err)
		return err;

	if (block->ports == 0)
		block->in_use = false;

	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
	new_block->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}

static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the filter block is not already known, then this port
		 * must be the first to join it. In this case, we can just
		 * continue to use our private table.
		 */
		filter_block = port_priv->filter_block;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, filter_block, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		filter_block = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	filter_block = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name	= dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
{
	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = port_priv->ethsw_data->mc_io;
	mac->net_dev = port_priv->netdev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	port_priv->mac = mac;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(port_priv->netdev,
				   "Error connecting to the MAC endpoint %pe\n",
				   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	port_priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}

static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
	if (dpaa2_switch_port_is_type_phy(port_priv))
		dpaa2_mac_disconnect(port_priv->mac);

	if (!dpaa2_switch_port_has_mac(port_priv))
		return;

	dpaa2_mac_close(port_priv->mac);
	kfree(port_priv->mac);
	port_priv->mac = NULL;
}

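/* Threaded IRQ handler: read the interface IRQ status, propagate link state
 * and MAC address changes, connect or disconnect the MAC endpoint when it
 * changes, then acknowledge the events.
 */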
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct ethsw_port_priv *port_priv;
	u32 status = ~0;
	int err, if_id;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);
		goto out;
	}

	if_id = (status & 0xFFFF0000) >> 16;
	port_priv = ethsw->ports[if_id];

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
		dpaa2_switch_port_link_state_update(port_priv->netdev);
		dpaa2_switch_port_set_mac_addr(port_priv);
	}

	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
		rtnl_lock();
		if (dpaa2_switch_port_has_mac(port_priv))
			dpaa2_switch_port_disconnect_mac(port_priv);
		else
			dpaa2_switch_port_connect_mac(port_priv);
		rtnl_unlock();
	}

out:
	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    DPSW_IRQ_INDEX_IF, status);
	if (err)
		dev_err(dev, "Can't clear irq status (err %d)\n", err);

	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->virq, NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->virq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port.
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* This is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it.
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port.
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
			if (ethsw->ports[i] &&
			    ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */
		}

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;

static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev,
					 struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interface from a different DPSW is in the bridge already");
			return -EINVAL;
		}
	}

	/* Delete the default VLAN entry since we are about to join a bridge */
	err = dpaa2_switch_port_del_vlan(port_priv, DEFAULT_VLAN_ID);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
					    &dpaa2_switch_port_switchdev_nb,
					    &dpaa2_switch_port_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	return 0;

err_switchdev_offload:
err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}

static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
	switchdev_bridge_port_unoffload(netdev, NULL,
					&dpaa2_switch_port_switchdev_nb,
					&dpaa2_switch_port_switchdev_blocking_nb);
}

static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learnt FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave.
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port.
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable of being VLAN unaware.
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have RTNL */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}

static int
dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	int err;

	if (!br_vlan_enabled(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
		return -EOPNOTSUPP;
	}

	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot join a bridge while VLAN uppers are present");
		return err;
	}

	return 0;
}

static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
								upper_dev,
								extack);
		if (err)
			goto out;

		if (!info->linking)
			dpaa2_switch_port_pre_bridge_leave(netdev);

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev,
								    upper_dev,
								    extack);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

out:
	return notifier_from_errno(err);
}

struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

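/* Process one deferred switchdev FDB add/del event under the RTNL lock, then
 * drop the references taken when the work item was queued.
 */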
static void dpaa2_switch_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
							   fdb_info->addr);
		else
			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
							   fdb_info->addr);
		if (err)
			break;
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2258
/* Called under rcu_read_lock() */
static int dpaa2_switch_port_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_port_priv *port_priv = netdev_priv(dev);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return dpaa2_switch_port_attr_set_event(dev, ptr);

	if (!dpaa2_switch_port_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
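		/* The fdb_info passed by the notifier is only valid for the
		 * duration of the call, so take a deep copy of the MAC
		 * address for the deferred work item.
		 */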
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Take a reference on the device to avoid being freed. */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw->workqueue, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int dpaa2_switch_port_obj_event(unsigned long event,
				       struct net_device *netdev,
				       struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return dpaa2_switch_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return dpaa2_switch_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
						     const struct dpaa2_fd *fd)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct device *dev = ethsw->dev;
	struct sk_buff *skb = NULL;
	void *fd_vaddr;

	fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
	dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
		       DMA_FROM_DEVICE);

	skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb)) {
		dev_err(dev, "build_skb() failed\n");
		return NULL;
	}

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	ethsw->buf_count--;

	return skb;
}

static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
				 const struct dpaa2_fd *fd)
{
	dpaa2_switch_free_fd(fq->ethsw, fd);
}

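/* Consume one frame received on the control interface: identify the ingress
 * switch port, build an skb and pass it up the network stack.
 */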
static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
			    const struct dpaa2_fd *fd)
{
	struct ethsw_core *ethsw = fq->ethsw;
	struct ethsw_port_priv *port_priv;
	struct net_device *netdev;
	struct vlan_ethhdr *hdr;
	struct sk_buff *skb;
	u16 vlan_tci, vid;
	int if_id, err;

	/* get switch ingress interface ID */
	if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;

	if (if_id >= ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "Frame received from unknown interface!\n");
		goto err_free_fd;
	}
	port_priv = ethsw->ports[if_id];
	netdev = port_priv->netdev;

	/* Only admit frames in single buffer format */
	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
		if (net_ratelimit())
			netdev_err(netdev, "Received invalid frame format\n");
		goto err_free_fd;
	}

	skb = dpaa2_switch_build_linear_skb(ethsw, fd);
	if (unlikely(!skb))
		goto err_free_fd;

	skb_reset_mac_header(skb);

	/* The switch always operates VLAN aware, so every frame reaches the
	 * CPU tagged. If the VID matches the PVID of the ingress port, strip
	 * the VLAN header: the tag was added on ingress and is not part of
	 * the original frame.
	 */
	hdr = vlan_eth_hdr(skb);
	vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
	if (vid == port_priv->pvid) {
		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err) {
			dev_info(ethsw->dev, "__skb_vlan_pop() returned %d\n", err);
			goto err_free_fd;
		}
	}

	skb->dev = netdev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Setup the offload_fwd_mark only if the port is under a bridge */
	skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);

	netif_receive_skb(skb);

	return;

err_free_fd:
	dpaa2_switch_free_fd(ethsw, fd);
}

static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
{
	ethsw->features = 0;

	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
}

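/* Retrieve the FQIDs of the control interface queues (one Rx queue, one Tx
 * confirmation/error queue) from the DPSW object.
 */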
static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_attr ctrl_if_attr;
	struct device *dev = ethsw->dev;
	int i = 0;
	int err;

	err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  &ctrl_if_attr);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
		return err;
	}

	ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_RX;

	ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;

	return 0;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
{
	struct device *dev = ethsw->dev;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
			       DMA_FROM_DEVICE);
		free_pages((unsigned long)vaddr, 0);
	}
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
{
	struct device *dev = ethsw->dev;
	u64 buf_array[BUFS_PER_CMD];
	struct page *page;
	int retries = 0;
	dma_addr_t addr;
	int err;
	int i;

	for (i = 0; i < BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page) {
			dev_err(dev, "buffer allocation failed\n");
			goto err_alloc;
		}

		addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, addr)) {
			dev_err(dev, "dma_map_page() failed\n");
			goto err_map;
		}
		buf_array[i] = addr;
	}

release_bufs:
	/* In case the portal is busy, retry until successful or
	 * max retries hit.
	 */
	while ((err = dpaa2_io_service_release(NULL, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
			break;

		cpu_relax();
	}

	/* If the release command failed, clean up and bail out */
	if (err) {
		dpaa2_switch_free_bufs(ethsw, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

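/* Refill the buffer pool up to DPAA2_ETHSW_NUM_BUFS once the number of
 * buffers owned by hardware drops below the refill threshold. Called from
 * the NAPI poll loop.
 */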
static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
{
	int *count = &ethsw->buf_count;
	int new_count;
	int err = 0;

	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
		do {
			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
			if (unlikely(!new_count)) {
				/* Avoid looping forever if we've temporarily
				 * run out of memory. We'll try again at the
				 * next NAPI cycle.
				 */
				break;
			}
			*count += new_count;
		} while (*count < DPAA2_ETHSW_NUM_BUFS);

		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
			err = -ENOMEM;
	}

	return err;
}

static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
	int *count, i;

	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
		count = &ethsw->buf_count;
		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);

		if (unlikely(*count < BUFS_PER_CMD))
			return -ENOMEM;
	}

	return 0;
}

static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
{
	u64 buf_array[BUFS_PER_CMD];
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
					       buf_array, BUFS_PER_CMD);
		if (ret < 0) {
			dev_err(ethsw->dev,
				"dpaa2_io_service_acquire() = %d\n", ret);
			return;
		}
		dpaa2_switch_free_bufs(ethsw, buf_array, ret);

	} while (ret);
}

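/* Allocate a DPBP object, enable it and configure it as the single buffer
 * pool backing the control interface Rx path.
 */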
static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
	struct device *dev = ethsw->dev;
	struct fsl_mc_device *dpbp_dev;
	struct dpbp_attr dpbp_attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}
	ethsw->dpbp_dev = dpbp_dev;

	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;

	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     &dpsw_ctrl_if_pools_cfg);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
		goto err_get_attr;
	}
	ethsw->bpid = dpbp_attrs.id;

	return 0;

err_get_attr:
	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);
	return err;
}

static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
{
	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	fsl_mc_object_free(ethsw->dpbp_dev);
}

static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		ethsw->fq[i].store =
			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
					      ethsw->dev);
		if (!ethsw->fq[i].store) {
			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
			while (--i >= 0)
				dpaa2_io_store_destroy(ethsw->fq[i].store);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_store_destroy(ethsw->fq[i].store);
}

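/* Pull a batch of frames from the given FQ into its dequeue store */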
static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
	int err, retries = 0;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	if (unlikely(err))
		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d\n", err);

	return err;
}

/* Consume all frames pull-dequeued into the store */
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
	struct ethsw_core *ethsw = fq->ethsw;
	int cleaned = 0, is_last;
	struct dpaa2_dq *dq;
	int retries = 0;

	do {
		/* Get the next available FD from the store */
		dq = dpaa2_io_store_next(fq->store, &is_last);
		if (unlikely(!dq)) {
			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
				dev_err_once(ethsw->dev,
					     "No valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		if (fq->type == DPSW_QUEUE_RX)
			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
		else
			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
		cleaned++;

	} while (!is_last);

	return cleaned;
}

/* NAPI poll routine for the control interface queues */
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
	int err, cleaned = 0, store_cleaned, work_done;
	struct dpaa2_switch_fq *fq;
	int retries = 0;

	fq = container_of(napi, struct dpaa2_switch_fq, napi);

	do {
		err = dpaa2_switch_pull_fq(fq);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_switch_refill_bp(fq->ethsw);

		store_cleaned = dpaa2_switch_store_consume(fq);
		cleaned += store_cleaned;

		if (cleaned >= budget) {
			work_done = budget;
			goto out;
		}

	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and re-enable
	 * data availability notifications
	 */
	napi_complete_done(napi, cleaned);
	do {
		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

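	/* Always report at least one unit of work done, even if no frame was
	 * dequeued on this pass.
	 */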
	work_done = max(cleaned, 1);
out:
	return work_done;
}

static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_switch_fq *fq;

	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

	napi_schedule(&fq->napi);
}

static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_queue_cfg queue_cfg;
	struct dpaa2_io_notification_ctx *nctx;
	int err, i, j;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		nctx = &ethsw->fq[i].nctx;

		/* Register a new software context for the FQID. By using NULL
		 * as the first parameter, we specify that we do not care on
		 * which cpu the interrupts for this queue are received.
		 */
		nctx->is_cdan = 0;
		nctx->id = ethsw->fq[i].fqid;
		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
		nctx->cb = dpaa2_switch_fqdan_cb;
		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
		if (err) {
			err = -EPROBE_DEFER;
			goto err_register;
		}

		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
		queue_cfg.dest_cfg.priority = 0;
		queue_cfg.user_ctx = nctx->qman64;

		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     ethsw->fq[i].type,
					     &queue_cfg);
		if (err)
			goto err_set_queue;
	}

	return 0;

err_set_queue:
	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
	for (j = 0; j < i; j++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
					    ethsw->dev);

	return err;
}

static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
					    ethsw->dev);
}

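/* Set up everything needed by the control interface: FQs, the backing
 * buffer pool, dequeue stores and DPIO notifications, then enable it.
 */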
static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
{
	int err;

	/* setup FQs for Rx and Tx Conf */
	err = dpaa2_switch_setup_fqs(ethsw);
	if (err)
		return err;

	/* setup the buffer pool needed on the Rx path */
	err = dpaa2_switch_setup_dpbp(ethsw);
	if (err)
		return err;

	err = dpaa2_switch_alloc_rings(ethsw);
	if (err)
		goto err_free_dpbp;

	err = dpaa2_switch_setup_dpio(ethsw);
	if (err)
		goto err_destroy_rings;

	err = dpaa2_switch_seed_bp(ethsw);
	if (err)
		goto err_deregister_dpio;

	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
		goto err_drain_dpbp;
	}

	return 0;

err_drain_dpbp:
	dpaa2_switch_drain_bp(ethsw);
err_deregister_dpio:
	dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
	dpaa2_switch_destroy_rings(ethsw);
err_free_dpbp:
	dpaa2_switch_free_dpbp(ethsw);

	return err;
}

static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
				     u16 port_idx)
{
	struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];

	rtnl_lock();
	dpaa2_switch_port_disconnect_mac(port_priv);
	rtnl_unlock();
	free_netdev(port_priv->netdev);
	ethsw->ports[port_idx] = NULL;
}

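/* Open and configure the DPSW object: validate the firmware API version,
 * reset the switch, remove the default VLAN from all interfaces and create
 * the ordered workqueue used to offload switchdev events.
 */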
static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct dpsw_vlan_if_cfg vcfg = {0};
	struct dpsw_tci_cfg tci_cfg = {0};
	struct dpsw_stp_cfg stp_cfg;
	int err;
	u16 i;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &ethsw->major,
				   &ethsw->minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
			ethsw->major, ethsw->minor);
		err = -EOPNOTSUPP;
		goto err_close;
	}

	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
		err = -EOPNOTSUPP;
		goto err_close;
	}

	dpaa2_switch_detect_features(ethsw);

	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
		if (err) {
			dev_err(dev, "dpsw_if_disable err %d\n", err);
			goto err_close;
		}

		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		/* Switch starts with all ports configured to VLAN 1. Need to
		 * remove this setting to allow configuration at bridge join
		 */
		vcfg.num_ifs = 1;
		vcfg.if_id[0] = i;
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
						   DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
				err);
			goto err_close;
		}

		tci_cfg.vlan_id = 4095;
		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
			goto err_close;
		}

		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
			goto err_close;
		}
	}

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
	if (err) {
		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
		goto err_close;
	}

	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
						   WQ_MEM_RECLAIM, "ethsw",
						   ethsw->sw_attr.id);
	if (!ethsw->workqueue) {
		err = -ENOMEM;
		goto err_close;
	}

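	/* Remove the default FDB table (id 0); each switch port allocates its
	 * own private FDB table in dpaa2_switch_port_init().
	 */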
	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
	if (err)
		goto err_destroy_ordered_workqueue;

	err = dpaa2_switch_ctrl_if_setup(ethsw);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw->workqueue);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}

/* Add an ACL entry redirecting frames with the given destination MAC
 * address to the control interface
 */
static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
					   const char *mac)
{
	struct dpaa2_switch_acl_entry acl_entry = {0};

	/* Match on the destination MAC address */
	ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
	eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);

	/* Trap to CPU */
	acl_entry.cfg.precedence = 0;
	acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;

	return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
}

static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = DEFAULT_VLAN_ID,
		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
	};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct dpsw_fdb_cfg fdb_cfg = {0};
	struct dpsw_if_attr dpsw_if_attr;
	struct dpaa2_switch_fdb *fdb;
	struct dpsw_acl_cfg acl_cfg;
	u16 fdb_id, acl_tbl_id;
	int err;

	/* Get the Tx queue for this specific port */
	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     port_priv->idx, &dpsw_if_attr);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
		return err;
	}
	port_priv->tx_qdid = dpsw_if_attr.qdid;

	/* Create a FDB table for this particular switch port */
	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &fdb_id, &fdb_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
		return err;
	}

	/* Find an unused dpaa2_switch_fdb structure and use it */
	fdb = dpaa2_switch_fdb_get_unused(ethsw);
	fdb->fdb_id = fdb_id;
	fdb->in_use = true;
	fdb->bridge_dev = NULL;
	port_priv->fdb = fdb;

	/* We need to add VLAN 1 as the PVID on this port until it is under a
	 * bridge since the DPAA2 switch is not able to handle the traffic in
	 * a VLAN unaware fashion
	 */
	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
	if (err)
		return err;

	/* Setup the egress flooding domains (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Create an ACL table to be used by this switch port */
	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &acl_tbl_id, &acl_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
		return err;
	}

	filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
	filter_block->ethsw = ethsw;
	filter_block->acl_id = acl_tbl_id;
	filter_block->in_use = true;
	filter_block->num_acl_rules = 0;
	INIT_LIST_HEAD(&filter_block->acl_entries);
	INIT_LIST_HEAD(&filter_block->mirror_entries);

	err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
	if (err)
		return err;

	/* Trap the STP multicast MAC address to the control interface */
	return dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
}

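/* Disable the control interface and free all the resources backing it */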
static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	dpaa2_switch_free_dpio(ethsw);
	dpaa2_switch_destroy_rings(ethsw);
	dpaa2_switch_drain_bp(ethsw);
	dpaa2_switch_free_dpbp(ethsw);
}

static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	dpaa2_switch_ctrl_if_teardown(ethsw);

	destroy_workqueue(ethsw->workqueue);

	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err)
		dev_warn(dev, "dpsw_close err %d\n", err);
}

static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
	struct ethsw_port_priv *port_priv;
	struct ethsw_core *ethsw;
	struct device *dev;
	int i;

	dev = &sw_dev->dev;
	ethsw = dev_get_drvdata(dev);

	dpaa2_switch_teardown_irqs(sw_dev);

	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		port_priv = ethsw->ports[i];
		unregister_netdev(port_priv->netdev);
		dpaa2_switch_remove_port(ethsw, i);
	}

	kfree(ethsw->fdbs);
	kfree(ethsw->filter_blocks);
	kfree(ethsw->ports);

	dpaa2_switch_teardown(sw_dev);

	fsl_mc_portal_free(ethsw->mc_io);

	kfree(ethsw);

	dev_set_drvdata(dev, NULL);

	return 0;
}

static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
				   u16 port_idx)
{
	struct ethsw_port_priv *port_priv;
	struct device *dev = ethsw->dev;
	struct net_device *port_netdev;
	int err;

	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
	if (!port_netdev) {
		dev_err(dev, "alloc_etherdev error\n");
		return -ENOMEM;
	}

	port_priv = netdev_priv(port_netdev);
	port_priv->netdev = port_netdev;
	port_priv->ethsw_data = ethsw;

	port_priv->idx = port_idx;
	port_priv->stp_state = BR_STATE_FORWARDING;

	SET_NETDEV_DEV(port_netdev, dev);
	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Set MTU limits */
	port_netdev->min_mtu = ETH_MIN_MTU;
	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

	/* Populate the private port structure so that sibling ports
	 * can access it
	 */
	ethsw->ports[port_idx] = port_priv;

	/* The DPAA2 switch's ingress path depends on the VLAN table,
	 * thus we are not able to disable VLAN filtering.
	 */
	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
				NETIF_F_HW_VLAN_STAG_FILTER |
				NETIF_F_HW_TC;

	err = dpaa2_switch_port_init(port_priv, port_idx);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_mac_addr(port_priv);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		goto err_port_probe;
	port_priv->learn_ena = false;

	err = dpaa2_switch_port_connect_mac(port_priv);
	if (err)
		goto err_port_probe;

	return 0;

err_port_probe:
	free_netdev(port_netdev);
	ethsw->ports[port_idx] = NULL;

	return err;
}

static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw;
	int i, err;

	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
	if (!ethsw)
		return -ENOMEM;

	ethsw->dev = dev;
	ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
	dev_set_drvdata(dev, ethsw);

	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &ethsw->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
		goto err_free_drvdata;
	}

	err = dpaa2_switch_init(sw_dev);
	if (err)
		goto err_free_cmdport;

	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
			       GFP_KERNEL);
	if (!ethsw->ports) {
		err = -ENOMEM;
		goto err_teardown;
	}

	ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
			      GFP_KERNEL);
	if (!ethsw->fdbs) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
				       sizeof(*ethsw->filter_blocks),
				       GFP_KERNEL);
	if (!ethsw->filter_blocks) {
		err = -ENOMEM;
		goto err_free_fdbs;
	}

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpaa2_switch_probe_port(ethsw, i);
		if (err)
			goto err_free_netdev;
	}

	/* Add a NAPI instance for each of the Rx queues. The first port's
	 * net_device will be used as the parent net_device.
	 */
	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		netif_napi_add(ethsw->ports[0]->netdev,
			       &ethsw->fq[i].napi, dpaa2_switch_poll,
			       NAPI_POLL_WEIGHT);

	err = dpaa2_switch_setup_irqs(sw_dev);
	if (err)
		goto err_stop;

	/* By convention, if the mirror port is equal to the number of switch
	 * interfaces, then mirroring of any kind is disabled.
	 */
	ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	/* Register the netdevs only when the entire setup is done and the
	 * switch port interfaces are ready to receive traffic
	 */
	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = register_netdev(ethsw->ports[i]->netdev);
		if (err < 0) {
			dev_err(dev, "register_netdev error %d\n", err);
			goto err_unregister_ports;
		}
	}

	return 0;

err_unregister_ports:
	for (i--; i >= 0; i--)
		unregister_netdev(ethsw->ports[i]->netdev);
	dpaa2_switch_teardown_irqs(sw_dev);
err_stop:
	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
err_free_netdev:
	for (i--; i >= 0; i--)
		dpaa2_switch_remove_port(ethsw, i);
	kfree(ethsw->filter_blocks);
err_free_fdbs:
	kfree(ethsw->fdbs);
err_free_ports:
	kfree(ethsw->ports);

err_teardown:
	dpaa2_switch_teardown(sw_dev);

err_free_cmdport:
	fsl_mc_portal_free(ethsw->mc_io);

err_free_drvdata:
	kfree(ethsw);
	dev_set_drvdata(dev, NULL);

	return err;
}

static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpsw",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);

static struct fsl_mc_driver dpaa2_switch_drv = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_switch_probe,
	.remove = dpaa2_switch_remove,
	.match_id_table = dpaa2_switch_match_id_table
};

static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
	.notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
	.notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
	.notifier_call = dpaa2_switch_port_blocking_event,
};

static int dpaa2_switch_register_notifiers(void)
{
	int err;

	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
		return err;
	}

	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&dpaa2_switch_port_nb);

	return err;
}

static void dpaa2_switch_unregister_notifiers(void)
{
	int err;

	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
		       err);

	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}

static int __init dpaa2_switch_driver_init(void)
{
	int err;

	err = fsl_mc_driver_register(&dpaa2_switch_drv);
	if (err)
		return err;

	err = dpaa2_switch_register_notifiers();
	if (err) {
		fsl_mc_driver_unregister(&dpaa2_switch_drv);
		return err;
	}

	return 0;
}

static void __exit dpaa2_switch_driver_exit(void)
{
	dpaa2_switch_unregister_notifiers();
	fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");