1
2
3
4
5
6
7
8
9
10#include <linux/module.h>
11
12#include <linux/interrupt.h>
13#include <linux/msi.h>
14#include <linux/kthread.h>
15#include <linux/workqueue.h>
16
17#include <linux/fsl/mc.h>
18
19#include "ethsw.h"
20
/* Ordered workqueue used to defer switchdev FDB notifications out of
 * atomic notifier context (the work handler takes rtnl_lock, which may
 * sleep).
 */
static struct workqueue_struct *ethsw_owq;

/* Minimum DPSW object API version supported by this driver */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 0

/* VLAN id used for the default/initial switch configuration */
#define DEFAULT_VLAN_ID 1
28
29static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
30{
31 int err;
32
33 struct dpsw_vlan_cfg vcfg = {
34 .fdb_id = 0,
35 };
36
37 if (ethsw->vlans[vid]) {
38 dev_err(ethsw->dev, "VLAN already configured\n");
39 return -EEXIST;
40 }
41
42 err = dpsw_vlan_add(ethsw->mc_io, 0,
43 ethsw->dpsw_handle, vid, &vcfg);
44 if (err) {
45 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
46 return err;
47 }
48 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
49
50 return 0;
51}
52
/* Set the port's PVID (the VLAN assigned to untagged ingress frames).
 *
 * The TCI update is only issued while the interface is disabled: an
 * operationally-up port is disabled around dpsw_if_set_tci() and
 * re-enabled afterwards on both the success and failure paths.
 */
static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool is_oper;
	int err, ret;

	/* Read-modify-write: keep the current PCP/DEI fields, change only
	 * the VLAN id.
	 */
	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface is taken down for the duration of the TCI change */
	is_oper = netif_oper_up(netdev);
	if (is_oper) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Driver bookkeeping is only updated once the HW accepted the
	 * change: move the PVID flag from the old VLAN to the new one.
	 */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	/* Re-enable the interface even if setting the TCI failed */
	if (is_oper) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}
107
/* Make this port a member of VLAN @vid (the VLAN must already exist on
 * the switch) and honour the bridge flags: BRIDGE_VLAN_INFO_UNTAGGED
 * requests untagged egress, BRIDGE_VLAN_INFO_PVID makes @vid the PVID.
 */
static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
			       u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = ethsw_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}
151
152static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
153{
154 enum dpsw_fdb_learning_mode learn_mode;
155 int err;
156
157 if (flag)
158 learn_mode = DPSW_FDB_LEARNING_MODE_HW;
159 else
160 learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
161
162 err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
163 learn_mode);
164 if (err) {
165 dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
166 return err;
167 }
168 ethsw->learning = !!flag;
169
170 return 0;
171}
172
173static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
174{
175 int err;
176
177 err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
178 port_priv->ethsw_data->dpsw_handle,
179 port_priv->idx, flag);
180 if (err) {
181 netdev_err(port_priv->netdev,
182 "dpsw_if_set_flooding err %d\n", err);
183 return err;
184 }
185 port_priv->flood = !!flag;
186
187 return 0;
188}
189
190static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
191{
192 struct dpsw_stp_cfg stp_cfg = {
193 .vlan_id = DEFAULT_VLAN_ID,
194 .state = state,
195 };
196 int err;
197
198 if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
199 return 0;
200
201 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
202 port_priv->ethsw_data->dpsw_handle,
203 port_priv->idx, &stp_cfg);
204 if (err) {
205 netdev_err(port_priv->netdev,
206 "dpsw_if_set_stp err %d\n", err);
207 return err;
208 }
209
210 port_priv->stp_state = state;
211
212 return 0;
213}
214
215static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
216{
217 struct ethsw_port_priv *ppriv_local = NULL;
218 int i, err;
219
220 if (!ethsw->vlans[vid])
221 return -ENOENT;
222
223 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
224 if (err) {
225 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
226 return err;
227 }
228 ethsw->vlans[vid] = 0;
229
230 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
231 ppriv_local = ethsw->ports[i];
232 ppriv_local->vlans[vid] = 0;
233 }
234
235 return 0;
236}
237
238static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
239 const unsigned char *addr)
240{
241 struct dpsw_fdb_unicast_cfg entry = {0};
242 int err;
243
244 entry.if_egress = port_priv->idx;
245 entry.type = DPSW_FDB_ENTRY_STATIC;
246 ether_addr_copy(entry.mac_addr, addr);
247
248 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
249 port_priv->ethsw_data->dpsw_handle,
250 0, &entry);
251 if (err)
252 netdev_err(port_priv->netdev,
253 "dpsw_fdb_add_unicast err %d\n", err);
254 return err;
255}
256
257static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
258 const unsigned char *addr)
259{
260 struct dpsw_fdb_unicast_cfg entry = {0};
261 int err;
262
263 entry.if_egress = port_priv->idx;
264 entry.type = DPSW_FDB_ENTRY_STATIC;
265 ether_addr_copy(entry.mac_addr, addr);
266
267 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
268 port_priv->ethsw_data->dpsw_handle,
269 0, &entry);
270
271 if (err && err != -ENXIO)
272 netdev_err(port_priv->netdev,
273 "dpsw_fdb_remove_unicast err %d\n", err);
274 return err;
275}
276
277static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
278 const unsigned char *addr)
279{
280 struct dpsw_fdb_multicast_cfg entry = {0};
281 int err;
282
283 ether_addr_copy(entry.mac_addr, addr);
284 entry.type = DPSW_FDB_ENTRY_STATIC;
285 entry.num_ifs = 1;
286 entry.if_id[0] = port_priv->idx;
287
288 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
289 port_priv->ethsw_data->dpsw_handle,
290 0, &entry);
291
292 if (err && err != -ENXIO)
293 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
294 err);
295 return err;
296}
297
298static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
299 const unsigned char *addr)
300{
301 struct dpsw_fdb_multicast_cfg entry = {0};
302 int err;
303
304 ether_addr_copy(entry.mac_addr, addr);
305 entry.type = DPSW_FDB_ENTRY_STATIC;
306 entry.num_ifs = 1;
307 entry.if_id[0] = port_priv->idx;
308
309 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
310 port_priv->ethsw_data->dpsw_handle,
311 0, &entry);
312
313 if (err && err != -ENAVAIL)
314 netdev_err(port_priv->netdev,
315 "dpsw_fdb_remove_multicast err %d\n", err);
316 return err;
317}
318
319static void port_get_stats(struct net_device *netdev,
320 struct rtnl_link_stats64 *stats)
321{
322 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
323 u64 tmp;
324 int err;
325
326 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
327 port_priv->ethsw_data->dpsw_handle,
328 port_priv->idx,
329 DPSW_CNT_ING_FRAME, &stats->rx_packets);
330 if (err)
331 goto error;
332
333 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
334 port_priv->ethsw_data->dpsw_handle,
335 port_priv->idx,
336 DPSW_CNT_EGR_FRAME, &stats->tx_packets);
337 if (err)
338 goto error;
339
340 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
341 port_priv->ethsw_data->dpsw_handle,
342 port_priv->idx,
343 DPSW_CNT_ING_BYTE, &stats->rx_bytes);
344 if (err)
345 goto error;
346
347 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
348 port_priv->ethsw_data->dpsw_handle,
349 port_priv->idx,
350 DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
351 if (err)
352 goto error;
353
354 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
355 port_priv->ethsw_data->dpsw_handle,
356 port_priv->idx,
357 DPSW_CNT_ING_FRAME_DISCARD,
358 &stats->rx_dropped);
359 if (err)
360 goto error;
361
362 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
363 port_priv->ethsw_data->dpsw_handle,
364 port_priv->idx,
365 DPSW_CNT_ING_FLTR_FRAME,
366 &tmp);
367 if (err)
368 goto error;
369 stats->rx_dropped += tmp;
370
371 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
372 port_priv->ethsw_data->dpsw_handle,
373 port_priv->idx,
374 DPSW_CNT_EGR_FRAME_DISCARD,
375 &stats->tx_dropped);
376 if (err)
377 goto error;
378
379 return;
380
381error:
382 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
383}
384
385static bool port_has_offload_stats(const struct net_device *netdev,
386 int attr_id)
387{
388 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
389}
390
391static int port_get_offload_stats(int attr_id,
392 const struct net_device *netdev,
393 void *sp)
394{
395 switch (attr_id) {
396 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
397 port_get_stats((struct net_device *)netdev, sp);
398 return 0;
399 }
400
401 return -EINVAL;
402}
403
404static int port_change_mtu(struct net_device *netdev, int mtu)
405{
406 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
407 int err;
408
409 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
410 0,
411 port_priv->ethsw_data->dpsw_handle,
412 port_priv->idx,
413 (u16)ETHSW_L2_MAX_FRM(mtu));
414 if (err) {
415 netdev_err(netdev,
416 "dpsw_if_set_max_frame_length() err %d\n", err);
417 return err;
418 }
419
420 netdev->mtu = mtu;
421 return 0;
422}
423
424static int port_carrier_state_sync(struct net_device *netdev)
425{
426 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
427 struct dpsw_link_state state;
428 int err;
429
430 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
431 port_priv->ethsw_data->dpsw_handle,
432 port_priv->idx, &state);
433 if (err) {
434 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
435 return err;
436 }
437
438 WARN_ONCE(state.up > 1, "Garbage read into link_state");
439
440 if (state.up != port_priv->link_state) {
441 if (state.up)
442 netif_carrier_on(netdev);
443 else
444 netif_carrier_off(netdev);
445 port_priv->link_state = state.up;
446 }
447 return 0;
448}
449
/* ndo_open: enable the HW interface and sync the carrier state.
 * If the carrier sync fails, the interface is disabled again.
 */
static int port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* CPU-injected traffic is not supported (see port_dropframe), so
	 * keep the TX queues stopped.
	 */
	netif_tx_stop_all_queues(netdev);

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	/* sync carrier state */
	err = port_carrier_state_sync(netdev);
	if (err) {
		netdev_err(netdev,
			   "port_carrier_state_sync err %d\n", err);
		goto err_carrier_sync;
	}

	return 0;

err_carrier_sync:
	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			port_priv->ethsw_data->dpsw_handle,
			port_priv->idx);
	return err;
}
482
483static int port_stop(struct net_device *netdev)
484{
485 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
486 int err;
487
488 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
489 port_priv->ethsw_data->dpsw_handle,
490 port_priv->idx);
491 if (err) {
492 netdev_err(netdev, "dpsw_if_disable err %d\n", err);
493 return err;
494 }
495
496 return 0;
497}
498
499static netdev_tx_t port_dropframe(struct sk_buff *skb,
500 struct net_device *netdev)
501{
502
503 dev_kfree_skb_any(skb);
504
505 return NETDEV_TX_OK;
506}
507
508static int swdev_get_port_parent_id(struct net_device *dev,
509 struct netdev_phys_item_id *ppid)
510{
511 struct ethsw_port_priv *port_priv = netdev_priv(dev);
512
513 ppid->id_len = 1;
514 ppid->id[0] = port_priv->ethsw_data->dev_id;
515
516 return 0;
517}
518
/* Netdev callbacks for a switch port. CPU TX is not supported, so
 * ndo_start_xmit only drops frames.
 */
static const struct net_device_ops ethsw_port_ops = {
	.ndo_open = port_open,
	.ndo_stop = port_stop,

	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = port_change_mtu,
	.ndo_has_offload_stats = port_has_offload_stats,
	.ndo_get_offload_stats = port_get_offload_stats,

	.ndo_start_xmit = port_dropframe,
	.ndo_get_port_parent_id = swdev_get_port_parent_id,
};
531
532static void ethsw_links_state_update(struct ethsw_core *ethsw)
533{
534 int i;
535
536 for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
537 port_carrier_state_sync(ethsw->ports[i]->netdev);
538}
539
/* Threaded IRQ handler for DPSW interface events; only link changes are
 * currently acted upon.
 */
static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);

	/* Overwritten by dpsw_get_irq_status() when the read succeeds;
	 * on failure the value is unused (all events are cleared below).
	 */
	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
	int err;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)", err);

		/* Best effort: acknowledge everything so the line is not
		 * left asserted.
		 */
		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
		if (err)
			dev_err(dev, "Can't clear irq status (err %d)", err);
		goto out;
	}

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
		ethsw_links_state_update(ethsw);

out:
	return IRQ_HANDLED;
}
567
/* Allocate and wire up the DPSW MC interrupt. The DPSW-side interrupt is
 * kept disabled until the Linux handler is registered and the event mask
 * is programmed, to avoid events firing during setup.
 */
static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	/* Keep the DPSW interrupt disabled while setting everything up */
	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	/* All handling is done in the threaded handler; MC commands sleep */
	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					ethsw_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}
628
/* Disable the DPSW-side interrupt (best effort) and release the MC irqs.
 * The Linux-side handler itself is devm-managed and freed on detach.
 */
static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}
642
643static int port_attr_stp_state_set(struct net_device *netdev,
644 struct switchdev_trans *trans,
645 u8 state)
646{
647 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
648
649 if (switchdev_trans_ph_prepare(trans))
650 return 0;
651
652 return ethsw_port_set_stp_state(port_priv, state);
653}
654
655static int port_attr_br_flags_pre_set(struct net_device *netdev,
656 struct switchdev_trans *trans,
657 unsigned long flags)
658{
659 if (flags & ~(BR_LEARNING | BR_FLOOD))
660 return -EINVAL;
661
662 return 0;
663}
664
665static int port_attr_br_flags_set(struct net_device *netdev,
666 struct switchdev_trans *trans,
667 unsigned long flags)
668{
669 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
670 int err = 0;
671
672 if (switchdev_trans_ph_prepare(trans))
673 return 0;
674
675
676 err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
677 if (err)
678 goto exit;
679
680 err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
681
682exit:
683 return err;
684}
685
686static int swdev_port_attr_set(struct net_device *netdev,
687 const struct switchdev_attr *attr,
688 struct switchdev_trans *trans)
689{
690 int err = 0;
691
692 switch (attr->id) {
693 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
694 err = port_attr_stp_state_set(netdev, trans,
695 attr->u.stp_state);
696 break;
697 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
698 err = port_attr_br_flags_pre_set(netdev, trans,
699 attr->u.brport_flags);
700 break;
701 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
702 err = port_attr_br_flags_set(netdev, trans,
703 attr->u.brport_flags);
704 break;
705 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
706
707 break;
708 default:
709 err = -EOPNOTSUPP;
710 break;
711 }
712
713 return err;
714}
715
/* Add a range of VLANs to a port (switchdev object add, commit phase).
 * A VLAN unknown to the switch is first created globally and flagged
 * ETHSW_VLAN_GLOBAL so ethsw_port_del_vlan() can tear it down once the
 * last member port leaves.
 */
static int port_vlans_add(struct net_device *netdev,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct switchdev_trans *trans)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int vid, err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		if (!port_priv->ethsw_data->vlans[vid]) {
			/* this is a new VLAN */
			err = ethsw_add_vlan(port_priv->ethsw_data, vid);
			if (err)
				return err;

			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
		}
		err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
		if (err)
			break;
	}

	return err;
}
742
743static int port_lookup_address(struct net_device *netdev, int is_uc,
744 const unsigned char *addr)
745{
746 struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
747 struct netdev_hw_addr *ha;
748
749 netif_addr_lock_bh(netdev);
750 list_for_each_entry(ha, &list->list, list) {
751 if (ether_addr_equal(ha->addr, addr)) {
752 netif_addr_unlock_bh(netdev);
753 return 1;
754 }
755 }
756 netif_addr_unlock_bh(netdev);
757 return 0;
758}
759
/* Offload a bridge MDB entry: program the HW multicast FDB and mirror
 * the address into the netdev mc list, which doubles as duplicate
 * tracking for this port.
 */
static int port_mdb_add(struct net_device *netdev,
			const struct switchdev_obj_port_mdb *mdb,
			struct switchdev_trans *trans)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* Check if address is already set on this port */
	if (port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		/* Roll back the HW entry to stay consistent */
		ethsw_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}
786
787static int swdev_port_obj_add(struct net_device *netdev,
788 const struct switchdev_obj *obj,
789 struct switchdev_trans *trans)
790{
791 int err;
792
793 switch (obj->id) {
794 case SWITCHDEV_OBJ_ID_PORT_VLAN:
795 err = port_vlans_add(netdev,
796 SWITCHDEV_OBJ_PORT_VLAN(obj),
797 trans);
798 break;
799 case SWITCHDEV_OBJ_ID_PORT_MDB:
800 err = port_mdb_add(netdev,
801 SWITCHDEV_OBJ_PORT_MDB(obj),
802 trans);
803 break;
804 default:
805 err = -EOPNOTSUPP;
806 break;
807 }
808
809 return err;
810}
811
/* Remove this port from VLAN @vid, undoing ethsw_port_add_vlan() step by
 * step: clear the PVID role, the untagged-egress setting and finally the
 * membership itself. When the last member port leaves, the VLAN is also
 * deleted from the switch.
 */
static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		err = ethsw_port_set_pvid(port_priv, 0);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		/* Failure is logged but not fatal; the membership removal
		 * below is still attempted.
		 */
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete the VLAN from the switch only if no other port is
		 * still a member of it.
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0;

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = ethsw_dellink_switch(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}
868
869static int port_vlans_del(struct net_device *netdev,
870 const struct switchdev_obj_port_vlan *vlan)
871{
872 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
873 int vid, err = 0;
874
875 if (netif_is_bridge_master(vlan->obj.orig_dev))
876 return -EOPNOTSUPP;
877
878 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
879 err = ethsw_port_del_vlan(port_priv, vid);
880 if (err)
881 break;
882 }
883
884 return err;
885}
886
887static int port_mdb_del(struct net_device *netdev,
888 const struct switchdev_obj_port_mdb *mdb)
889{
890 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
891 int err;
892
893 if (!port_lookup_address(netdev, 0, mdb->addr))
894 return -ENOENT;
895
896 err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
897 if (err)
898 return err;
899
900 err = dev_mc_del(netdev, mdb->addr);
901 if (err) {
902 netdev_err(netdev, "dev_mc_del err %d\n", err);
903 return err;
904 }
905
906 return err;
907}
908
909static int swdev_port_obj_del(struct net_device *netdev,
910 const struct switchdev_obj *obj)
911{
912 int err;
913
914 switch (obj->id) {
915 case SWITCHDEV_OBJ_ID_PORT_VLAN:
916 err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
917 break;
918 case SWITCHDEV_OBJ_ID_PORT_MDB:
919 err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
920 break;
921 default:
922 err = -EOPNOTSUPP;
923 break;
924 }
925 return err;
926}
927
928static int
929ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
930 struct switchdev_notifier_port_attr_info *port_attr_info)
931{
932 int err;
933
934 err = swdev_port_attr_set(netdev, port_attr_info->attr,
935 port_attr_info->trans);
936
937 port_attr_info->handled = true;
938 return notifier_from_errno(err);
939}
940
941
942static int port_bridge_join(struct net_device *netdev,
943 struct net_device *upper_dev)
944{
945 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
946 struct ethsw_core *ethsw = port_priv->ethsw_data;
947 int i, err;
948
949 for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
950 if (ethsw->ports[i]->bridge_dev &&
951 (ethsw->ports[i]->bridge_dev != upper_dev)) {
952 netdev_err(netdev,
953 "Another switch port is connected to %s\n",
954 ethsw->ports[i]->bridge_dev->name);
955 return -EINVAL;
956 }
957
958
959 err = ethsw_port_set_flood(port_priv, 1);
960 if (!err)
961 port_priv->bridge_dev = upper_dev;
962
963 return err;
964}
965
966static int port_bridge_leave(struct net_device *netdev)
967{
968 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
969 int err;
970
971
972 err = ethsw_port_set_flood(port_priv, 0);
973 if (!err)
974 port_priv->bridge_dev = NULL;
975
976 return err;
977}
978
979static bool ethsw_port_dev_check(const struct net_device *netdev)
980{
981 return netdev->netdev_ops == ðsw_port_ops;
982}
983
984static int port_netdevice_event(struct notifier_block *unused,
985 unsigned long event, void *ptr)
986{
987 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
988 struct netdev_notifier_changeupper_info *info = ptr;
989 struct net_device *upper_dev;
990 int err = 0;
991
992 if (!ethsw_port_dev_check(netdev))
993 return NOTIFY_DONE;
994
995
996 if (event == NETDEV_CHANGEUPPER) {
997 upper_dev = info->upper_dev;
998 if (netif_is_bridge_master(upper_dev)) {
999 if (info->linking)
1000 err = port_bridge_join(netdev, upper_dev);
1001 else
1002 err = port_bridge_leave(netdev);
1003 }
1004 }
1005
1006 return notifier_from_errno(err);
1007}
1008
/* Netdevice notifier: tracks bridge enslave/release of switch ports */
static struct notifier_block port_nb __read_mostly = {
	.notifier_call = port_netdevice_event,
};
1012
/* Deferred-work wrapper for a switchdev FDB notification. It owns a
 * reference on @dev and a private copy of fdb_info.addr until the work
 * item has run (see ethsw_switchdev_event_work()).
 */
struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};
1019
/* Work handler: apply a deferred FDB add/del to the HW under rtnl_lock,
 * then release the resources taken by port_switchdev_event().
 */
static void ethsw_switchdev_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (is_unicast_ether_addr(fdb_info->addr))
			ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
		else
			ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (is_unicast_ether_addr(fdb_info->addr))
			ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	/* Release the address copy and device reference taken by
	 * port_switchdev_event()
	 */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
1050
1051
/* Atomic switchdev notifier. FDB add/del events are copied into a work
 * item and queued on the ordered workqueue, because the work handler
 * takes rtnl_lock and may sleep. A reference on @dev and a private copy
 * of the MAC address keep both alive until the work runs.
 */
static int port_switchdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	if (!ethsw_port_dev_check(dev))
		return NOTIFY_DONE;

	/* Attribute sets are handled synchronously */
	if (event == SWITCHDEV_PORT_ATTR_SET)
		return ethsw_switchdev_port_attr_set_event(dev, ptr);

	/* Atomic context: GFP_ATOMIC allocations only */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's addr buffer is not ours to keep: take an
		 * owned copy that lives until the work item runs.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Reference released in ethsw_switchdev_event_work() */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw_owq, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
1101
1102static int
1103ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
1104 struct switchdev_notifier_port_obj_info *port_obj_info)
1105{
1106 int err = -EOPNOTSUPP;
1107
1108 switch (event) {
1109 case SWITCHDEV_PORT_OBJ_ADD:
1110 err = swdev_port_obj_add(netdev, port_obj_info->obj,
1111 port_obj_info->trans);
1112 break;
1113 case SWITCHDEV_PORT_OBJ_DEL:
1114 err = swdev_port_obj_del(netdev, port_obj_info->obj);
1115 break;
1116 }
1117
1118 port_obj_info->handled = true;
1119 return notifier_from_errno(err);
1120}
1121
1122static int port_switchdev_blocking_event(struct notifier_block *unused,
1123 unsigned long event, void *ptr)
1124{
1125 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1126
1127 if (!ethsw_port_dev_check(dev))
1128 return NOTIFY_DONE;
1129
1130 switch (event) {
1131 case SWITCHDEV_PORT_OBJ_ADD:
1132 case SWITCHDEV_PORT_OBJ_DEL:
1133 return ethsw_switchdev_port_obj_event(event, dev, ptr);
1134 case SWITCHDEV_PORT_ATTR_SET:
1135 return ethsw_switchdev_port_attr_set_event(dev, ptr);
1136 }
1137
1138 return NOTIFY_DONE;
1139}
1140
/* Atomic switchdev notifier: FDB events and attribute sets */
static struct notifier_block port_switchdev_nb = {
	.notifier_call = port_switchdev_event,
};

/* Blocking switchdev notifier: object add/del and attribute sets */
static struct notifier_block port_switchdev_blocking_nb = {
	.notifier_call = port_switchdev_blocking_event,
};
1148
/* Register the netdevice and switchdev notifiers the driver relies on,
 * unwinding in reverse order on failure.
 */
static int ethsw_register_notifier(struct device *dev)
{
	int err;

	err = register_netdevice_notifier(&port_nb);
	if (err) {
		dev_err(dev, "Failed to register netdev notifier\n");
		return err;
	}

	err = register_switchdev_notifier(&port_switchdev_nb);
	if (err) {
		dev_err(dev, "Failed to register switchdev notifier\n");
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
	if (err) {
		dev_err(dev, "Failed to register switchdev blocking notifier\n");
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&port_nb);
	return err;
}
1179
1180static int ethsw_open(struct ethsw_core *ethsw)
1181{
1182 struct ethsw_port_priv *port_priv = NULL;
1183 int i, err;
1184
1185 err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1186 if (err) {
1187 dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
1188 return err;
1189 }
1190
1191 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1192 port_priv = ethsw->ports[i];
1193 err = dev_open(port_priv->netdev, NULL);
1194 if (err) {
1195 netdev_err(port_priv->netdev, "dev_open err %d\n", err);
1196 return err;
1197 }
1198 }
1199
1200 return 0;
1201}
1202
1203static int ethsw_stop(struct ethsw_core *ethsw)
1204{
1205 struct ethsw_port_priv *port_priv = NULL;
1206 int i, err;
1207
1208 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1209 port_priv = ethsw->ports[i];
1210 dev_close(port_priv->netdev);
1211 }
1212
1213 err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1214 if (err) {
1215 dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
1216 return err;
1217 }
1218
1219 return 0;
1220}
1221
/* One-time DPSW object initialization: open the object, validate the
 * firmware API version, reset it to a known state, enable HW learning,
 * put all ports in STP forwarding with broadcast enabled, and set up the
 * deferred-work machinery and notifiers.
 */
static int ethsw_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u16 version_major, version_minor, i;
	struct dpsw_stp_cfg stp_cfg;
	int err;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &version_major,
				   &version_minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (version_major < DPSW_MIN_VER_MAJOR ||
	    (version_major == DPSW_MIN_VER_MAJOR &&
	     version_minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
			version_major,
			version_minor,
			DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
		err = -ENOTSUPP;
		goto err_close;
	}

	/* Bring the object to a known default state */
	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
					 DPSW_FDB_LEARNING_MODE_HW);
	if (err) {
		dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
					    ethsw->dpsw_handle, i, 1);
		if (err) {
			dev_err(dev,
				"dpsw_if_set_broadcast err %d for port %d\n",
				err, i);
			goto err_close;
		}
	}

	/* Ordered workqueue for deferred FDB updates (see
	 * port_switchdev_event())
	 */
	ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    "ethsw");
	if (!ethsw_owq) {
		err = -ENOMEM;
		goto err_close;
	}

	err = ethsw_register_notifier(dev);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw_owq);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}
1320
/* Per-port one-time HW setup: detach the port from the default VLAN so
 * that VLAN membership is driven entirely by the driver/switchdev, clear
 * its PVID, and subscribe it to the default multicast group.
 * NOTE(review): assumes the firmware initially places every interface in
 * DEFAULT_VLAN_ID as an untagged member — confirm against the DPSW
 * object documentation. @port is unused; the index comes from
 * port_priv->idx.
 * Returns 0 on success or a negative error code.
 */
static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	/* 01:00:5e:00:00:01 — the all-hosts IPv4 multicast MAC address */
	const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_if_cfg vcfg;
	int err;

	/* The VLAN operations below target only this one interface */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;

	err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
					   DEFAULT_VLAN_ID, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
			   err);
		return err;
	}

	/* PVID 0: no default VLAN classification on ingress */
	err = ethsw_port_set_pvid(port_priv, 0);
	if (err)
		return err;

	err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DEFAULT_VLAN_ID, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
		return err;
	}

	return ethsw_port_fdb_add_mc(port_priv, def_mcast);
}
1356
1357static void ethsw_unregister_notifier(struct device *dev)
1358{
1359 struct notifier_block *nb;
1360 int err;
1361
1362 nb = &port_switchdev_blocking_nb;
1363 err = unregister_switchdev_blocking_notifier(nb);
1364 if (err)
1365 dev_err(dev,
1366 "Failed to unregister switchdev blocking notifier (%d)\n", err);
1367
1368 err = unregister_switchdev_notifier(&port_switchdev_nb);
1369 if (err)
1370 dev_err(dev,
1371 "Failed to unregister switchdev notifier (%d)\n", err);
1372
1373 err = unregister_netdevice_notifier(&port_nb);
1374 if (err)
1375 dev_err(dev,
1376 "Failed to unregister netdev notifier (%d)\n", err);
1377}
1378
1379static void ethsw_takedown(struct fsl_mc_device *sw_dev)
1380{
1381 struct device *dev = &sw_dev->dev;
1382 struct ethsw_core *ethsw = dev_get_drvdata(dev);
1383 int err;
1384
1385 ethsw_unregister_notifier(dev);
1386
1387 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1388 if (err)
1389 dev_warn(dev, "dpsw_close err %d\n", err);
1390}
1391
1392static int ethsw_remove(struct fsl_mc_device *sw_dev)
1393{
1394 struct ethsw_port_priv *port_priv;
1395 struct ethsw_core *ethsw;
1396 struct device *dev;
1397 int i;
1398
1399 dev = &sw_dev->dev;
1400 ethsw = dev_get_drvdata(dev);
1401
1402 ethsw_teardown_irqs(sw_dev);
1403
1404 destroy_workqueue(ethsw_owq);
1405
1406 rtnl_lock();
1407 ethsw_stop(ethsw);
1408 rtnl_unlock();
1409
1410 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1411 port_priv = ethsw->ports[i];
1412 unregister_netdev(port_priv->netdev);
1413 free_netdev(port_priv->netdev);
1414 }
1415 kfree(ethsw->ports);
1416
1417 ethsw_takedown(sw_dev);
1418 fsl_mc_portal_free(ethsw->mc_io);
1419
1420 kfree(ethsw);
1421
1422 dev_set_drvdata(dev, NULL);
1423
1424 return 0;
1425}
1426
1427static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
1428{
1429 struct ethsw_port_priv *port_priv;
1430 struct device *dev = ethsw->dev;
1431 struct net_device *port_netdev;
1432 int err;
1433
1434 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
1435 if (!port_netdev) {
1436 dev_err(dev, "alloc_etherdev error\n");
1437 return -ENOMEM;
1438 }
1439
1440 port_priv = netdev_priv(port_netdev);
1441 port_priv->netdev = port_netdev;
1442 port_priv->ethsw_data = ethsw;
1443
1444 port_priv->idx = port_idx;
1445 port_priv->stp_state = BR_STATE_FORWARDING;
1446
1447
1448 port_priv->flood = true;
1449
1450 SET_NETDEV_DEV(port_netdev, dev);
1451 port_netdev->netdev_ops = ðsw_port_ops;
1452 port_netdev->ethtool_ops = ðsw_port_ethtool_ops;
1453
1454
1455 port_netdev->min_mtu = ETH_MIN_MTU;
1456 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
1457
1458 err = register_netdev(port_netdev);
1459 if (err < 0) {
1460 dev_err(dev, "register_netdev error %d\n", err);
1461 free_netdev(port_netdev);
1462 return err;
1463 }
1464
1465 ethsw->ports[port_idx] = port_priv;
1466
1467 return ethsw_port_init(port_priv, port_idx);
1468}
1469
1470static int ethsw_probe(struct fsl_mc_device *sw_dev)
1471{
1472 struct device *dev = &sw_dev->dev;
1473 struct ethsw_core *ethsw;
1474 int i, err;
1475
1476
1477 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
1478
1479 if (!ethsw)
1480 return -ENOMEM;
1481
1482 ethsw->dev = dev;
1483 dev_set_drvdata(dev, ethsw);
1484
1485 err = fsl_mc_portal_allocate(sw_dev, 0, ðsw->mc_io);
1486 if (err) {
1487 if (err == -ENXIO)
1488 err = -EPROBE_DEFER;
1489 else
1490 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
1491 goto err_free_drvdata;
1492 }
1493
1494 err = ethsw_init(sw_dev);
1495 if (err)
1496 goto err_free_cmdport;
1497
1498
1499 ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
1500
1501
1502 ethsw->learning = true;
1503
1504 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
1505 GFP_KERNEL);
1506 if (!(ethsw->ports)) {
1507 err = -ENOMEM;
1508 goto err_takedown;
1509 }
1510
1511 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1512 err = ethsw_probe_port(ethsw, i);
1513 if (err)
1514 goto err_free_ports;
1515 }
1516
1517
1518 rtnl_lock();
1519 err = ethsw_open(ethsw);
1520 rtnl_unlock();
1521 if (err)
1522 goto err_free_ports;
1523
1524
1525 err = ethsw_setup_irqs(sw_dev);
1526 if (err)
1527 goto err_stop;
1528
1529 dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
1530 return 0;
1531
1532err_stop:
1533 rtnl_lock();
1534 ethsw_stop(ethsw);
1535 rtnl_unlock();
1536
1537err_free_ports:
1538
1539 for (i--; i >= 0; i--) {
1540 unregister_netdev(ethsw->ports[i]->netdev);
1541 free_netdev(ethsw->ports[i]->netdev);
1542 }
1543 kfree(ethsw->ports);
1544
1545err_takedown:
1546 ethsw_takedown(sw_dev);
1547
1548err_free_cmdport:
1549 fsl_mc_portal_free(ethsw->mc_io);
1550
1551err_free_drvdata:
1552 kfree(ethsw);
1553 dev_set_drvdata(dev, NULL);
1554
1555 return err;
1556}
1557
/* fsl-mc bus match table: bind this driver to "dpsw" firmware objects */
static const struct fsl_mc_device_id ethsw_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpsw",
	},
	{ .vendor = 0x0 }	/* sentinel */
};
MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
1566
/* fsl-mc bus driver glue: probe/remove hooks and the object match table */
static struct fsl_mc_driver eth_sw_drv = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = ethsw_probe,
	.remove = ethsw_remove,
	.match_id_table = ethsw_match_id_table
};

module_fsl_mc_driver(eth_sw_drv);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");