/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
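
/* For reference, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) above
 * expands to the following pair of accessors (sketch derived directly
 * from the macro body):
 *
 *	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 *	static inline void umac_writel(struct bcm_sysport_priv *priv,
 *				       u32 val, u32 off)
 *	{
 *		writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 *
 * Every register block thus gets relaxed (non-barriered) MMIO helpers
 * taking offsets relative to that block's base.
 */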

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact same
 * layout as the SYSTEMPORT equivalent, moved 4 bytes down, hence the
 * offset fixup below.
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (priv->is_lite && bit >= ACB_ALGO)
		bit += 1;

	return BIT(bit);
}
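
/* Worked example for the helper above: on SYSTEMPORT Lite every
 * TDMA_CONTROL bit at or above ACB_ALGO sits one position higher than on
 * full SYSTEMPORT. Assuming, purely for illustration, ACB_ALGO = 1 and
 * TSB_EN = 5 (see bcmsysport.h for the real values):
 *
 *	tdma_control_bit(priv, TSB_EN) -> BIT(5) on SYSTEMPORT
 *	                               -> BIT(6) on SYSTEMPORT Lite
 *
 * Callers must therefore always use this helper rather than BIT() for
 * TDMA_CONTROL bits.
 */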

/* L2-interrupt masking/unmasking helpers */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits pointers cheap
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
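
/* Descriptor layout sketch, as implied by the accessors above and by
 * tdma_port_write_desc_addr() below: one descriptor is two 32-bit words,
 *
 *	word 0 (DESC_ADDR_HI_STATUS_LEN): status/length plus the upper
 *	        physical address bits (masked by DESC_ADDR_HI_MASK)
 *	word 1 (DESC_ADDR_LO):            physical address bits 31..0
 *
 * which is why dma_desc_set_addr() only touches the first word when
 * CONFIG_PHYS_ADDR_T_64BIT is set, and why the TX path ORs the length
 * and status flags into the same word as upper_32_bits(mapping).
 */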

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit
	 * status block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync with the software counters */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}

		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* TX queue counters are appended at the end of the statistics; since
	 * the MIB stats above may be skipped on SYSTEMPORT Lite, recompute
	 * the base index from the string set count instead of reusing i/j.
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}
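
/* Worked example for the conversion above, using the 125 MHz / 1024
 * (roughly 8.192 us) DMA tick documented in bcm_sysport_set_coalesce()
 * below: a requested rx_coalesce_usecs of 100 becomes
 *
 *	DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout ticks
 *
 * i.e. an effective timeout of 13 * 8.192 ~= 106.5 us. The readback in
 * bcm_sysport_get_coalesce() performs the inverse (ticks * 8192 / 1000),
 * so values only round-trip approximately.
 */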

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_profile(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
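
/* Note on the refill scheme used above and in bcm_sysport_desc_rx(): a
 * replacement buffer is allocated and DMA-mapped before the full one is
 * unmapped, and the old skb is only returned once the new one is on the
 * ring. A control block therefore always owns either both an skb and a
 * mapping or neither, and an allocation failure drops the incoming packet
 * rather than leaving an empty slot behind.
 */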

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment mapped with dma_map_page() */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
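
/* The consumer-index arithmetic above relies on unsigned modular
 * wraparound: both the hardware index and ring->c_index live in the
 * RING_CONS_INDEX_MASK space, so for example a hardware index that
 * wrapped to 0x0002 against a stale ring->c_index of 0xffff still yields
 *
 *	(0x0002 - 0xffff) & 0xffff = 0x0003
 *
 * descriptors ready (assuming a 16-bit mask), with no special casing.
 */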

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but skips wake queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bcm_sysport_net_dim *ndim =
		container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
		container_of(ndim, struct bcm_sysport_priv, dim);
	struct net_dim_cq_moder cur_profile =
		net_dim_get_profile(dim->mode, dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = NET_DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
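
/* csum_info packing sketch, from the shifts and masks used above:
 *
 *	low bits (L4_CSUM_PTR_MASK): csum_start + skb->csum_offset, the
 *	        offset of the checksum field itself
 *	high bits (<< L4_PTR_SHIFT): csum_start, where the L4 header begins
 *
 * Both offsets are measured from the start of the frame as the hardware
 * sees it, which is why skb_checksum_start_offset() is adjusted down by
 * sizeof(*tsb): the TSB just pushed does not count toward the hardware's
 * view of the packet.
 */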

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		      DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			 CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_profile(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* SYSTEMPORT Lite: the raw TSB_SWAP1 bit is cleared unconditionally,
	 * only TSB_SWAP0 is programmed below based on host endianness
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
		    1 << RING_HYST_THRESH_SHIFT,
		    TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling/enabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}
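
/* The poll condition in rdma_enable_set()/tdma_enable_set() reads
 * awkwardly; its truth table is simply "hardware state matches request":
 *
 *	enable	DISABLED bit	!!(reg & DISABLED) == !enable
 *	1	0 (running)	0 == 0 -> done
 *	1	1 (disabled)	1 == 0 -> keep polling
 *	0	1 (disabled)	1 == 1 -> done
 *	0	0 (running)	0 == 1 -> keep polling
 */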

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling/enabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
		    RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);

	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
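
/* Example of the packing above: for a dev_addr of 00:11:22:33:44:55,
 *
 *	mac0 = 0x00112233	(addr[0]..addr[3], network byte order)
 *	mac1 = 0x00004455	(addr[4]..addr[5] in the low 16 bits)
 *
 * i.e. MAC0 carries the first four octets and MAC1 the last two.
 */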

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list memory */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    void *accel_priv,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb);

	return tx_ring->index;
}
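
/* Queue selection sketch: for DSA-tagged traffic the Broadcom tagger
 * encodes the switch port and queue into the skb queue mapping, and the
 * lookup above resolves them through the table built in
 * bcm_sysport_map_queues() below. Assuming, for illustration, 4 queues
 * per port:
 *
 *	port 2, queue 1 -> ring_map[1 + 2 * 4] = ring_map[9]
 *
 * Anything that cannot be resolved falls back to the core's default
 * queue selection.
 */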
2131
2132static const struct net_device_ops bcm_sysport_netdev_ops = {
2133 .ndo_start_xmit = bcm_sysport_xmit,
2134 .ndo_tx_timeout = bcm_sysport_tx_timeout,
2135 .ndo_open = bcm_sysport_open,
2136 .ndo_stop = bcm_sysport_stop,
2137 .ndo_set_features = bcm_sysport_set_features,
2138 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
2139 .ndo_set_mac_address = bcm_sysport_change_mac,
2140#ifdef CONFIG_NET_POLL_CONTROLLER
2141 .ndo_poll_controller = bcm_sysport_poll_controller,
2142#endif
2143 .ndo_get_stats64 = bcm_sysport_get_stats64,
2144 .ndo_select_queue = bcm_sysport_select_queue,
2145};
2146
2147static int bcm_sysport_map_queues(struct notifier_block *nb,
2148 struct dsa_notifier_register_info *info)
2149{
2150 struct bcm_sysport_tx_ring *ring;
2151 struct bcm_sysport_priv *priv;
2152 struct net_device *slave_dev;
2153 unsigned int num_tx_queues;
2154 unsigned int q, start, port;
2155 struct net_device *dev;
2156
2157 priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
2158 if (priv->netdev != info->master)
2159 return 0;
2160
2161 dev = info->master;
2162
	/* We can't be setting up queue inspection for non directly
	 * attached switches
	 */
2166 if (info->switch_number)
2167 return 0;
2168
2169 if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2170 return 0;
2171
2172 port = info->port_number;
2173 slave_dev = info->info.dev;
2174
	/* On SYSTEMPORT Lite we have half as many queues, so a 1:1
	 * port-to-queue mapping is not possible, only a 2:1 mapping. Reduce
	 * the number of per-port (slave_dev) network device queues now,
	 * before any slave network device is used, so that it accurately
	 * reflects the number of real TX queues.
	 */
2181 if (priv->is_lite)
2182 netif_set_real_num_tx_queues(slave_dev,
2183 slave_dev->num_tx_queues / 2);
2184
2185 num_tx_queues = slave_dev->real_num_tx_queues;
2186
2187 if (priv->per_port_num_tx_queues &&
2188 priv->per_port_num_tx_queues != num_tx_queues)
2189 netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2190
2191 priv->per_port_num_tx_queues = num_tx_queues;
2192
2193 start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
2194 for (q = 0; q < num_tx_queues; q++) {
2195 ring = &priv->tx_rings[q + start];
2196
		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
2200 ring->switch_queue = q;
2201 ring->switch_port = port;
2202 ring->inspect = true;
2203 priv->ring_map[q + port * num_tx_queues] = ring;
2204
2205
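		/* Set all queues as being used now */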
2206 set_bit(q + start, &priv->queue_bitmap);
2207 }
2208
2209 return 0;
2210}
2211
2212static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
2213 unsigned long event, void *ptr)
2214{
2215 struct dsa_notifier_register_info *info;
2216
2217 if (event != DSA_PORT_REGISTER)
2218 return NOTIFY_DONE;
2219
2220 info = ptr;
2221
2222 return notifier_from_errno(bcm_sysport_map_queues(nb, info));
2223}
2224
2225#define REV_FMT "v%2x.%02x"
2226
2227static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2228 [SYSTEMPORT] = {
2229 .is_lite = false,
2230 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2231 },
2232 [SYSTEMPORT_LITE] = {
2233 .is_lite = true,
2234 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2235 },
2236};
2237
2238static const struct of_device_id bcm_sysport_of_match[] = {
2239 { .compatible = "brcm,systemportlite-v1.00",
2240 .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2241 { .compatible = "brcm,systemport-v1.00",
2242 .data = &bcm_sysport_params[SYSTEMPORT] },
2243 { .compatible = "brcm,systemport",
2244 .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
2246};
2247MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2248
2249static int bcm_sysport_probe(struct platform_device *pdev)
2250{
2251 const struct bcm_sysport_hw_params *params;
2252 const struct of_device_id *of_id = NULL;
2253 struct bcm_sysport_priv *priv;
2254 struct device_node *dn;
2255 struct net_device *dev;
2256 const void *macaddr;
2257 struct resource *r;
2258 u32 txq, rxq;
2259 int ret;
2260
2261 dn = pdev->dev.of_node;
2262 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2263 of_id = of_match_node(bcm_sysport_of_match, dn);
2264 if (!of_id || !of_id->data)
2265 return -EINVAL;
2266
2267
2268 params = of_id->data;
2269
2270
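	/* Read the Transmit/Receive Queue properties */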
2271 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2272 txq = TDMA_NUM_RINGS;
2273 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2274 rxq = 1;
2275
2276
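	/* Sanity check the number of transmit queues */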
2277 if (!txq || txq > TDMA_NUM_RINGS)
2278 return -EINVAL;
2279
2280 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2281 if (!dev)
2282 return -ENOMEM;
2283
2284
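	/* Initialize private members */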
2285 priv = netdev_priv(dev);
2286
2287
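	/* Allocate the number of TX rings */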
2288 priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2289 sizeof(struct bcm_sysport_tx_ring),
2290 GFP_KERNEL);
2291 if (!priv->tx_rings)
2292 return -ENOMEM;
2293
2294 priv->is_lite = params->is_lite;
2295 priv->num_rx_desc_words = params->num_rx_desc_words;
2296
2297 priv->irq0 = platform_get_irq(pdev, 0);
2298 if (!priv->is_lite) {
2299 priv->irq1 = platform_get_irq(pdev, 1);
2300 priv->wol_irq = platform_get_irq(pdev, 2);
2301 } else {
2302 priv->wol_irq = platform_get_irq(pdev, 1);
2303 }
2304 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2305 dev_err(&pdev->dev, "invalid interrupts\n");
2306 ret = -EINVAL;
2307 goto err_free_netdev;
2308 }
2309
2310 priv->base = devm_ioremap_resource(&pdev->dev, r);
2311 if (IS_ERR(priv->base)) {
2312 ret = PTR_ERR(priv->base);
2313 goto err_free_netdev;
2314 }
2315
2316 priv->netdev = dev;
2317 priv->pdev = pdev;
2318
2319 priv->phy_interface = of_get_phy_mode(dn);
2320
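	/* Default to GMII interface mode */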
2321 if (priv->phy_interface < 0)
2322 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2323
	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
	 */
2327 if (of_phy_is_fixed_link(dn)) {
2328 ret = of_phy_register_fixed_link(dn);
2329 if (ret) {
2330 dev_err(&pdev->dev, "failed to register fixed PHY\n");
2331 goto err_free_netdev;
2332 }
2333
2334 priv->phy_dn = dn;
2335 }
2336
2337
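	/* Obtain the MAC address from DT, falling back to a random one */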
2338 macaddr = of_get_mac_address(dn);
2339 if (!macaddr || !is_valid_ether_addr(macaddr)) {
2340 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2341 eth_hw_addr_random(dev);
2342 } else {
2343 ether_addr_copy(dev->dev_addr, macaddr);
2344 }
2345
2346 SET_NETDEV_DEV(dev, &pdev->dev);
2347 dev_set_drvdata(&pdev->dev, dev);
2348 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2349 dev->netdev_ops = &bcm_sysport_netdev_ops;
2350 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2351
2352
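	/* HW supported features, none enabled by default */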
2353 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2354 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2355
2356
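	/* Request the WOL interrupt and advertise suspend if available */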
2357 priv->wol_irq_disabled = 1;
2358 ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2359 bcm_sysport_wol_isr, 0, dev->name, priv);
2360 if (!ret)
2361 device_set_wakeup_capable(&pdev->dev, 1);
2362
2363
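	/* Set the needed headroom once and for all */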
2364 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2365 dev->needed_headroom += sizeof(struct bcm_tsb);
2366
2367
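	/* libphy will adjust the link state accordingly */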
2368 netif_carrier_off(dev);
2369
2370 priv->rx_max_coalesced_frames = 1;
2371 u64_stats_init(&priv->syncp);
2372
2373 priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
2374
2375 ret = register_dsa_notifier(&priv->dsa_notifier);
2376 if (ret) {
2377 dev_err(&pdev->dev, "failed to register DSA notifier\n");
2378 goto err_deregister_fixed_link;
2379 }
2380
2381 ret = register_netdev(dev);
2382 if (ret) {
2383 dev_err(&pdev->dev, "failed to register net_device\n");
2384 goto err_deregister_notifier;
2385 }
2386
2387 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2388 dev_info(&pdev->dev,
2389 "Broadcom SYSTEMPORT%s" REV_FMT
2390 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2391 priv->is_lite ? " Lite" : "",
2392 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2393 priv->base, priv->irq0, priv->irq1, txq, rxq);
2394
2395 return 0;
2396
2397err_deregister_notifier:
2398 unregister_dsa_notifier(&priv->dsa_notifier);
2399err_deregister_fixed_link:
2400 if (of_phy_is_fixed_link(dn))
2401 of_phy_deregister_fixed_link(dn);
2402err_free_netdev:
2403 free_netdev(dev);
2404 return ret;
2405}
2406
2407static int bcm_sysport_remove(struct platform_device *pdev)
2408{
2409 struct net_device *dev = dev_get_drvdata(&pdev->dev);
2410 struct bcm_sysport_priv *priv = netdev_priv(dev);
2411 struct device_node *dn = pdev->dev.of_node;
2412
	/* Not much to do here; ndo_close has been called
	 * and we use managed allocations
	 */
2416 unregister_dsa_notifier(&priv->dsa_notifier);
2417 unregister_netdev(dev);
2418 if (of_phy_is_fixed_link(dn))
2419 of_phy_deregister_fixed_link(dn);
2420 free_netdev(dev);
2421 dev_set_drvdata(&pdev->dev, NULL);
2422
2423 return 0;
2424}
2425
2426#ifdef CONFIG_PM_SLEEP
2427static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2428{
2429 struct net_device *ndev = priv->netdev;
2430 unsigned int timeout = 1000;
2431 u32 reg;
2432
2433
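	/* Password has already been programmed */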
2434 reg = umac_readl(priv, UMAC_MPD_CTRL);
2435 reg |= MPD_EN;
2436 reg &= ~PSW_EN;
2437 if (priv->wolopts & WAKE_MAGICSECURE)
2438 reg |= PSW_EN;
2439 umac_writel(priv, reg, UMAC_MPD_CTRL);
2440
2441
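	/* Make sure RBUF entered WoL mode as a result */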
2442 do {
2443 reg = rbuf_readl(priv, RBUF_STATUS);
2444 if (reg & RBUF_WOL_MODE)
2445 break;
2446
2447 udelay(10);
2448 } while (timeout-- > 0);
2449
2450
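	/* Do not leave the UniMAC RBUF matching only MPD packets */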
2451 if (!timeout) {
2452 reg = umac_readl(priv, UMAC_MPD_CTRL);
2453 reg &= ~MPD_EN;
2454 umac_writel(priv, reg, UMAC_MPD_CTRL);
2455 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2456 return -ETIMEDOUT;
2457 }
2458
2459
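	/* UniMAC receive needs to be turned on */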
2460 umac_enable_set(priv, CMD_RX_EN, 1);
2461
2462
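	/* Enable the interrupt wake-up source */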
2463 intrl2_0_mask_clear(priv, INTRL2_0_MPD);
2464
2465 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2466
2467 return 0;
2468}
2469
2470static int bcm_sysport_suspend(struct device *d)
2471{
2472 struct net_device *dev = dev_get_drvdata(d);
2473 struct bcm_sysport_priv *priv = netdev_priv(dev);
2474 unsigned int i;
2475 int ret = 0;
2476 u32 reg;
2477
2478 if (!netif_running(dev))
2479 return 0;
2480
2481 bcm_sysport_netif_stop(dev);
2482
2483 phy_suspend(dev->phydev);
2484
2485 netif_device_detach(dev);
2486
2487
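	/* Disable UniMAC RX */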
2488 umac_enable_set(priv, CMD_RX_EN, 0);
2489
2490 ret = rdma_enable_set(priv, 0);
2491 if (ret) {
2492 netdev_err(dev, "RDMA timeout!\n");
2493 return ret;
2494 }
2495
2496
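	/* Disable RXCHK if enabled */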
2497 if (priv->rx_chk_en) {
2498 reg = rxchk_readl(priv, RXCHK_CONTROL);
2499 reg &= ~RXCHK_EN;
2500 rxchk_writel(priv, reg, RXCHK_CONTROL);
2501 }
2502
2503
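	/* Flush RX pipe */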
2504 if (!priv->wolopts)
2505 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2506
2507 ret = tdma_enable_set(priv, 0);
2508 if (ret) {
2509 netdev_err(dev, "TDMA timeout!\n");
2510 return ret;
2511 }
2512
2513
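	/* Wait for a packet to be drained */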
2514 usleep_range(2000, 3000);
2515
2516 umac_enable_set(priv, CMD_TX_EN, 0);
2517
2518 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2519
2520
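	/* Free RX/TX ring software structures */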
2521 for (i = 0; i < dev->num_tx_queues; i++)
2522 bcm_sysport_fini_tx_ring(priv, i);
2523 bcm_sysport_fini_rx_ring(priv);
2524
2525
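	/* Get prepared for Wake-on-LAN */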
2526 if (device_may_wakeup(d) && priv->wolopts)
2527 ret = bcm_sysport_suspend_to_wol(priv);
2528
2529 return ret;
2530}
2531
2532static int bcm_sysport_resume(struct device *d)
2533{
2534 struct net_device *dev = dev_get_drvdata(d);
2535 struct bcm_sysport_priv *priv = netdev_priv(dev);
2536 unsigned int i;
2537 u32 reg;
2538 int ret;
2539
2540 if (!netif_running(dev))
2541 return 0;
2542
2543 umac_reset(priv);
2544
	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
2548 bcm_sysport_resume_from_wol(priv);
2549
2550
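	/* Initialize both hardware and software rings */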
2551 for (i = 0; i < dev->num_tx_queues; i++) {
2552 ret = bcm_sysport_init_tx_ring(priv, i);
2553 if (ret) {
2554 netdev_err(dev, "failed to initialize TX ring %d\n",
2555 i);
2556 goto out_free_tx_rings;
2557 }
2558 }
2559
2560
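	/* Initialize linked-list RAM */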
2561 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2562
2563
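	/* Initialize the RX ring */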
2564 ret = bcm_sysport_init_rx_ring(priv);
2565 if (ret) {
2566 netdev_err(dev, "failed to initialize RX ring\n");
2567 goto out_free_rx_ring;
2568 }
2569
2570 netif_device_attach(dev);
2571
2572
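	/* RX pipe enable */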
2573 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2574
2575 ret = rdma_enable_set(priv, 1);
2576 if (ret) {
2577 netdev_err(dev, "failed to enable RDMA\n");
2578 goto out_free_rx_ring;
2579 }
2580
2581
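	/* Enable RXCHK if enabled */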
2582 if (priv->rx_chk_en) {
2583 reg = rxchk_readl(priv, RXCHK_CONTROL);
2584 reg |= RXCHK_EN;
2585 rxchk_writel(priv, reg, RXCHK_CONTROL);
2586 }
2587
2588 rbuf_init(priv);
2589
2590
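	/* Set maximum frame length */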
2591 if (!priv->is_lite)
2592 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2593 else
2594 gib_set_pad_extension(priv);
2595
2596
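	/* Set MAC address */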
2597 umac_set_hw_addr(priv, dev->dev_addr);
2598
2599 umac_enable_set(priv, CMD_RX_EN, 1);
2600
2601
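	/* TX pipe enable */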
2602 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2603
2604 umac_enable_set(priv, CMD_TX_EN, 1);
2605
2606 ret = tdma_enable_set(priv, 1);
2607 if (ret) {
		netdev_err(dev, "failed to enable TDMA\n");
2609 goto out_free_rx_ring;
2610 }
2611
2612 phy_resume(dev->phydev);
2613
2614 bcm_sysport_netif_start(dev);
2615
2616 return 0;
2617
2618out_free_rx_ring:
2619 bcm_sysport_fini_rx_ring(priv);
2620out_free_tx_rings:
2621 for (i = 0; i < dev->num_tx_queues; i++)
2622 bcm_sysport_fini_tx_ring(priv, i);
2623 return ret;
2624}
2625#endif
2626
2627static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2628 bcm_sysport_suspend, bcm_sysport_resume);
2629
2630static struct platform_driver bcm_sysport_driver = {
2631 .probe = bcm_sysport_probe,
2632 .remove = bcm_sysport_remove,
2633 .driver = {
2634 .name = "brcm-systemport",
2635 .of_match_table = bcm_sysport_of_match,
2636 .pm = &bcm_sysport_pm_ops,
2637 },
2638};
2639module_platform_driver(bcm_sysport_driver);
2640
2641MODULE_AUTHOR("Broadcom Corporation");
2642MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2643MODULE_ALIAS("platform:brcm-systemport");
2644MODULE_LICENSE("GPL");
2645