1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) "bcmgenet: " fmt
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/types.h>
17#include <linux/fcntl.h>
18#include <linux/interrupt.h>
19#include <linux/string.h>
20#include <linux/if_ether.h>
21#include <linux/init.h>
22#include <linux/errno.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/dma-mapping.h>
26#include <linux/pm.h>
27#include <linux/clk.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_irq.h>
31#include <linux/of_net.h>
32#include <linux/of_platform.h>
33#include <net/arp.h>
34
35#include <linux/mii.h>
36#include <linux/ethtool.h>
37#include <linux/netdevice.h>
38#include <linux/inetdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/skbuff.h>
41#include <linux/in.h>
42#include <linux/ip.h>
43#include <linux/ipv6.h>
44#include <linux/phy.h>
45#include <linux/platform_data/bcmgenet.h>
46
47#include <asm/unaligned.h>
48
49#include "bcmgenet.h"
50
51
/* Maximum number of hardware TX/RX priority queues (ring 16 excluded) */
#define GENET_MAX_MQ_CNT 4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY 0

/* Descriptors left over for the default queue (16) once the priority
 * queues have claimed their per-queue share of TOTAL_DESC.
 * NOTE: both macros implicitly reference a local "priv" variable.
 */
#define GENET_Q16_RX_BD_CNT \
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT \
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

/* RX buffer size and SKB alignment constraint, in bytes */
#define RX_BUF_LENGTH 2048
#define SKB_ALIGNMENT 32

/* Descriptor size depends on the GENET version (words per buffer
 * descriptor); DMA_DESC_SIZE also implicitly references "priv".
 */
#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))

/* TDMA/RDMA register blocks live right after the descriptor rings */
#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
74
/* Write the combined length/status word of a DMA buffer descriptor */
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
80
/* Read the combined length/status word of a DMA buffer descriptor */
static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}
86
/* Program a descriptor's DMA address; the high word is only written on
 * platforms built with 64-bit physical addressing AND hardware flagged
 * as supporting 40-bit addresses.
 */
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
102
103
/* Combined write of a descriptor's length/status word and DMA address */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
110
/* Read back a descriptor's DMA address, mirroring dmadesc_set_addr():
 * the high word is only fetched on 64-bit/40-bit capable setups.
 */
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
128
/* Hardware version banner: major.minor plus internal EPHY revision */
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

/* Default netif_msg level mask */
#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
133
134static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
135{
136 if (GENET_IS_V1(priv))
137 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
138 else
139 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
140}
141
142static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
143{
144 if (GENET_IS_V1(priv))
145 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
146 else
147 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
148}
149
150
151
152
153
/* TBUF control and backpressure registers moved out of the RBUF block
 * after GENET v1; on later versions they are accessed at a per-version
 * tbuf_offset from the register base.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

/* Read the TBUF backpressure multicast register */
static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* Write the TBUF backpressure multicast register */
static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
189
190
/* Symbolic names for RX/TX DMA registers; actual offsets are looked up
 * per hardware version through the bcmgenet_dma_regs_* tables below.
 */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};
226
/* Per-version DMA register offset tables, indexed by enum dma_reg.
 * NOTE(review): in the v3plus table DMA_ARB_CTRL and DMA_RING0_TIMEOUT
 * both map to 0x2C — this mirrors the upstream tables, but verify
 * against the register documentation if either is changed.
 */
static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

/* GENET v2: same core registers, arbitration/priority shifted up by 4;
 * no INDEX2RING registers on this version.
 */
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* GENET v1: no DMA_RING_CFG register at all; table starts at DMA_CTRL */
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
317
318
/* Set at probe time to the per-version offset table above */
static const u8 *bcmgenet_dma_regs;

/* Map a struct device (holding the net_device as drvdata) to driver priv */
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
325
/* TDMA/RDMA non-ring register accessors; these registers sit past the
 * per-ring register space (DMA_RINGS_SIZE) within each DMA block, and
 * their offsets come from the per-version bcmgenet_dma_regs table.
 */
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
353
354
355
356
357
/* Per-ring DMA registers.  TX and RX rings share the same register
 * layout with producer/consumer roles swapped, hence the RDMA_* names
 * aliasing the TDMA_* values.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
380
381
382
383
384
385
/* Per-version ring register offsets.  v4 carries the *_HI registers
 * for addresses/pointers wider than 32 bits; v1/v2/v3 do not.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at probe time to one of the tables above */
static const u8 *genet_dma_ring_regs;
416
/* Per-ring register accessors.  Each ring occupies DMA_RING_SIZE bytes
 * at the start of the TDMA/RDMA block; register offsets within a ring
 * come from the per-version genet_dma_ring_regs table.
 */
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
452
/* ethtool get_settings: delegate to the attached PHY.
 * Requires the interface to be up and a PHY device to be bound.
 */
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}
466
/* ethtool set_settings: delegate to the attached PHY.
 * Requires the interface to be up and a PHY device to be bound.
 */
static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
480
481static int bcmgenet_set_rx_csum(struct net_device *dev,
482 netdev_features_t wanted)
483{
484 struct bcmgenet_priv *priv = netdev_priv(dev);
485 u32 rbuf_chk_ctrl;
486 bool rx_csum_en;
487
488 rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
489
490 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
491
492
493 if (rx_csum_en)
494 rbuf_chk_ctrl |= RBUF_RXCHK_EN;
495 else
496 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
497 priv->desc_rxchk_en = rx_csum_en;
498
499
500
501
502 if (rx_csum_en && priv->crc_fwd_en)
503 rbuf_chk_ctrl |= RBUF_SKIP_FCS;
504 else
505 rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
506
507 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
508
509 return 0;
510}
511
512static int bcmgenet_set_tx_csum(struct net_device *dev,
513 netdev_features_t wanted)
514{
515 struct bcmgenet_priv *priv = netdev_priv(dev);
516 bool desc_64b_en;
517 u32 tbuf_ctrl, rbuf_ctrl;
518
519 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
520 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
521
522 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
523
524
525 if (desc_64b_en) {
526 tbuf_ctrl |= RBUF_64B_EN;
527 rbuf_ctrl |= RBUF_64B_EN;
528 } else {
529 tbuf_ctrl &= ~RBUF_64B_EN;
530 rbuf_ctrl &= ~RBUF_64B_EN;
531 }
532 priv->desc_64b_en = desc_64b_en;
533
534 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
535 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
536
537 return 0;
538}
539
540static int bcmgenet_set_features(struct net_device *dev,
541 netdev_features_t features)
542{
543 netdev_features_t changed = features ^ dev->features;
544 netdev_features_t wanted = dev->wanted_features;
545 int ret = 0;
546
547 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
548 ret = bcmgenet_set_tx_csum(dev, wanted);
549 if (changed & (NETIF_F_RXCSUM))
550 ret = bcmgenet_set_rx_csum(dev, wanted);
551
552 return ret;
553}
554
/* ethtool get/set of the netif_msg debug level mask */
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
568
/* ethtool get_coalesce: report the default (ring 16) settings.
 * The RX timeout register counts in 8192 ns ticks, hence the
 * "* 8192 / 1000" conversion to microseconds.
 */
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}
585
/* ethtool set_coalesce: program interrupt coalescing for all TX/RX
 * rings plus the default ring 16.
 */
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields 8.192 us, our maximum value has to
	 * fit in the DMA_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis.
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		/* Convert microseconds back into 8192 ns timeout ticks */
		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
647
648
/* Where a statistic's value comes from: the netdev core counters, the
 * UMAC MIB RX/TX/RUNT banks, a miscellaneous UMAC register, or a
 * software-only counter.
 */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int stat_sizeof;			/* size of the backing field */
	int stat_offset;			/* offset within the source struct */
	enum bcmgenet_stat_type type;
	/* register offset, used only for BCMGENET_STAT_MISC entries */
	u16 reg_offset;
};

/* Statistic sourced from struct net_device_stats */
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

/* Statistic mirrored into struct bcmgenet_priv with the given type */
#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

/* Statistic read from an arbitrary UMAC register at "offset" */
#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* Byte gap between the RX, TX and RUNT MIB counter banks in UMAC
 * register space; see bcmgenet_update_mib_counters().
 */
#define BCMGENET_STAT_OFFSET	0xc
699
700
701
702
/* All statistics exported via ethtool -S.  Ordering matters: MIB RX,
 * MIB TX and RUNT entries must stay contiguous and in hardware order,
 * since bcmgenet_update_mib_counters() derives each counter's register
 * address from the running sum of the preceding entries' sizes.
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats from the netdev core */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV (RX) counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV (TX) counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
790
/* ethtool get_drvinfo: report driver name and version */
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}
797
798static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
799{
800 switch (string_set) {
801 case ETH_SS_STATS:
802 return BCMGENET_STATS_LEN;
803 default:
804 return -EOPNOTSUPP;
805 }
806}
807
808static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
809 u8 *data)
810{
811 int i;
812
813 switch (stringset) {
814 case ETH_SS_STATS:
815 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
816 memcpy(data + i * ETH_GSTRING_LEN,
817 bcmgenet_gstrings_stats[i].stat_string,
818 ETH_GSTRING_LEN);
819 }
820 break;
821 }
822}
823
824static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
825{
826 int i, j = 0;
827
828 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
829 const struct bcmgenet_stats *s;
830 u8 offset = 0;
831 u32 val = 0;
832 char *p;
833
834 s = &bcmgenet_gstrings_stats[i];
835 switch (s->type) {
836 case BCMGENET_STAT_NETDEV:
837 case BCMGENET_STAT_SOFT:
838 continue;
839 case BCMGENET_STAT_MIB_RX:
840 case BCMGENET_STAT_MIB_TX:
841 case BCMGENET_STAT_RUNT:
842 if (s->type != BCMGENET_STAT_MIB_RX)
843 offset = BCMGENET_STAT_OFFSET;
844 val = bcmgenet_umac_readl(priv,
845 UMAC_MIB_START + j + offset);
846 break;
847 case BCMGENET_STAT_MISC:
848 val = bcmgenet_umac_readl(priv, s->reg_offset);
849
850 if (val == ~0)
851 bcmgenet_umac_writel(priv, 0, s->reg_offset);
852 break;
853 }
854
855 j += s->stat_sizeof;
856 p = (char *)priv + s->stat_offset;
857 *(u32 *)p = val;
858 }
859}
860
/* ethtool get_ethtool_stats: refresh the hardware MIB mirror (only
 * when the interface is up) and copy every counter into "data".
 */
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		/* netdev stats live in dev->stats, everything else in priv */
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		/* netdev counters are unsigned long; widen correctly on
		 * 64-bit kernels instead of reading only 32 bits
		 */
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
888
/* Toggle Energy Efficient Ethernet in the UMAC, TBUF and RBUF blocks.
 * The EEE clock is enabled before touching registers and disabled only
 * after they are cleared — do not reorder around the register writes.
 */
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same for thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
931
/* ethtool get_eee: EEE is not available on GENET v1 hardware */
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}
946
/* ethtool set_eee: negotiate EEE with the PHY before enabling it in
 * the MAC; disabling skips PHY init.  Not supported on GENET v1.
 */
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
973
/* ethtool nway_reset: restart autonegotiation on the attached PHY.
 * NOTE(review): priv->phydev is dereferenced without a NULL check
 * here, unlike get/set_settings — confirm a PHY is always bound when
 * ethtool ops are reachable.
 */
static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
980
981
982static struct ethtool_ops bcmgenet_ethtool_ops = {
983 .get_strings = bcmgenet_get_strings,
984 .get_sset_count = bcmgenet_get_sset_count,
985 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
986 .get_settings = bcmgenet_get_settings,
987 .set_settings = bcmgenet_set_settings,
988 .get_drvinfo = bcmgenet_get_drvinfo,
989 .get_link = ethtool_op_get_link,
990 .get_msglevel = bcmgenet_get_msglevel,
991 .set_msglevel = bcmgenet_set_msglevel,
992 .get_wol = bcmgenet_get_wol,
993 .set_wol = bcmgenet_set_wol,
994 .get_eee = bcmgenet_get_eee,
995 .set_eee = bcmgenet_set_eee,
996 .nway_reset = bcmgenet_nway_reset,
997 .get_coalesce = bcmgenet_get_coalesce,
998 .set_coalesce = bcmgenet_set_coalesce,
999};
1000
1001
1002static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1003 enum bcmgenet_power_mode mode)
1004{
1005 int ret = 0;
1006 u32 reg;
1007
1008 switch (mode) {
1009 case GENET_POWER_CABLE_SENSE:
1010 phy_detach(priv->phydev);
1011 break;
1012
1013 case GENET_POWER_WOL_MAGIC:
1014 ret = bcmgenet_wol_power_down_cfg(priv, mode);
1015 break;
1016
1017 case GENET_POWER_PASSIVE:
1018
1019 if (priv->hw_params->flags & GENET_HAS_EXT) {
1020 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1021 reg |= (EXT_PWR_DOWN_PHY |
1022 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1023 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1024
1025 bcmgenet_phy_power_set(priv->dev, false);
1026 }
1027 break;
1028 default:
1029 break;
1030 }
1031
1032 return 0;
1033}
1034
/* Power the unimac back up according to the requested mode; only
 * meaningful on hardware with the EXT power-management block.
 */
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough: PASSIVE also needs EXT_PWR_DN_EN_LD set */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}
1067
1068
1069static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1070{
1071 struct bcmgenet_priv *priv = netdev_priv(dev);
1072 int val = 0;
1073
1074 if (!netif_running(dev))
1075 return -EINVAL;
1076
1077 switch (cmd) {
1078 case SIOCGMIIPHY:
1079 case SIOCGMIIREG:
1080 case SIOCSMIIREG:
1081 if (!priv->phydev)
1082 val = -ENODEV;
1083 else
1084 val = phy_mii_ioctl(priv->phydev, rq, cmd);
1085 break;
1086
1087 default:
1088 val = -EINVAL;
1089 break;
1090 }
1091
1092 return val;
1093}
1094
1095static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1096 struct bcmgenet_tx_ring *ring)
1097{
1098 struct enet_cb *tx_cb_ptr;
1099
1100 tx_cb_ptr = ring->cbs;
1101 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1102
1103
1104 if (ring->write_ptr == ring->end_ptr)
1105 ring->write_ptr = ring->cb_ptr;
1106 else
1107 ring->write_ptr++;
1108
1109 return tx_cb_ptr;
1110}
1111
1112
/* Release a control block's SKB and clear its DMA bookkeeping.
 * Caller is responsible for any dma_unmap_* beforehand.
 */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
1119
/* RX interrupt masking helpers.  The default ring (16) is serviced by
 * the INTRL2_0 controller via UMAC_IRQ_RXDMA_DONE; the priority rings
 * each have a per-ring bit in the INTRL2_1 controller.
 */
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}
1145
/* TX interrupt masking helpers, mirroring the RX helpers above: the
 * default ring (16) uses UMAC_IRQ_TXDMA_DONE on INTRL2_0, priority
 * rings use their ring-index bit on INTRL2_1.
 */
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
1169
1170
/* Unlocked version of the Tx descriptor reclaim routine: walk the
 * descriptors the hardware has consumed, unmap their DMA buffers, free
 * head skbs, and credit the freed descriptors back to the ring.
 * Caller must hold ring->lock.  Returns the number of completed
 * packets (not descriptors).
 */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		/* hardware consumer index wrapped past the 16-bit mask */
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			/* Head descriptor: account the packet, unmap the
			 * linear buffer and free the skb.
			 */
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			/* Fragment descriptor: page mapping, no skb attached */
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		/* advance clean pointer, wrapping at end_ptr (inclusive) */
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	/* Wake the queue once there is room for a maximally-fragmented skb
	 * (mirrors the stop condition in the xmit path).
	 */
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
1236
1237static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1238 struct bcmgenet_tx_ring *ring)
1239{
1240 unsigned int released;
1241 unsigned long flags;
1242
1243 spin_lock_irqsave(&ring->lock, flags);
1244 released = __bcmgenet_tx_reclaim(dev, ring);
1245 spin_unlock_irqrestore(&ring->lock, flags);
1246
1247 return released;
1248}
1249
1250static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1251{
1252 struct bcmgenet_tx_ring *ring =
1253 container_of(napi, struct bcmgenet_tx_ring, napi);
1254 unsigned int work_done = 0;
1255
1256 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1257
1258 if (work_done == 0) {
1259 napi_complete(napi);
1260 ring->int_enable(ring);
1261
1262 return 0;
1263 }
1264
1265 return budget;
1266}
1267
1268static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1269{
1270 struct bcmgenet_priv *priv = netdev_priv(dev);
1271 int i;
1272
1273 if (netif_is_multiqueue(dev)) {
1274 for (i = 0; i < priv->hw_params->tx_queues; i++)
1275 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1276 }
1277
1278 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1279}
1280
1281
1282
1283
1284static int bcmgenet_xmit_single(struct net_device *dev,
1285 struct sk_buff *skb,
1286 u16 dma_desc_flags,
1287 struct bcmgenet_tx_ring *ring)
1288{
1289 struct bcmgenet_priv *priv = netdev_priv(dev);
1290 struct device *kdev = &priv->pdev->dev;
1291 struct enet_cb *tx_cb_ptr;
1292 unsigned int skb_len;
1293 dma_addr_t mapping;
1294 u32 length_status;
1295 int ret;
1296
1297 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1298
1299 if (unlikely(!tx_cb_ptr))
1300 BUG();
1301
1302 tx_cb_ptr->skb = skb;
1303
1304 skb_len = skb_headlen(skb);
1305
1306 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1307 ret = dma_mapping_error(kdev, mapping);
1308 if (ret) {
1309 priv->mib.tx_dma_failed++;
1310 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1311 dev_kfree_skb(skb);
1312 return ret;
1313 }
1314
1315 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1316 dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
1317 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1318 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
1319 DMA_TX_APPEND_CRC;
1320
1321 if (skb->ip_summed == CHECKSUM_PARTIAL)
1322 length_status |= DMA_TX_DO_CSUM;
1323
1324 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
1325
1326 return 0;
1327}
1328
1329
1330static int bcmgenet_xmit_frag(struct net_device *dev,
1331 skb_frag_t *frag,
1332 u16 dma_desc_flags,
1333 struct bcmgenet_tx_ring *ring)
1334{
1335 struct bcmgenet_priv *priv = netdev_priv(dev);
1336 struct device *kdev = &priv->pdev->dev;
1337 struct enet_cb *tx_cb_ptr;
1338 dma_addr_t mapping;
1339 int ret;
1340
1341 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1342
1343 if (unlikely(!tx_cb_ptr))
1344 BUG();
1345 tx_cb_ptr->skb = NULL;
1346
1347 mapping = skb_frag_dma_map(kdev, frag, 0,
1348 skb_frag_size(frag), DMA_TO_DEVICE);
1349 ret = dma_mapping_error(kdev, mapping);
1350 if (ret) {
1351 priv->mib.tx_dma_failed++;
1352 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1353 __func__);
1354 return ret;
1355 }
1356
1357 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1358 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
1359
1360 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1361 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1362 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1363
1364 return 0;
1365}
1366
1367
1368
1369
1370static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1371 struct sk_buff *skb)
1372{
1373 struct status_64 *status = NULL;
1374 struct sk_buff *new_skb;
1375 u16 offset;
1376 u8 ip_proto;
1377 u16 ip_ver;
1378 u32 tx_csum_info;
1379
1380 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1381
1382
1383
1384 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1385 dev_kfree_skb(skb);
1386 if (!new_skb) {
1387 dev->stats.tx_dropped++;
1388 return NULL;
1389 }
1390 skb = new_skb;
1391 }
1392
1393 skb_push(skb, sizeof(*status));
1394 status = (struct status_64 *)skb->data;
1395
1396 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1397 ip_ver = htons(skb->protocol);
1398 switch (ip_ver) {
1399 case ETH_P_IP:
1400 ip_proto = ip_hdr(skb)->protocol;
1401 break;
1402 case ETH_P_IPV6:
1403 ip_proto = ipv6_hdr(skb)->nexthdr;
1404 break;
1405 default:
1406 return skb;
1407 }
1408
1409 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1410 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1411 (offset + skb->csum_offset);
1412
1413
1414
1415
1416 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1417 tx_csum_info |= STATUS_TX_CSUM_LV;
1418 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1419 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
1420 } else {
1421 tx_csum_info = 0;
1422 }
1423
1424 status->tx_csum_info = tx_csum_info;
1425 }
1426
1427 return skb;
1428}
1429
/* Hard-start transmit.  Selects the Tx ring from the skb's queue
 * mapping, reserves descriptors under the ring lock, maps the head and
 * all fragments, and finally kicks the hardware producer index.
 */
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring 16
	 * queue_mapping = N > 0 goes to priority ring N - 1
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		/* not enough descriptors for the head + all fragments */
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		/* skb_padto() already freed the skb on failure */
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB
	 * inserted by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	/* stop the queue while there is no room for another worst-case skb */
	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
1528
/* Allocate and DMA-map a fresh Rx buffer for control block @cb, attach
 * it to the hardware descriptor, and return the previous (unmapped)
 * skb for the caller to process.  Returns NULL if allocation or
 * mapping failed, in which case the old buffer is left in place.
 */
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
1571
1572
1573
1574
/* bcmgenet_desc_rx - descriptor-based Rx processing for one ring.
 * Consumes up to @budget completed Rx descriptors, refilling each one
 * with a fresh buffer as it goes.  Returns the number of descriptors
 * processed (including ones dropped for errors).
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	/* The upper bits of the producer index register carry the
	 * hardware discard counter.
	 */
	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		/* hardware producer index wrapped past the mask */
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		/* swap in a fresh buffer; skb is the filled one (or NULL) */
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			/* 64B status block is prepended to the packet data */
			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			/* strip the 64B hardware status block */
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove the hardware's 2-byte IP alignment padding */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			/* hardware forwarded the FCS: trim it off */
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it up */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		/* advance read pointer, wrapping at end_ptr (inclusive) */
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
1724
1725
1726static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1727{
1728 struct bcmgenet_rx_ring *ring = container_of(napi,
1729 struct bcmgenet_rx_ring, napi);
1730 unsigned int work_done;
1731
1732 work_done = bcmgenet_desc_rx(ring, budget);
1733
1734 if (work_done < budget) {
1735 napi_complete(napi);
1736 ring->int_enable(ring);
1737 }
1738
1739 return work_done;
1740}
1741
1742
1743static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1744 struct bcmgenet_rx_ring *ring)
1745{
1746 struct enet_cb *cb;
1747 struct sk_buff *skb;
1748 int i;
1749
1750 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
1751
1752
1753 for (i = 0; i < ring->size; i++) {
1754 cb = ring->cbs + i;
1755 skb = bcmgenet_rx_refill(priv, cb);
1756 if (skb)
1757 dev_kfree_skb_any(skb);
1758 if (!cb->skb)
1759 return -ENOMEM;
1760 }
1761
1762 return 0;
1763}
1764
1765static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1766{
1767 struct enet_cb *cb;
1768 int i;
1769
1770 for (i = 0; i < priv->num_rx_bds; i++) {
1771 cb = &priv->rx_cbs[i];
1772
1773 if (dma_unmap_addr(cb, dma_addr)) {
1774 dma_unmap_single(&priv->dev->dev,
1775 dma_unmap_addr(cb, dma_addr),
1776 priv->rx_buf_len, DMA_FROM_DEVICE);
1777 dma_unmap_addr_set(cb, dma_addr, 0);
1778 }
1779
1780 if (cb->skb)
1781 bcmgenet_free_cb(cb);
1782 }
1783}
1784
1785static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
1786{
1787 u32 reg;
1788
1789 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1790 if (enable)
1791 reg |= mask;
1792 else
1793 reg &= ~mask;
1794 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1795
1796
1797
1798
1799 if (enable == 0)
1800 usleep_range(1000, 2000);
1801}
1802
1803static int reset_umac(struct bcmgenet_priv *priv)
1804{
1805 struct device *kdev = &priv->pdev->dev;
1806 unsigned int timeout = 0;
1807 u32 reg;
1808
1809
1810 bcmgenet_rbuf_ctrl_set(priv, 0);
1811 udelay(10);
1812
1813
1814 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1815
1816
1817 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1818 while (timeout++ < 1000) {
1819 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1820 if (!(reg & CMD_SW_RESET))
1821 return 0;
1822
1823 udelay(1);
1824 }
1825
1826 if (timeout == 1000) {
1827 dev_err(kdev,
1828 "timeout waiting for MAC to come out of reset\n");
1829 return -ETIMEDOUT;
1830 }
1831
1832 return 0;
1833}
1834
/* Mask all interrupts on both INTRL2 controllers and acknowledge any
 * pending status bits.
 */
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts and ack any pending ones.
	 * NOTE(review): writing 0 to MASK_CLEAR unmasks nothing -
	 * presumably kept for symmetry; confirm against the INTRL2 spec.
	 */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
1845
1846static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1847{
1848 u32 int0_enable = 0;
1849
1850
1851
1852
1853 if (priv->internal_phy) {
1854 int0_enable |= UMAC_IRQ_LINK_EVENT;
1855 } else if (priv->ext_phy) {
1856 int0_enable |= UMAC_IRQ_LINK_EVENT;
1857 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1858 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1859 int0_enable |= UMAC_IRQ_LINK_EVENT;
1860 }
1861 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1862}
1863
/* Bring the UniMAC out of reset and program baseline MAC, RBUF and
 * interrupt configuration.  Returns 0 or the error from reset_umac().
 */
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	int i;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counters */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init Rx buffer: pad incoming frames by 2 bytes for IP alignment */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Enable Rx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_RXDMA_DONE;

	/* Enable Tx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_TXDMA_DONE;

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* back-pressure mask only applies in multiqueue mode */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on hardware that has them */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	/* Enable Rx priority queue interrupts */
	for (i = 0; i < priv->hw_params->rx_queues; ++i)
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));

	/* Enable Tx priority queue interrupts */
	for (i = 0; i < priv->hw_params->tx_queues; ++i)
		int1_enable |= (1 << i);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");

	return 0;
}
1937
1938
/* Initialize a Tx ring: set up software ring state, then program the
 * ring's TDMA registers.  @start_ptr/@end_ptr index into the shared
 * tx_cbs pool; end_ptr is exclusive.
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		/* default ring 16 maps to stack queue 0, IRQs on INTRL2_0 */
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		/* priority ring i maps to stack queue i + 1 */
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;	/* inclusive last descriptor */
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
1993
1994
/* Initialize an Rx ring: set up software ring state, fill the ring
 * with mapped Rx buffers, then program the ring's RDMA registers.
 * Returns 0 or -ENOMEM from buffer allocation.
 */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		/* default ring 16 interrupts live on INTRL2_0 */
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;	/* inclusive last descriptor */

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	/* Xon/Xoff flow-control thresholds */
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
2046
2047static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
2048{
2049 unsigned int i;
2050 struct bcmgenet_tx_ring *ring;
2051
2052 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2053 ring = &priv->tx_rings[i];
2054 netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2055 }
2056
2057 ring = &priv->tx_rings[DESC_INDEX];
2058 netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2059}
2060
2061static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2062{
2063 unsigned int i;
2064 struct bcmgenet_tx_ring *ring;
2065
2066 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2067 ring = &priv->tx_rings[i];
2068 napi_enable(&ring->napi);
2069 }
2070
2071 ring = &priv->tx_rings[DESC_INDEX];
2072 napi_enable(&ring->napi);
2073}
2074
2075static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2076{
2077 unsigned int i;
2078 struct bcmgenet_tx_ring *ring;
2079
2080 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2081 ring = &priv->tx_rings[i];
2082 napi_disable(&ring->napi);
2083 }
2084
2085 ring = &priv->tx_rings[DESC_INDEX];
2086 napi_disable(&ring->napi);
2087}
2088
2089static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2090{
2091 unsigned int i;
2092 struct bcmgenet_tx_ring *ring;
2093
2094 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2095 ring = &priv->tx_rings[i];
2096 netif_napi_del(&ring->napi);
2097 }
2098
2099 ring = &priv->tx_rings[DESC_INDEX];
2100 netif_napi_del(&ring->napi);
2101}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
/* Initialize Tx queues: priority queues 0..tx_queues-1 each get
 * tx_bds_per_q descriptors carved from the front of the descriptor
 * pool; default queue 16 gets the remainder (GENET_Q16_TX_BD_CNT).
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	/* disable TDMA while the rings are being (re)configured */
	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 with the remaining descriptors */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Initialize Tx NAPI */
	bcmgenet_init_tx_napi(priv);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* re-enable TDMA if it was enabled on entry */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
2174
2175static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2176{
2177 unsigned int i;
2178 struct bcmgenet_rx_ring *ring;
2179
2180 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2181 ring = &priv->rx_rings[i];
2182 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2183 }
2184
2185 ring = &priv->rx_rings[DESC_INDEX];
2186 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2187}
2188
2189static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2190{
2191 unsigned int i;
2192 struct bcmgenet_rx_ring *ring;
2193
2194 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2195 ring = &priv->rx_rings[i];
2196 napi_enable(&ring->napi);
2197 }
2198
2199 ring = &priv->rx_rings[DESC_INDEX];
2200 napi_enable(&ring->napi);
2201}
2202
2203static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2204{
2205 unsigned int i;
2206 struct bcmgenet_rx_ring *ring;
2207
2208 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2209 ring = &priv->rx_rings[i];
2210 napi_disable(&ring->napi);
2211 }
2212
2213 ring = &priv->rx_rings[DESC_INDEX];
2214 napi_disable(&ring->napi);
2215}
2216
2217static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2218{
2219 unsigned int i;
2220 struct bcmgenet_rx_ring *ring;
2221
2222 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2223 ring = &priv->rx_rings[i];
2224 netif_napi_del(&ring->napi);
2225 }
2226
2227 ring = &priv->rx_rings[DESC_INDEX];
2228 netif_napi_del(&ring->napi);
2229}
2230
2231
2232
2233
2234
2235
2236
2237
/* Initialize Rx queues: priority queues 0..rx_queues-1 each get
 * rx_bds_per_q descriptors; default queue 16 gets the remainder
 * (GENET_Q16_RX_BD_CNT).  Returns 0 or the first ring-init error.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	/* disable RDMA while the rings are being (re)configured */
	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 with the remaining descriptors */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Initialize Rx NAPI */
	bcmgenet_init_rx_napi(priv);

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* re-enable RDMA if it was enabled on entry */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
2293
2294static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2295{
2296 int ret = 0;
2297 int timeout = 0;
2298 u32 reg;
2299 u32 dma_ctrl;
2300 int i;
2301
2302
2303 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2304 reg &= ~DMA_EN;
2305 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2306
2307
2308 while (timeout++ < DMA_TIMEOUT_VAL) {
2309 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2310 if (reg & DMA_DISABLED)
2311 break;
2312
2313 udelay(1);
2314 }
2315
2316 if (timeout == DMA_TIMEOUT_VAL) {
2317 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2318 ret = -ETIMEDOUT;
2319 }
2320
2321
2322 usleep_range(10000, 20000);
2323
2324
2325 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2326 reg &= ~DMA_EN;
2327 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2328
2329 timeout = 0;
2330
2331 while (timeout++ < DMA_TIMEOUT_VAL) {
2332 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2333 if (reg & DMA_DISABLED)
2334 break;
2335
2336 udelay(1);
2337 }
2338
2339 if (timeout == DMA_TIMEOUT_VAL) {
2340 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2341 ret = -ETIMEDOUT;
2342 }
2343
2344 dma_ctrl = 0;
2345 for (i = 0; i < priv->hw_params->rx_queues; i++)
2346 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2347 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2348 reg &= ~dma_ctrl;
2349 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2350
2351 dma_ctrl = 0;
2352 for (i = 0; i < priv->hw_params->tx_queues; i++)
2353 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2354 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2355 reg &= ~dma_ctrl;
2356 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2357
2358 return ret;
2359}
2360
2361static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2362{
2363 int i;
2364
2365 bcmgenet_fini_rx_napi(priv);
2366 bcmgenet_fini_tx_napi(priv);
2367
2368
2369 bcmgenet_dma_teardown(priv);
2370
2371 for (i = 0; i < priv->num_tx_bds; i++) {
2372 if (priv->tx_cbs[i].skb != NULL) {
2373 dev_kfree_skb(priv->tx_cbs[i].skb);
2374 priv->tx_cbs[i].skb = NULL;
2375 }
2376 }
2377
2378 bcmgenet_free_rx_buffers(priv);
2379 kfree(priv->rx_cbs);
2380 kfree(priv->tx_cbs);
2381}
2382
2383
/* Allocate the Rx/Tx control-block pools, point each control block at
 * its hardware descriptor, and initialize both DMA paths and their
 * queues.  Returns 0, -ENOMEM, or the Rx-queue init error; on failure
 * all allocations made here are released.
 */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	/* link each Rx control block to its hardware descriptor */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common Tx ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	/* link each Tx control block to its hardware descriptor */
	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
2441
2442
/* Interrupt bottom half: handles the slow-path events that isr0 deferred
 * via schedule_work() — Wake-on-LAN magic packet wakeup and PHY link
 * state changes. Runs in process context.
 */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
		work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
		phy_mac_interrupt(priv->phydev,
				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
	}
}
2464
2465
/* bcmgenet_isr1: handles the priority RX and TX ring interrupts
 * (INTRL2_1). For each ring with a pending bit, disable its interrupt
 * and hand processing off to NAPI.
 */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index;

	/* Read irq status, masking out bits that are currently disabled */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts (TX ring N maps to bit N) */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}
2512
2513
/* bcmgenet_isr0: handles the default RX/TX ring interrupts plus
 * miscellaneous events (PHY detect, link, HFB, magic packet, MDIO).
 * Slow-path events are deferred to bcmgenet_irq_task().
 */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;

	/* Read irq status, masking out bits that are currently disabled */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule(&rx_ring->napi);
		}
	}

	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule(&tx_ring->napi);
		}
	}

	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_EVENT |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		/* Wake anyone sleeping on an MDIO transaction */
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
2567
2568static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2569{
2570 struct bcmgenet_priv *priv = dev_id;
2571
2572 pm_wakeup_event(&priv->pdev->dev, 0);
2573
2574 return IRQ_HANDLED;
2575}
2576
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke both ISRs with their IRQ lines disabled so
 * netconsole and friends can make progress without interrupts.
 */
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for the priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif
2593
/* Pulse the UniMAC reset bit (BIT(1) of the RBUF control register):
 * assert for 10us, then de-assert and wait another 10us for the block
 * to come out of reset.
 */
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
2607
2608static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2609 unsigned char *addr)
2610{
2611 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2612 (addr[2] << 8) | addr[3], UMAC_MAC0);
2613 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2614}
2615
2616
/* Disable TX and RX DMA for the default ring and flush the TX FIFO.
 * Returns the DMA_CTRL bits that were cleared so the caller can restore
 * them later via bcmgenet_enable_dma().
 */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	/* Pulse the TX flush bit to drop anything still in the TX FIFO */
	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
2638
2639static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2640{
2641 u32 reg;
2642
2643 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2644 reg |= dma_ctrl;
2645 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2646
2647 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2648 reg |= dma_ctrl;
2649 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2650}
2651
2652static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2653 u32 f_index)
2654{
2655 u32 offset;
2656 u32 reg;
2657
2658 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2659 reg = bcmgenet_hfb_reg_readl(priv, offset);
2660 return !!(reg & (1 << (f_index % 32)));
2661}
2662
2663static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2664{
2665 u32 offset;
2666 u32 reg;
2667
2668 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2669 reg = bcmgenet_hfb_reg_readl(priv, offset);
2670 reg |= (1 << (f_index % 32));
2671 bcmgenet_hfb_reg_writel(priv, reg, offset);
2672}
2673
2674static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2675 u32 f_index, u32 rx_queue)
2676{
2677 u32 offset;
2678 u32 reg;
2679
2680 offset = f_index / 8;
2681 reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2682 reg &= ~(0xF << (4 * (f_index % 8)));
2683 reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2684 bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2685}
2686
2687static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2688 u32 f_index, u32 f_length)
2689{
2690 u32 offset;
2691 u32 reg;
2692
2693 offset = HFB_FLT_LEN_V3PLUS +
2694 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2695 sizeof(u32);
2696 reg = bcmgenet_hfb_reg_readl(priv, offset);
2697 reg &= ~(0xFF << (8 * (f_index % 4)));
2698 reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2699 bcmgenet_hfb_reg_writel(priv, reg, offset);
2700}
2701
2702static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2703{
2704 u32 f_index;
2705
2706 for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2707 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2708 return f_index;
2709
2710 return -ENOMEM;
2711}
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2749 u32 f_length, u32 rx_queue)
2750{
2751 int f_index;
2752 u32 i;
2753
2754 f_index = bcmgenet_hfb_find_unused_filter(priv);
2755 if (f_index < 0)
2756 return -ENOMEM;
2757
2758 if (f_length > priv->hw_params->hfb_filter_size)
2759 return -EINVAL;
2760
2761 for (i = 0; i < f_length; i++)
2762 bcmgenet_hfb_writel(priv, f_data[i],
2763 (f_index * priv->hw_params->hfb_filter_size + i) *
2764 sizeof(u32));
2765
2766 bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2767 bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2768 bcmgenet_hfb_enable_filter(priv, f_index);
2769 bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2770
2771 return 0;
2772}
2773
2774
2775
2776
2777
2778static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2779{
2780 u32 i;
2781
2782 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2783 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2784 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2785
2786 for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2787 bcmgenet_rdma_writel(priv, 0x0, i);
2788
2789 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2790 bcmgenet_hfb_reg_writel(priv, 0x0,
2791 HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2792
2793 for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2794 priv->hw_params->hfb_filter_size; i++)
2795 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2796}
2797
/* Reset the Hardware Filter Block; it only exists on GENET v3+ */
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_hfb_clear(priv);
}
2805
/* Bring the network interface up: enable NAPI, then the MAC, then the
 * TX queues, link interrupts and finally the PHY. Called from
 * bcmgenet_open() and bcmgenet_resume().
 */
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);
	bcmgenet_enable_tx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	netif_tx_start_all_queues(dev);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(priv->phydev);
}
2823
2824static int bcmgenet_open(struct net_device *dev)
2825{
2826 struct bcmgenet_priv *priv = netdev_priv(dev);
2827 unsigned long dma_ctrl;
2828 u32 reg;
2829 int ret;
2830
2831 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2832
2833
2834 clk_prepare_enable(priv->clk);
2835
2836
2837
2838
2839 if (priv->internal_phy)
2840 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2841
2842
2843 bcmgenet_umac_reset(priv);
2844
2845 ret = init_umac(priv);
2846 if (ret)
2847 goto err_clk_disable;
2848
2849
2850 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2851
2852
2853 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2854 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2855
2856 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2857
2858 if (priv->internal_phy) {
2859 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2860 reg |= EXT_ENERGY_DET_MASK;
2861 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2862 }
2863
2864
2865 dma_ctrl = bcmgenet_dma_disable(priv);
2866
2867
2868 ret = bcmgenet_init_dma(priv);
2869 if (ret) {
2870 netdev_err(dev, "failed to initialize DMA\n");
2871 goto err_clk_disable;
2872 }
2873
2874
2875 bcmgenet_enable_dma(priv, dma_ctrl);
2876
2877
2878 bcmgenet_hfb_init(priv);
2879
2880 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2881 dev->name, priv);
2882 if (ret < 0) {
2883 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2884 goto err_fini_dma;
2885 }
2886
2887 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2888 dev->name, priv);
2889 if (ret < 0) {
2890 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2891 goto err_irq0;
2892 }
2893
2894 ret = bcmgenet_mii_probe(dev);
2895 if (ret) {
2896 netdev_err(dev, "failed to connect to PHY\n");
2897 goto err_irq1;
2898 }
2899
2900 bcmgenet_netif_start(dev);
2901
2902 return 0;
2903
2904err_irq1:
2905 free_irq(priv->irq1, priv);
2906err_irq0:
2907 free_irq(priv->irq0, priv);
2908err_fini_dma:
2909 bcmgenet_fini_dma(priv);
2910err_clk_disable:
2911 clk_disable_unprepare(priv->clk);
2912 return ret;
2913}
2914
/* Stop the network interface: quiesce the TX queues, the PHY, all
 * interrupts and NAPI, then flush the deferred-work handler. Cached PHY
 * link parameters are invalidated so the next link event is reported.
 */
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	bcmgenet_intr_disable(priv);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_disable_tx_napi(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
2935
/* ndo_stop implementation: mirror image of bcmgenet_open(). The RX path
 * is disabled before DMA teardown, and the TX path only after DMA has
 * drained, so no packet is lost mid-flight.
 */
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA disabled has to done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}
2972
/* Dump the software and hardware state of one TX ring for debugging a
 * TX timeout. All ring fields are snapshotted under the ring lock so
 * the printed values are mutually consistent.
 */
static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	unsigned long flags;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	/* The default ring is serviced by INTRL2_0, priority rings by
	 * INTRL2_1 where ring N maps to bit N.
	 */
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock_irqrestore(&ring->lock, flags);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}
3017
/* ndo_tx_timeout implementation: dump every TX ring's state, reclaim
 * completed descriptors, re-enable TX interrupts and restart the
 * stopped TX queues.
 */
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
3048
3049#define MAX_MC_COUNT 16
3050
3051static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3052 unsigned char *addr,
3053 int *i,
3054 int *mc)
3055{
3056 u32 reg;
3057
3058 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3059 UMAC_MDF_ADDR + (*i * 4));
3060 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3061 addr[4] << 8 | addr[5],
3062 UMAC_MDF_ADDR + ((*i + 1) * 4));
3063 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
3064 reg |= (1 << (MAX_MC_COUNT - *mc));
3065 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3066 *i += 2;
3067 (*mc)++;
3068}
3069
/* ndo_set_rx_mode implementation: program promiscuous mode and the MDF
 * filter table. The broadcast and station addresses always take the
 * first two slots; unicast and multicast lists fill the rest as long as
 * they fit, otherwise the corresponding list is silently not filtered.
 * NOTE(review): ALLMULTI is not supported by this hardware path.
 */
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
3118
3119
3120static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3121{
3122 struct sockaddr *addr = p;
3123
3124
3125
3126
3127 if (netif_running(dev))
3128 return -EBUSY;
3129
3130 ether_addr_copy(dev->dev_addr, addr->sa_data);
3131
3132 return 0;
3133}
3134
/* net_device_ops for the GENET driver: the standard open/stop/xmit
 * hooks plus optional netpoll support.
 */
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open = bcmgenet_open,
	.ndo_stop = bcmgenet_close,
	.ndo_start_xmit = bcmgenet_xmit,
	.ndo_tx_timeout = bcmgenet_timeout,
	.ndo_set_rx_mode = bcmgenet_set_rx_mode,
	.ndo_set_mac_address = bcmgenet_set_mac_addr,
	.ndo_do_ioctl = bcmgenet_ioctl,
	.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = bcmgenet_poll_controller,
#endif
};
3148
3149
/* Per-generation hardware parameters: queue/BD counts, register block
 * offsets, descriptor width and feature flags, indexed by
 * enum bcmgenet_version.
 */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		/* 40-bit addressing needs a third descriptor word */
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};
3221
3222
/* Infer hardware parameters from the detected GENET version: pick the
 * right DMA register layouts, cross-check the configured version
 * against the SYS_REV_CTRL register, decode the GPHY revision and print
 * the configuration.
 */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	/* Hardware major 5 reports as V4, major 0 as V1 */
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* The low 16 bits of SYS_REV_CTRL carry the internal GPHY
	 * revision. Depending on the GPHY generation the meaningful part
	 * is either in bits [7:4] (newer, shift into the upper byte) or
	 * already in bits [15:8] (older, use as-is).
	 * NOTE(review): exact revision encodings per GPHY generation are
	 * not visible from this file — confirm against the GENET RDB.
	 */
	gphy_rev = reg & 0xffff;

	/* This is the good old scheme, just GPHY major, no minor nor patch */
	if ((gphy_rev & 0xf0) != 0)
		priv->gphy_rev = gphy_rev << 8;

	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	else if ((gphy_rev & 0xff00) != 0)
		priv->gphy_rev = gphy_rev;

	/* This is reserved so should require special treatment */
	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
3323
/* Device-tree match table; .data carries the enum bcmgenet_version */
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
3332
3333static int bcmgenet_probe(struct platform_device *pdev)
3334{
3335 struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3336 struct device_node *dn = pdev->dev.of_node;
3337 const struct of_device_id *of_id = NULL;
3338 struct bcmgenet_priv *priv;
3339 struct net_device *dev;
3340 const void *macaddr;
3341 struct resource *r;
3342 int err = -EIO;
3343
3344
3345 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3346 GENET_MAX_MQ_CNT + 1);
3347 if (!dev) {
3348 dev_err(&pdev->dev, "can't allocate net device\n");
3349 return -ENOMEM;
3350 }
3351
3352 if (dn) {
3353 of_id = of_match_node(bcmgenet_match, dn);
3354 if (!of_id)
3355 return -EINVAL;
3356 }
3357
3358 priv = netdev_priv(dev);
3359 priv->irq0 = platform_get_irq(pdev, 0);
3360 priv->irq1 = platform_get_irq(pdev, 1);
3361 priv->wol_irq = platform_get_irq(pdev, 2);
3362 if (!priv->irq0 || !priv->irq1) {
3363 dev_err(&pdev->dev, "can't find IRQs\n");
3364 err = -EINVAL;
3365 goto err;
3366 }
3367
3368 if (dn) {
3369 macaddr = of_get_mac_address(dn);
3370 if (!macaddr) {
3371 dev_err(&pdev->dev, "can't find MAC address\n");
3372 err = -EINVAL;
3373 goto err;
3374 }
3375 } else {
3376 macaddr = pd->mac_address;
3377 }
3378
3379 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3380 priv->base = devm_ioremap_resource(&pdev->dev, r);
3381 if (IS_ERR(priv->base)) {
3382 err = PTR_ERR(priv->base);
3383 goto err;
3384 }
3385
3386 SET_NETDEV_DEV(dev, &pdev->dev);
3387 dev_set_drvdata(&pdev->dev, dev);
3388 ether_addr_copy(dev->dev_addr, macaddr);
3389 dev->watchdog_timeo = 2 * HZ;
3390 dev->ethtool_ops = &bcmgenet_ethtool_ops;
3391 dev->netdev_ops = &bcmgenet_netdev_ops;
3392
3393 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3394
3395
3396 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3397 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3398
3399
3400 priv->wol_irq_disabled = true;
3401 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3402 dev->name, priv);
3403 if (!err)
3404 device_set_wakeup_capable(&pdev->dev, 1);
3405
3406
3407
3408
3409 dev->needed_headroom += 64;
3410
3411 netdev_boot_setup_check(dev);
3412
3413 priv->dev = dev;
3414 priv->pdev = pdev;
3415 if (of_id)
3416 priv->version = (enum bcmgenet_version)of_id->data;
3417 else
3418 priv->version = pd->genet_version;
3419
3420 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3421 if (IS_ERR(priv->clk)) {
3422 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3423 priv->clk = NULL;
3424 }
3425
3426 clk_prepare_enable(priv->clk);
3427
3428 bcmgenet_set_hw_params(priv);
3429
3430
3431 init_waitqueue_head(&priv->wq);
3432
3433 priv->rx_buf_len = RX_BUF_LENGTH;
3434 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3435
3436 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3437 if (IS_ERR(priv->clk_wol)) {
3438 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3439 priv->clk_wol = NULL;
3440 }
3441
3442 priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3443 if (IS_ERR(priv->clk_eee)) {
3444 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3445 priv->clk_eee = NULL;
3446 }
3447
3448 err = reset_umac(priv);
3449 if (err)
3450 goto err_clk_disable;
3451
3452 err = bcmgenet_mii_init(dev);
3453 if (err)
3454 goto err_clk_disable;
3455
3456
3457
3458
3459 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3460 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3461
3462
3463 netif_carrier_off(dev);
3464
3465
3466 clk_disable_unprepare(priv->clk);
3467
3468 err = register_netdev(dev);
3469 if (err)
3470 goto err;
3471
3472 return err;
3473
3474err_clk_disable:
3475 clk_disable_unprepare(priv->clk);
3476err:
3477 free_netdev(dev);
3478 return err;
3479}
3480
3481static int bcmgenet_remove(struct platform_device *pdev)
3482{
3483 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3484
3485 dev_set_drvdata(&pdev->dev, NULL);
3486 unregister_netdev(priv->dev);
3487 bcmgenet_mii_exit(priv->dev);
3488 free_netdev(priv->dev);
3489
3490 return 0;
3491}
3492
3493#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the interface (same ordering as
 * bcmgenet_close()), then either arm Wake-on-LAN (keeping the WOL clock
 * running) or power down the internal PHY, and finally gate the main
 * clock.
 */
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA disabled has to done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return ret;
}
3536
/* System resume: undo bcmgenet_suspend() — re-enable clocks, power the
 * internal PHY, reset and reprogram the MAC, rebuild the DMA rings and
 * restart the interface.
 */
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC
	 * is brought out of reset, absence of the clock results in no
	 * activity on the bus.
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
3615#endif
3616
/* PM ops only exist when CONFIG_PM_SLEEP is set; SIMPLE_DEV_PM_OPS
 * degrades to empty ops otherwise.
 */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe = bcmgenet_probe,
	.remove = bcmgenet_remove,
	.driver = {
		.name = "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm = &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
3634