/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 */

#define pr_fmt(fmt) "bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
50
/* Maximum number of hardware queues, downsized if needed */
52#define GENET_MAX_MQ_CNT 4

/* Default highest priority queue for multi queue support */
55#define GENET_Q0_PRIORITY 0
56
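/* Default queue 16 is sized with whatever descriptors remain once each
 * hardware priority queue has claimed its rx_bds_per_q/tx_bds_per_q share.
 */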
57#define GENET_Q16_RX_BD_CNT \
58 (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
59#define GENET_Q16_TX_BD_CNT \
60 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
61
62#define RX_BUF_LENGTH 2048
63#define SKB_ALIGNMENT 32
64
/* Tx/Rx DMA register offset, skip 256 descriptors */
66#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
67#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
68
69#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
70 TOTAL_DESC * DMA_DESC_SIZE)
71
72#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
73 TOTAL_DESC * DMA_DESC_SIZE)
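
/* Each DMA block starts with TOTAL_DESC descriptors; the per-ring registers
 * and then the common DMA registers follow, hence the offsets above.
 */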
74
75static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
76 void __iomem *d, u32 value)
77{
78 __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
79}
80
81static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
82 void __iomem *d)
83{
84 return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
85}
86
87static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
88 void __iomem *d,
89 dma_addr_t addr)
90{
91 __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, so save these expensive writes unless
	 * the platform is explicitly configured for 64-bit/LPAE addressing.
	 */
97#ifdef CONFIG_PHYS_ADDR_T_64BIT
98 if (priv->hw_params->flags & GENET_HAS_40BITS)
99 __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
100#endif
101}
102
/* Combined address + length/status setter */
104static inline void dmadesc_set(struct bcmgenet_priv *priv,
105 void __iomem *d, dma_addr_t addr, u32 val)
106{
107 dmadesc_set_length_status(priv, d, val);
108 dmadesc_set_addr(priv, d, addr);
109}
110
111static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
112 void __iomem *d)
113{
114 dma_addr_t addr;
115
116 addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus are likewise expensive, so only
	 * read the high address word when the platform is explicitly
	 * configured for 64-bit/LPAE addressing.
	 */
122#ifdef CONFIG_PHYS_ADDR_T_64BIT
123 if (priv->hw_params->flags & GENET_HAS_40BITS)
124 addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
125#endif
126 return addr;
127}
128
129#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
130
131#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
132 NETIF_MSG_LINK)
133
134static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
135{
136 if (GENET_IS_V1(priv))
137 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
138 else
139 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
140}
141
142static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
143{
144 if (GENET_IS_V1(priv))
145 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
146 else
147 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
148}
149
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
154static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
155{
156 if (GENET_IS_V1(priv))
157 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
158 else
159 return __raw_readl(priv->base +
160 priv->hw_params->tbuf_offset + TBUF_CTRL);
161}
162
163static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
164{
165 if (GENET_IS_V1(priv))
166 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
167 else
168 __raw_writel(val, priv->base +
169 priv->hw_params->tbuf_offset + TBUF_CTRL);
170}
171
172static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
173{
174 if (GENET_IS_V1(priv))
175 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
176 else
177 return __raw_readl(priv->base +
178 priv->hw_params->tbuf_offset + TBUF_BP_MC);
179}
180
181static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
182{
183 if (GENET_IS_V1(priv))
184 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
185 else
186 __raw_writel(val, priv->base +
187 priv->hw_params->tbuf_offset + TBUF_BP_MC);
188}
189
/* RX/TX DMA register accessors */
191enum dma_reg {
192 DMA_RING_CFG = 0,
193 DMA_CTRL,
194 DMA_STATUS,
195 DMA_SCB_BURST_SIZE,
196 DMA_ARB_CTRL,
197 DMA_PRIORITY_0,
198 DMA_PRIORITY_1,
199 DMA_PRIORITY_2,
200 DMA_INDEX2RING_0,
201 DMA_INDEX2RING_1,
202 DMA_INDEX2RING_2,
203 DMA_INDEX2RING_3,
204 DMA_INDEX2RING_4,
205 DMA_INDEX2RING_5,
206 DMA_INDEX2RING_6,
207 DMA_INDEX2RING_7,
208 DMA_RING0_TIMEOUT,
209 DMA_RING1_TIMEOUT,
210 DMA_RING2_TIMEOUT,
211 DMA_RING3_TIMEOUT,
212 DMA_RING4_TIMEOUT,
213 DMA_RING5_TIMEOUT,
214 DMA_RING6_TIMEOUT,
215 DMA_RING7_TIMEOUT,
216 DMA_RING8_TIMEOUT,
217 DMA_RING9_TIMEOUT,
218 DMA_RING10_TIMEOUT,
219 DMA_RING11_TIMEOUT,
220 DMA_RING12_TIMEOUT,
221 DMA_RING13_TIMEOUT,
222 DMA_RING14_TIMEOUT,
223 DMA_RING15_TIMEOUT,
224 DMA_RING16_TIMEOUT,
225};
226
227static const u8 bcmgenet_dma_regs_v3plus[] = {
228 [DMA_RING_CFG] = 0x00,
229 [DMA_CTRL] = 0x04,
230 [DMA_STATUS] = 0x08,
231 [DMA_SCB_BURST_SIZE] = 0x0C,
232 [DMA_ARB_CTRL] = 0x2C,
233 [DMA_PRIORITY_0] = 0x30,
234 [DMA_PRIORITY_1] = 0x34,
235 [DMA_PRIORITY_2] = 0x38,
236 [DMA_RING0_TIMEOUT] = 0x2C,
237 [DMA_RING1_TIMEOUT] = 0x30,
238 [DMA_RING2_TIMEOUT] = 0x34,
239 [DMA_RING3_TIMEOUT] = 0x38,
240 [DMA_RING4_TIMEOUT] = 0x3c,
241 [DMA_RING5_TIMEOUT] = 0x40,
242 [DMA_RING6_TIMEOUT] = 0x44,
243 [DMA_RING7_TIMEOUT] = 0x48,
244 [DMA_RING8_TIMEOUT] = 0x4c,
245 [DMA_RING9_TIMEOUT] = 0x50,
246 [DMA_RING10_TIMEOUT] = 0x54,
247 [DMA_RING11_TIMEOUT] = 0x58,
248 [DMA_RING12_TIMEOUT] = 0x5c,
249 [DMA_RING13_TIMEOUT] = 0x60,
250 [DMA_RING14_TIMEOUT] = 0x64,
251 [DMA_RING15_TIMEOUT] = 0x68,
252 [DMA_RING16_TIMEOUT] = 0x6C,
253 [DMA_INDEX2RING_0] = 0x70,
254 [DMA_INDEX2RING_1] = 0x74,
255 [DMA_INDEX2RING_2] = 0x78,
256 [DMA_INDEX2RING_3] = 0x7C,
257 [DMA_INDEX2RING_4] = 0x80,
258 [DMA_INDEX2RING_5] = 0x84,
259 [DMA_INDEX2RING_6] = 0x88,
260 [DMA_INDEX2RING_7] = 0x8C,
261};
262
263static const u8 bcmgenet_dma_regs_v2[] = {
264 [DMA_RING_CFG] = 0x00,
265 [DMA_CTRL] = 0x04,
266 [DMA_STATUS] = 0x08,
267 [DMA_SCB_BURST_SIZE] = 0x0C,
268 [DMA_ARB_CTRL] = 0x30,
269 [DMA_PRIORITY_0] = 0x34,
270 [DMA_PRIORITY_1] = 0x38,
271 [DMA_PRIORITY_2] = 0x3C,
272 [DMA_RING0_TIMEOUT] = 0x2C,
273 [DMA_RING1_TIMEOUT] = 0x30,
274 [DMA_RING2_TIMEOUT] = 0x34,
275 [DMA_RING3_TIMEOUT] = 0x38,
276 [DMA_RING4_TIMEOUT] = 0x3c,
277 [DMA_RING5_TIMEOUT] = 0x40,
278 [DMA_RING6_TIMEOUT] = 0x44,
279 [DMA_RING7_TIMEOUT] = 0x48,
280 [DMA_RING8_TIMEOUT] = 0x4c,
281 [DMA_RING9_TIMEOUT] = 0x50,
282 [DMA_RING10_TIMEOUT] = 0x54,
283 [DMA_RING11_TIMEOUT] = 0x58,
284 [DMA_RING12_TIMEOUT] = 0x5c,
285 [DMA_RING13_TIMEOUT] = 0x60,
286 [DMA_RING14_TIMEOUT] = 0x64,
287 [DMA_RING15_TIMEOUT] = 0x68,
288 [DMA_RING16_TIMEOUT] = 0x6C,
289};
290
291static const u8 bcmgenet_dma_regs_v1[] = {
292 [DMA_CTRL] = 0x00,
293 [DMA_STATUS] = 0x04,
294 [DMA_SCB_BURST_SIZE] = 0x0C,
295 [DMA_ARB_CTRL] = 0x30,
296 [DMA_PRIORITY_0] = 0x34,
297 [DMA_PRIORITY_1] = 0x38,
298 [DMA_PRIORITY_2] = 0x3C,
299 [DMA_RING0_TIMEOUT] = 0x2C,
300 [DMA_RING1_TIMEOUT] = 0x30,
301 [DMA_RING2_TIMEOUT] = 0x34,
302 [DMA_RING3_TIMEOUT] = 0x38,
303 [DMA_RING4_TIMEOUT] = 0x3c,
304 [DMA_RING5_TIMEOUT] = 0x40,
305 [DMA_RING6_TIMEOUT] = 0x44,
306 [DMA_RING7_TIMEOUT] = 0x48,
307 [DMA_RING8_TIMEOUT] = 0x4c,
308 [DMA_RING9_TIMEOUT] = 0x50,
309 [DMA_RING10_TIMEOUT] = 0x54,
310 [DMA_RING11_TIMEOUT] = 0x58,
311 [DMA_RING12_TIMEOUT] = 0x5c,
312 [DMA_RING13_TIMEOUT] = 0x60,
313 [DMA_RING14_TIMEOUT] = 0x64,
314 [DMA_RING15_TIMEOUT] = 0x68,
315 [DMA_RING16_TIMEOUT] = 0x6C,
316};
317
/* Set at runtime once bcmgenet version is known */
319static const u8 *bcmgenet_dma_regs;
320
321static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
322{
323 return netdev_priv(dev_get_drvdata(dev));
324}
325
326static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
327 enum dma_reg r)
328{
329 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
330 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
331}
332
333static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
334 u32 val, enum dma_reg r)
335{
336 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
337 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
338}
339
340static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
341 enum dma_reg r)
342{
343 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
344 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
345}
346
347static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
348 u32 val, enum dma_reg r)
349{
350 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
351 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
352}
353
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
358enum dma_ring_reg {
359 TDMA_READ_PTR = 0,
360 RDMA_WRITE_PTR = TDMA_READ_PTR,
361 TDMA_READ_PTR_HI,
362 RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
363 TDMA_CONS_INDEX,
364 RDMA_PROD_INDEX = TDMA_CONS_INDEX,
365 TDMA_PROD_INDEX,
366 RDMA_CONS_INDEX = TDMA_PROD_INDEX,
367 DMA_RING_BUF_SIZE,
368 DMA_START_ADDR,
369 DMA_START_ADDR_HI,
370 DMA_END_ADDR,
371 DMA_END_ADDR_HI,
372 DMA_MBUF_DONE_THRESH,
373 TDMA_FLOW_PERIOD,
374 RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
375 TDMA_WRITE_PTR,
376 RDMA_READ_PTR = TDMA_WRITE_PTR,
377 TDMA_WRITE_PTR_HI,
378 RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
379};
380
/* GENET v4 supports 40-bit pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
386static const u8 genet_dma_ring_regs_v4[] = {
387 [TDMA_READ_PTR] = 0x00,
388 [TDMA_READ_PTR_HI] = 0x04,
389 [TDMA_CONS_INDEX] = 0x08,
390 [TDMA_PROD_INDEX] = 0x0C,
391 [DMA_RING_BUF_SIZE] = 0x10,
392 [DMA_START_ADDR] = 0x14,
393 [DMA_START_ADDR_HI] = 0x18,
394 [DMA_END_ADDR] = 0x1C,
395 [DMA_END_ADDR_HI] = 0x20,
396 [DMA_MBUF_DONE_THRESH] = 0x24,
397 [TDMA_FLOW_PERIOD] = 0x28,
398 [TDMA_WRITE_PTR] = 0x2C,
399 [TDMA_WRITE_PTR_HI] = 0x30,
400};
401
402static const u8 genet_dma_ring_regs_v123[] = {
403 [TDMA_READ_PTR] = 0x00,
404 [TDMA_CONS_INDEX] = 0x04,
405 [TDMA_PROD_INDEX] = 0x08,
406 [DMA_RING_BUF_SIZE] = 0x0C,
407 [DMA_START_ADDR] = 0x10,
408 [DMA_END_ADDR] = 0x14,
409 [DMA_MBUF_DONE_THRESH] = 0x18,
410 [TDMA_FLOW_PERIOD] = 0x1C,
411 [TDMA_WRITE_PTR] = 0x20,
412};
413
/* Set at runtime once GENET version is known */
415static const u8 *genet_dma_ring_regs;
416
417static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
418 unsigned int ring,
419 enum dma_ring_reg r)
420{
421 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
422 (DMA_RING_SIZE * ring) +
423 genet_dma_ring_regs[r]);
424}
425
426static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
427 unsigned int ring, u32 val,
428 enum dma_ring_reg r)
429{
430 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
431 (DMA_RING_SIZE * ring) +
432 genet_dma_ring_regs[r]);
433}
434
435static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
436 unsigned int ring,
437 enum dma_ring_reg r)
438{
439 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
440 (DMA_RING_SIZE * ring) +
441 genet_dma_ring_regs[r]);
442}
443
444static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
445 unsigned int ring, u32 val,
446 enum dma_ring_reg r)
447{
448 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
449 (DMA_RING_SIZE * ring) +
450 genet_dma_ring_regs[r]);
451}
452
453static int bcmgenet_get_settings(struct net_device *dev,
454 struct ethtool_cmd *cmd)
455{
456 struct bcmgenet_priv *priv = netdev_priv(dev);
457
458 if (!netif_running(dev))
459 return -EINVAL;
460
461 if (!priv->phydev)
462 return -ENODEV;
463
464 return phy_ethtool_gset(priv->phydev, cmd);
465}
466
467static int bcmgenet_set_settings(struct net_device *dev,
468 struct ethtool_cmd *cmd)
469{
470 struct bcmgenet_priv *priv = netdev_priv(dev);
471
472 if (!netif_running(dev))
473 return -EINVAL;
474
475 if (!priv->phydev)
476 return -ENODEV;
477
478 return phy_ethtool_sset(priv->phydev, cmd);
479}
480
481static int bcmgenet_set_rx_csum(struct net_device *dev,
482 netdev_features_t wanted)
483{
484 struct bcmgenet_priv *priv = netdev_priv(dev);
485 u32 rbuf_chk_ctrl;
486 bool rx_csum_en;
487
488 rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
489
490 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
493 if (rx_csum_en)
494 rbuf_chk_ctrl |= RBUF_RXCHK_EN;
495 else
496 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
497 priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
502 if (rx_csum_en && priv->crc_fwd_en)
503 rbuf_chk_ctrl |= RBUF_SKIP_FCS;
504 else
505 rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
506
507 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
508
509 return 0;
510}
511
512static int bcmgenet_set_tx_csum(struct net_device *dev,
513 netdev_features_t wanted)
514{
515 struct bcmgenet_priv *priv = netdev_priv(dev);
516 bool desc_64b_en;
517 u32 tbuf_ctrl, rbuf_ctrl;
518
519 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
520 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
521
522 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 byte descriptors in both directions (RBUF and TBUF) */
525 if (desc_64b_en) {
526 tbuf_ctrl |= RBUF_64B_EN;
527 rbuf_ctrl |= RBUF_64B_EN;
528 } else {
529 tbuf_ctrl &= ~RBUF_64B_EN;
530 rbuf_ctrl &= ~RBUF_64B_EN;
531 }
532 priv->desc_64b_en = desc_64b_en;
533
534 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
535 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
536
537 return 0;
538}
539
540static int bcmgenet_set_features(struct net_device *dev,
541 netdev_features_t features)
542{
543 netdev_features_t changed = features ^ dev->features;
544 netdev_features_t wanted = dev->wanted_features;
545 int ret = 0;
546
547 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
548 ret = bcmgenet_set_tx_csum(dev, wanted);
549 if (changed & (NETIF_F_RXCSUM))
550 ret = bcmgenet_set_rx_csum(dev, wanted);
551
552 return ret;
553}
554
555static u32 bcmgenet_get_msglevel(struct net_device *dev)
556{
557 struct bcmgenet_priv *priv = netdev_priv(dev);
558
559 return priv->msg_enable;
560}
561
562static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
563{
564 struct bcmgenet_priv *priv = netdev_priv(dev);
565
566 priv->msg_enable = level;
567}
568
569static int bcmgenet_get_coalesce(struct net_device *dev,
570 struct ethtool_coalesce *ec)
571{
572 struct bcmgenet_priv *priv = netdev_priv(dev);
573
574 ec->tx_max_coalesced_frames =
575 bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
576 DMA_MBUF_DONE_THRESH);
577 ec->rx_max_coalesced_frames =
578 bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
579 DMA_MBUF_DONE_THRESH);
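	/* The RDMA timeout register counts in units of ~8.192 us (125 MHz
	 * reference clock divided by 1024), hence the conversion below.
	 */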
580 ec->rx_coalesce_usecs =
581 bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
582
583 return 0;
584}
585
586static int bcmgenet_set_coalesce(struct net_device *dev,
587 struct ethtool_coalesce *ec)
588{
589 struct bcmgenet_priv *priv = netdev_priv(dev);
590 unsigned int i;
591 u32 reg;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, and our maximum
	 * value has to fit in the DMA_TIMEOUT_MASK (16 bits).
	 */
597 if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
598 ec->tx_max_coalesced_frames == 0 ||
599 ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
600 ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
601 return -EINVAL;
602
603 if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
604 return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
610 if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
611 ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
612 return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis.
	 */
617 for (i = 0; i < priv->hw_params->tx_queues; i++)
618 bcmgenet_tdma_ring_writel(priv, i,
619 ec->tx_max_coalesced_frames,
620 DMA_MBUF_DONE_THRESH);
621 bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
622 ec->tx_max_coalesced_frames,
623 DMA_MBUF_DONE_THRESH);
624
625 for (i = 0; i < priv->hw_params->rx_queues; i++) {
626 bcmgenet_rdma_ring_writel(priv, i,
627 ec->rx_max_coalesced_frames,
628 DMA_MBUF_DONE_THRESH);
629
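		/* Convert the requested delay into 8.192 us timeout ticks,
		 * rounding up so that a non-zero request always programs at
		 * least one tick.
		 */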
630 reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
631 reg &= ~DMA_TIMEOUT_MASK;
632 reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
633 bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
634 }
635
636 bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
637 ec->rx_max_coalesced_frames,
638 DMA_MBUF_DONE_THRESH);
639
640 reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
641 reg &= ~DMA_TIMEOUT_MASK;
642 reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
643 bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
644
645 return 0;
646}
647
/* standard ethtool support functions. */
649enum bcmgenet_stat_type {
650 BCMGENET_STAT_NETDEV = -1,
651 BCMGENET_STAT_MIB_RX,
652 BCMGENET_STAT_MIB_TX,
653 BCMGENET_STAT_RUNT,
654 BCMGENET_STAT_MISC,
655 BCMGENET_STAT_SOFT,
656};
657
658struct bcmgenet_stats {
659 char stat_string[ETH_GSTRING_LEN];
660 int stat_sizeof;
661 int stat_offset;
662 enum bcmgenet_stat_type type;
663
664 u16 reg_offset;
665};
666
667#define STAT_NETDEV(m) { \
668 .stat_string = __stringify(m), \
669 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
670 .stat_offset = offsetof(struct net_device_stats, m), \
671 .type = BCMGENET_STAT_NETDEV, \
672}
673
674#define STAT_GENET_MIB(str, m, _type) { \
675 .stat_string = str, \
676 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
677 .stat_offset = offsetof(struct bcmgenet_priv, m), \
678 .type = _type, \
679}
680
681#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
682#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
683#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
684#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
685
686#define STAT_GENET_MISC(str, m, offset) { \
687 .stat_string = str, \
688 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
689 .stat_offset = offsetof(struct bcmgenet_priv, m), \
690 .type = BCMGENET_STAT_MISC, \
691 .reg_offset = offset, \
692}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT block.
 */
698#define BCMGENET_STAT_OFFSET 0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
703static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
705 STAT_NETDEV(rx_packets),
706 STAT_NETDEV(tx_packets),
707 STAT_NETDEV(rx_bytes),
708 STAT_NETDEV(tx_bytes),
709 STAT_NETDEV(rx_errors),
710 STAT_NETDEV(tx_errors),
711 STAT_NETDEV(rx_dropped),
712 STAT_NETDEV(tx_dropped),
713 STAT_NETDEV(multicast),
	/* UniMAC RSV (receive status vector) counters */
715 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
716 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
717 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
718 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
719 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
720 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
721 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
722 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
723 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
724 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
725 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
726 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
727 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
728 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
729 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
730 STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
731 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
732 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
733 STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
734 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
735 STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
736 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
737 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
738 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
739 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
740 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
741 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
742 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
743 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV (transmit status vector) counters */
745 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
746 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
747 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
748 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
749 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
750 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
751 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
752 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
753 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
754 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
755 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
756 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
757 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
758 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
759 STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
760 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
761 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
762 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
763 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
764 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
765 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
766 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
767 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
768 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
769 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
770 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
771 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
772 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
773 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
774
775 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
776 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
777 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
778 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
779
780 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
781 UMAC_RBUF_OVFL_CNT),
782 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
783 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
784 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
785 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
786 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
787};
788
789#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
790
791static void bcmgenet_get_drvinfo(struct net_device *dev,
792 struct ethtool_drvinfo *info)
793{
794 strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
795 strlcpy(info->version, "v2.0", sizeof(info->version));
796}
797
798static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
799{
800 switch (string_set) {
801 case ETH_SS_STATS:
802 return BCMGENET_STATS_LEN;
803 default:
804 return -EOPNOTSUPP;
805 }
806}
807
808static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
809 u8 *data)
810{
811 int i;
812
813 switch (stringset) {
814 case ETH_SS_STATS:
815 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
816 memcpy(data + i * ETH_GSTRING_LEN,
817 bcmgenet_gstrings_stats[i].stat_string,
818 ETH_GSTRING_LEN);
819 }
820 break;
821 }
822}
823
824static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
825{
826 int i, j = 0;
827
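	/* Hardware MIB counters are laid out in the same order as the MIB
	 * entries of bcmgenet_gstrings_stats[]; 'j' tracks the running byte
	 * offset into the MIB block and BCMGENET_STAT_OFFSET accounts for the
	 * gaps between the RX, TX and RUNT groups.
	 */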
828 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
829 const struct bcmgenet_stats *s;
830 u8 offset = 0;
831 u32 val = 0;
832 char *p;
833
834 s = &bcmgenet_gstrings_stats[i];
835 switch (s->type) {
836 case BCMGENET_STAT_NETDEV:
837 case BCMGENET_STAT_SOFT:
838 continue;
839 case BCMGENET_STAT_MIB_RX:
840 case BCMGENET_STAT_MIB_TX:
841 case BCMGENET_STAT_RUNT:
842 if (s->type != BCMGENET_STAT_MIB_RX)
843 offset = BCMGENET_STAT_OFFSET;
844 val = bcmgenet_umac_readl(priv,
845 UMAC_MIB_START + j + offset);
846 break;
847 case BCMGENET_STAT_MISC:
848 val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
850 if (val == ~0)
851 bcmgenet_umac_writel(priv, 0, s->reg_offset);
852 break;
853 }
854
855 j += s->stat_sizeof;
856 p = (char *)priv + s->stat_offset;
857 *(u32 *)p = val;
858 }
859}
860
861static void bcmgenet_get_ethtool_stats(struct net_device *dev,
862 struct ethtool_stats *stats,
863 u64 *data)
864{
865 struct bcmgenet_priv *priv = netdev_priv(dev);
866 int i;
867
868 if (netif_running(dev))
869 bcmgenet_update_mib_counters(priv);
870
871 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
872 const struct bcmgenet_stats *s;
873 char *p;
874
875 s = &bcmgenet_gstrings_stats[i];
876 if (s->type == BCMGENET_STAT_NETDEV)
877 p = (char *)&dev->stats;
878 else
879 p = (char *)priv;
880 p += s->stat_offset;
881 data[i] = *(u32 *)p;
882 }
883}
884
885static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
886{
887 struct bcmgenet_priv *priv = netdev_priv(dev);
888 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
889 u32 reg;
890
891 if (enable && !priv->clk_eee_enabled) {
892 clk_prepare_enable(priv->clk_eee);
893 priv->clk_eee_enabled = true;
894 }
895
896 reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
897 if (enable)
898 reg |= EEE_EN;
899 else
900 reg &= ~EEE_EN;
901 bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
902
903
904 reg = __raw_readl(priv->base + off);
905 if (enable)
906 reg |= TBUF_EEE_EN | TBUF_PM_EN;
907 else
908 reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
909 __raw_writel(reg, priv->base + off);
910
911
912 reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
913 if (enable)
914 reg |= RBUF_EEE_EN | RBUF_PM_EN;
915 else
916 reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
917 bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
918
919 if (!enable && priv->clk_eee_enabled) {
920 clk_disable_unprepare(priv->clk_eee);
921 priv->clk_eee_enabled = false;
922 }
923
924 priv->eee.eee_enabled = enable;
925 priv->eee.eee_active = enable;
926}
927
928static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
929{
930 struct bcmgenet_priv *priv = netdev_priv(dev);
931 struct ethtool_eee *p = &priv->eee;
932
933 if (GENET_IS_V1(priv))
934 return -EOPNOTSUPP;
935
936 e->eee_enabled = p->eee_enabled;
937 e->eee_active = p->eee_active;
938 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
939
940 return phy_ethtool_get_eee(priv->phydev, e);
941}
942
943static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
944{
945 struct bcmgenet_priv *priv = netdev_priv(dev);
946 struct ethtool_eee *p = &priv->eee;
947 int ret = 0;
948
949 if (GENET_IS_V1(priv))
950 return -EOPNOTSUPP;
951
952 p->eee_enabled = e->eee_enabled;
953
954 if (!p->eee_enabled) {
955 bcmgenet_eee_enable_set(dev, false);
956 } else {
957 ret = phy_init_eee(priv->phydev, 0);
958 if (ret) {
959 netif_err(priv, hw, dev, "EEE initialization failed\n");
960 return ret;
961 }
962
963 bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
964 bcmgenet_eee_enable_set(dev, true);
965 }
966
967 return phy_ethtool_set_eee(priv->phydev, e);
968}
969
970static int bcmgenet_nway_reset(struct net_device *dev)
971{
972 struct bcmgenet_priv *priv = netdev_priv(dev);
973
974 return genphy_restart_aneg(priv->phydev);
975}
976
977
978static struct ethtool_ops bcmgenet_ethtool_ops = {
979 .get_strings = bcmgenet_get_strings,
980 .get_sset_count = bcmgenet_get_sset_count,
981 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
982 .get_settings = bcmgenet_get_settings,
983 .set_settings = bcmgenet_set_settings,
984 .get_drvinfo = bcmgenet_get_drvinfo,
985 .get_link = ethtool_op_get_link,
986 .get_msglevel = bcmgenet_get_msglevel,
987 .set_msglevel = bcmgenet_set_msglevel,
988 .get_wol = bcmgenet_get_wol,
989 .set_wol = bcmgenet_set_wol,
990 .get_eee = bcmgenet_get_eee,
991 .set_eee = bcmgenet_set_eee,
992 .nway_reset = bcmgenet_nway_reset,
993 .get_coalesce = bcmgenet_get_coalesce,
994 .set_coalesce = bcmgenet_set_coalesce,
995};
996
/* Power down the unimac, based on mode. */
998static int bcmgenet_power_down(struct bcmgenet_priv *priv,
999 enum bcmgenet_power_mode mode)
1000{
1001 int ret = 0;
1002 u32 reg;
1003
1004 switch (mode) {
1005 case GENET_POWER_CABLE_SENSE:
1006 phy_detach(priv->phydev);
1007 break;
1008
1009 case GENET_POWER_WOL_MAGIC:
1010 ret = bcmgenet_wol_power_down_cfg(priv, mode);
1011 break;
1012
1013 case GENET_POWER_PASSIVE:
		/* Power down the PHY, DLL and bias via the EXT block when present */
1015 if (priv->hw_params->flags & GENET_HAS_EXT) {
1016 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1017 reg |= (EXT_PWR_DOWN_PHY |
1018 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1019 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1020
1021 bcmgenet_phy_power_set(priv->dev, false);
1022 }
1023 break;
1024 default:
1025 break;
1026 }
1027
	return ret;
1029}
1030
1031static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1032 enum bcmgenet_power_mode mode)
1033{
1034 u32 reg;
1035
1036 if (!(priv->hw_params->flags & GENET_HAS_EXT))
1037 return;
1038
1039 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1040
1041 switch (mode) {
1042 case GENET_POWER_PASSIVE:
1043 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
1044 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
1046 case GENET_POWER_CABLE_SENSE:
		/* enable APD */
1048 reg |= EXT_PWR_DN_EN_LD;
1049 break;
1050 case GENET_POWER_WOL_MAGIC:
1051 bcmgenet_wol_power_up_cfg(priv, mode);
1052 return;
1053 default:
1054 break;
1055 }
1056
1057 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1058 if (mode == GENET_POWER_PASSIVE) {
1059 bcmgenet_phy_power_set(priv->dev, true);
1060 bcmgenet_mii_reset(priv->dev);
1061 }
1062}
1063
/* ioctl handles special commands that are not present in ethtool. */
1065static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1066{
1067 struct bcmgenet_priv *priv = netdev_priv(dev);
1068 int val = 0;
1069
1070 if (!netif_running(dev))
1071 return -EINVAL;
1072
1073 switch (cmd) {
1074 case SIOCGMIIPHY:
1075 case SIOCGMIIREG:
1076 case SIOCSMIIREG:
1077 if (!priv->phydev)
1078 val = -ENODEV;
1079 else
1080 val = phy_mii_ioctl(priv->phydev, rq, cmd);
1081 break;
1082
1083 default:
1084 val = -EINVAL;
1085 break;
1086 }
1087
1088 return val;
1089}
1090
1091static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1092 struct bcmgenet_tx_ring *ring)
1093{
1094 struct enet_cb *tx_cb_ptr;
1095
1096 tx_cb_ptr = ring->cbs;
1097 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1098
	/* Advancing local write pointer */
1100 if (ring->write_ptr == ring->end_ptr)
1101 ring->write_ptr = ring->cb_ptr;
1102 else
1103 ring->write_ptr++;
1104
1105 return tx_cb_ptr;
1106}
1107
1108
1109static void bcmgenet_free_cb(struct enet_cb *cb)
1110{
1111 dev_kfree_skb_any(cb->skb);
1112 cb->skb = NULL;
1113 dma_unmap_addr_set(cb, dma_addr, 0);
1114}
1115
1116static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
1117{
1118 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1119 INTRL2_CPU_MASK_SET);
1120}
1121
1122static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
1123{
1124 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1125 INTRL2_CPU_MASK_CLEAR);
1126}
1127
1128static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1129{
1130 bcmgenet_intrl2_1_writel(ring->priv,
1131 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1132 INTRL2_CPU_MASK_SET);
1133}
1134
1135static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1136{
1137 bcmgenet_intrl2_1_writel(ring->priv,
1138 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1139 INTRL2_CPU_MASK_CLEAR);
1140}
1141
1142static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
1143{
1144 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1145 INTRL2_CPU_MASK_SET);
1146}
1147
1148static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
1149{
1150 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1151 INTRL2_CPU_MASK_CLEAR);
1152}
1153
1154static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1155{
1156 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1157 INTRL2_CPU_MASK_CLEAR);
1158}
1159
1160static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1161{
1162 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1163 INTRL2_CPU_MASK_SET);
1164}
1165
/* Unlocked version of the reclaim routine */
1167static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1168 struct bcmgenet_tx_ring *ring)
1169{
1170 struct bcmgenet_priv *priv = netdev_priv(dev);
1171 struct enet_cb *tx_cb_ptr;
1172 struct netdev_queue *txq;
1173 unsigned int pkts_compl = 0;
1174 unsigned int c_index;
1175 unsigned int txbds_ready;
1176 unsigned int txbds_processed = 0;
1177
	/* Compute how many buffers are transmitted since last xmit call */
1179 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
1180 c_index &= DMA_C_INDEX_MASK;
1181
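	/* The hardware consumer index is a free-running counter masked by
	 * DMA_C_INDEX_MASK, so account for wrap-around when computing how
	 * many BDs have completed.
	 */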
1182 if (likely(c_index >= ring->c_index))
1183 txbds_ready = c_index - ring->c_index;
1184 else
1185 txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
1186
1187 netif_dbg(priv, tx_done, dev,
1188 "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1189 __func__, ring->index, ring->c_index, c_index, txbds_ready);
1190
	/* Reclaim transmitted buffers */
1192 while (txbds_processed < txbds_ready) {
1193 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
1194 if (tx_cb_ptr->skb) {
1195 pkts_compl++;
1196 dev->stats.tx_packets++;
1197 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1198 dma_unmap_single(&dev->dev,
1199 dma_unmap_addr(tx_cb_ptr, dma_addr),
1200 tx_cb_ptr->skb->len,
1201 DMA_TO_DEVICE);
1202 bcmgenet_free_cb(tx_cb_ptr);
1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1204 dev->stats.tx_bytes +=
1205 dma_unmap_len(tx_cb_ptr, dma_len);
1206 dma_unmap_page(&dev->dev,
1207 dma_unmap_addr(tx_cb_ptr, dma_addr),
1208 dma_unmap_len(tx_cb_ptr, dma_len),
1209 DMA_TO_DEVICE);
1210 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
1211 }
1212
1213 txbds_processed++;
1214 if (likely(ring->clean_ptr < ring->end_ptr))
1215 ring->clean_ptr++;
1216 else
1217 ring->clean_ptr = ring->cb_ptr;
1218 }
1219
1220 ring->free_bds += txbds_processed;
1221 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
1222
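	/* If enough descriptors were freed for a worst-case SKB (linear head
	 * plus MAX_SKB_FRAGS fragments), it is safe to wake the queue again.
	 */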
1223 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1224 txq = netdev_get_tx_queue(dev, ring->queue);
1225 if (netif_tx_queue_stopped(txq))
1226 netif_tx_wake_queue(txq);
1227 }
1228
1229 return pkts_compl;
1230}
1231
1232static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1233 struct bcmgenet_tx_ring *ring)
1234{
1235 unsigned int released;
1236 unsigned long flags;
1237
1238 spin_lock_irqsave(&ring->lock, flags);
1239 released = __bcmgenet_tx_reclaim(dev, ring);
1240 spin_unlock_irqrestore(&ring->lock, flags);
1241
1242 return released;
1243}
1244
1245static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1246{
1247 struct bcmgenet_tx_ring *ring =
1248 container_of(napi, struct bcmgenet_tx_ring, napi);
1249 unsigned int work_done = 0;
1250
1251 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
1252
1253 if (work_done == 0) {
1254 napi_complete(napi);
1255 ring->int_enable(ring);
1256
1257 return 0;
1258 }
1259
1260 return budget;
1261}
1262
1263static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1264{
1265 struct bcmgenet_priv *priv = netdev_priv(dev);
1266 int i;
1267
1268 if (netif_is_multiqueue(dev)) {
1269 for (i = 0; i < priv->hw_params->tx_queues; i++)
1270 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1271 }
1272
1273 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1274}
1275
/* Transmits a single SKB (either the head of a fragment list or a
 * standalone SKB); the caller must hold the ring lock.
 */
1279static int bcmgenet_xmit_single(struct net_device *dev,
1280 struct sk_buff *skb,
1281 u16 dma_desc_flags,
1282 struct bcmgenet_tx_ring *ring)
1283{
1284 struct bcmgenet_priv *priv = netdev_priv(dev);
1285 struct device *kdev = &priv->pdev->dev;
1286 struct enet_cb *tx_cb_ptr;
1287 unsigned int skb_len;
1288 dma_addr_t mapping;
1289 u32 length_status;
1290 int ret;
1291
1292 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1293
1294 if (unlikely(!tx_cb_ptr))
1295 BUG();
1296
1297 tx_cb_ptr->skb = skb;
1298
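	/* The caller has already padded the frame to ETH_ZLEN, so map at
	 * least that many bytes to satisfy the minimum frame size.
	 */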
1299 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
1300
1301 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1302 ret = dma_mapping_error(kdev, mapping);
1303 if (ret) {
1304 priv->mib.tx_dma_failed++;
1305 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1306 dev_kfree_skb(skb);
1307 return ret;
1308 }
1309
1310 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1311 dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
1312 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1313 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
1314 DMA_TX_APPEND_CRC;
1315
1316 if (skb->ip_summed == CHECKSUM_PARTIAL)
1317 length_status |= DMA_TX_DO_CSUM;
1318
1319 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
1320
1321 return 0;
1322}
1323
/* Transmit an SKB fragment */
1325static int bcmgenet_xmit_frag(struct net_device *dev,
1326 skb_frag_t *frag,
1327 u16 dma_desc_flags,
1328 struct bcmgenet_tx_ring *ring)
1329{
1330 struct bcmgenet_priv *priv = netdev_priv(dev);
1331 struct device *kdev = &priv->pdev->dev;
1332 struct enet_cb *tx_cb_ptr;
1333 dma_addr_t mapping;
1334 int ret;
1335
1336 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1337
1338 if (unlikely(!tx_cb_ptr))
1339 BUG();
1340 tx_cb_ptr->skb = NULL;
1341
1342 mapping = skb_frag_dma_map(kdev, frag, 0,
1343 skb_frag_size(frag), DMA_TO_DEVICE);
1344 ret = dma_mapping_error(kdev, mapping);
1345 if (ret) {
1346 priv->mib.tx_dma_failed++;
1347 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1348 __func__);
1349 return ret;
1350 }
1351
1352 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1353 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
1354
1355 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1356 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1357 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1358
1359 return 0;
1360}
1361
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
1365static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1366 struct sk_buff *skb)
1367{
1368 struct status_64 *status = NULL;
1369 struct sk_buff *new_skb;
1370 u16 offset;
1371 u8 ip_proto;
1372 u16 ip_ver;
1373 u32 tx_csum_info;
1374
1375 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If the 64 byte status block is enabled, we must make sure
		 * the skb has enough headroom for us to insert it.
		 */
1379 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1380 dev_kfree_skb(skb);
1381 if (!new_skb) {
1382 dev->stats.tx_dropped++;
1383 return NULL;
1384 }
1385 skb = new_skb;
1386 }
1387
1388 skb_push(skb, sizeof(*status));
1389 status = (struct status_64 *)skb->data;
1390
1391 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1392 ip_ver = htons(skb->protocol);
1393 switch (ip_ver) {
1394 case ETH_P_IP:
1395 ip_proto = ip_hdr(skb)->protocol;
1396 break;
1397 case ETH_P_IPV6:
1398 ip_proto = ipv6_hdr(skb)->nexthdr;
1399 break;
1400 default:
1401 return skb;
1402 }
1403
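		/* Checksum offsets are relative to the start of the Ethernet
		 * frame, so back out the status block pushed above.
		 */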
1404 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1405 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1406 (offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
1411 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1412 tx_csum_info |= STATUS_TX_CSUM_LV;
1413 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1414 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
1415 } else {
1416 tx_csum_info = 0;
1417 }
1418
1419 status->tx_csum_info = tx_csum_info;
1420 }
1421
1422 return skb;
1423}
1424
1425static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1426{
1427 struct bcmgenet_priv *priv = netdev_priv(dev);
1428 struct bcmgenet_tx_ring *ring = NULL;
1429 struct netdev_queue *txq;
1430 unsigned long flags = 0;
1431 int nr_frags, index;
1432 u16 dma_desc_flags;
1433 int ret;
1434 int i;
1435
1436 index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0 (highest priority queue)
	 * queue_mapping = 2, goes to ring 1
	 * queue_mapping = 3, goes to ring 2
	 * queue_mapping = 4, goes to ring 3
	 */
1444 if (index == 0)
1445 index = DESC_INDEX;
1446 else
1447 index -= 1;
1448
1449 nr_frags = skb_shinfo(skb)->nr_frags;
1450 ring = &priv->tx_rings[index];
1451 txq = netdev_get_tx_queue(dev, ring->queue);
1452
1453 spin_lock_irqsave(&ring->lock, flags);
1454 if (ring->free_bds <= nr_frags + 1) {
1455 netif_tx_stop_queue(txq);
1456 netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
1457 __func__, index, ring->queue);
1458 ret = NETDEV_TX_BUSY;
1459 goto out;
1460 }
1461
1462 if (skb_padto(skb, ETH_ZLEN)) {
1463 ret = NETDEV_TX_OK;
1464 goto out;
1465 }
1466
1467
1468 if (priv->desc_64b_en) {
1469 skb = bcmgenet_put_tx_csum(dev, skb);
1470 if (!skb) {
1471 ret = NETDEV_TX_OK;
1472 goto out;
1473 }
1474 }
1475
1476 dma_desc_flags = DMA_SOP;
1477 if (nr_frags == 0)
1478 dma_desc_flags |= DMA_EOP;
1479
1480
1481 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
1482 if (ret) {
1483 ret = NETDEV_TX_OK;
1484 goto out;
1485 }
1486
1487
1488 for (i = 0; i < nr_frags; i++) {
1489 ret = bcmgenet_xmit_frag(dev,
1490 &skb_shinfo(skb)->frags[i],
1491 (i == nr_frags - 1) ? DMA_EOP : 0,
1492 ring);
1493 if (ret) {
1494 ret = NETDEV_TX_OK;
1495 goto out;
1496 }
1497 }
1498
1499 skb_tx_timestamp(skb);
1500
1501
1502 ring->free_bds -= nr_frags + 1;
1503 ring->prod_index += nr_frags + 1;
1504 ring->prod_index &= DMA_P_INDEX_MASK;
1505
1506 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1507 netif_tx_stop_queue(txq);
1508
1509 if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
1511 bcmgenet_tdma_ring_writel(priv, ring->index,
1512 ring->prod_index, TDMA_PROD_INDEX);
1513out:
1514 spin_unlock_irqrestore(&ring->lock, flags);
1515
1516 return ret;
1517}
1518
1519static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
1520 struct enet_cb *cb)
1521{
1522 struct device *kdev = &priv->pdev->dev;
1523 struct sk_buff *skb;
1524 struct sk_buff *rx_skb;
1525 dma_addr_t mapping;
1526
	/* Allocate a new Rx skb */
1528 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
1529 if (!skb) {
1530 priv->mib.alloc_rx_buff_failed++;
1531 netif_err(priv, rx_err, priv->dev,
1532 "%s: Rx skb allocation failed\n", __func__);
1533 return NULL;
1534 }
1535
1536
1537 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
1538 DMA_FROM_DEVICE);
1539 if (dma_mapping_error(kdev, mapping)) {
1540 priv->mib.rx_dma_failed++;
1541 dev_kfree_skb_any(skb);
1542 netif_err(priv, rx_err, priv->dev,
1543 "%s: Rx skb DMA mapping failed\n", __func__);
1544 return NULL;
1545 }
1546
	/* Grab the current Rx skb from the ring and DMA-unmap it */
1548 rx_skb = cb->skb;
1549 if (likely(rx_skb))
1550 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
1551 priv->rx_buf_len, DMA_FROM_DEVICE);
1552
1553
1554 cb->skb = skb;
1555 dma_unmap_addr_set(cb, dma_addr, mapping);
1556 dmadesc_set_addr(priv, cb->bd_addr, mapping);
1557
1558
1559 return rx_skb;
1560}
1561
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
1565static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1566 unsigned int budget)
1567{
1568 struct bcmgenet_priv *priv = ring->priv;
1569 struct net_device *dev = priv->dev;
1570 struct enet_cb *cb;
1571 struct sk_buff *skb;
1572 u32 dma_length_status;
1573 unsigned long dma_flag;
1574 int len;
1575 unsigned int rxpktprocessed = 0, rxpkttoprocess;
1576 unsigned int p_index;
1577 unsigned int discards;
1578 unsigned int chksum_ok = 0;
1579
1580 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
1581
1582 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
1583 DMA_P_INDEX_DISCARD_CNT_MASK;
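	/* The hardware discard counter is cumulative, so fold only the newly
	 * seen discards into the netdev statistics.
	 */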
1584 if (discards > ring->old_discards) {
1585 discards = discards - ring->old_discards;
1586 dev->stats.rx_missed_errors += discards;
1587 dev->stats.rx_errors += discards;
1588 ring->old_discards += discards;
1589
		/* Clear HW register when we reach 75% of maximum 0xFFFF */
1591 if (ring->old_discards >= 0xC000) {
1592 ring->old_discards = 0;
1593 bcmgenet_rdma_ring_writel(priv, ring->index, 0,
1594 RDMA_PROD_INDEX);
1595 }
1596 }
1597
1598 p_index &= DMA_P_INDEX_MASK;
1599
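	/* The producer index is likewise a free-running counter; account for
	 * wrap-around relative to our last consumer index.
	 */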
1600 if (likely(p_index >= ring->c_index))
1601 rxpkttoprocess = p_index - ring->c_index;
1602 else
1603 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
1604 p_index;
1605
1606 netif_dbg(priv, rx_status, dev,
1607 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
1608
1609 while ((rxpktprocessed < rxpkttoprocess) &&
1610 (rxpktprocessed < budget)) {
1611 cb = &priv->rx_cbs[ring->read_ptr];
1612 skb = bcmgenet_rx_refill(priv, cb);
1613
1614 if (unlikely(!skb)) {
1615 dev->stats.rx_dropped++;
1616 goto next;
1617 }
1618
1619 if (!priv->desc_64b_en) {
1620 dma_length_status =
1621 dmadesc_get_length_status(priv, cb->bd_addr);
1622 } else {
1623 struct status_64 *status;
1624
1625 status = (struct status_64 *)skb->data;
1626 dma_length_status = status->length_status;
1627 }
1628

		/* DMA flags and length are still valid no matter how
		 * we got the Received Buffer Descriptor (RBD)
		 */
1632 dma_flag = dma_length_status & 0xffff;
1633 len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
1634
1635 netif_dbg(priv, rx_status, dev,
1636 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
1637 __func__, p_index, ring->c_index,
1638 ring->read_ptr, dma_length_status);
1639
1640 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1641 netif_err(priv, rx_status, dev,
1642 "dropping fragmented packet!\n");
1643 dev->stats.rx_errors++;
1644 dev_kfree_skb_any(skb);
1645 goto next;
1646 }
1647
		/* report errors */
1649 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
1650 DMA_RX_OV |
1651 DMA_RX_NO |
1652 DMA_RX_LG |
1653 DMA_RX_RXER))) {
1654 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
1655 (unsigned int)dma_flag);
1656 if (dma_flag & DMA_RX_CRC_ERROR)
1657 dev->stats.rx_crc_errors++;
1658 if (dma_flag & DMA_RX_OV)
1659 dev->stats.rx_over_errors++;
1660 if (dma_flag & DMA_RX_NO)
1661 dev->stats.rx_frame_errors++;
1662 if (dma_flag & DMA_RX_LG)
1663 dev->stats.rx_length_errors++;
1664 dev->stats.rx_errors++;
1665 dev_kfree_skb_any(skb);
1666 goto next;
1667 }
1668
1669 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
1670 priv->desc_rxchk_en;
1671
1672 skb_put(skb, len);
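		/* When 64 byte Rx status blocks are enabled, the hardware
		 * prepends one to every packet; strip it here.
		 */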
1673 if (priv->desc_64b_en) {
1674 skb_pull(skb, 64);
1675 len -= 64;
1676 }
1677
1678 if (likely(chksum_ok))
1679 skb->ip_summed = CHECKSUM_UNNECESSARY;
1680
		/* remove the hardware 2 bytes added for IP alignment */
1682 skb_pull(skb, 2);
1683 len -= 2;
1684
1685 if (priv->crc_fwd_en) {
1686 skb_trim(skb, len - ETH_FCS_LEN);
1687 len -= ETH_FCS_LEN;
1688 }
1689
1690
1691 skb->protocol = eth_type_trans(skb, priv->dev);
1692 dev->stats.rx_packets++;
1693 dev->stats.rx_bytes += len;
1694 if (dma_flag & DMA_RX_MULT)
1695 dev->stats.multicast++;
1696
1697
1698 napi_gro_receive(&ring->napi, skb);
1699 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1700
1701next:
1702 rxpktprocessed++;
1703 if (likely(ring->read_ptr < ring->end_ptr))
1704 ring->read_ptr++;
1705 else
1706 ring->read_ptr = ring->cb_ptr;
1707
1708 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
1709 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
1710 }
1711
1712 return rxpktprocessed;
1713}
1714
/* Rx NAPI polling method */
1716static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1717{
1718 struct bcmgenet_rx_ring *ring = container_of(napi,
1719 struct bcmgenet_rx_ring, napi);
1720 unsigned int work_done;
1721
1722 work_done = bcmgenet_desc_rx(ring, budget);
1723
1724 if (work_done < budget) {
1725 napi_complete(napi);
1726 ring->int_enable(ring);
1727 }
1728
1729 return work_done;
1730}
1731
/* Assign skb to RX DMA descriptor. */
1733static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1734 struct bcmgenet_rx_ring *ring)
1735{
1736 struct enet_cb *cb;
1737 struct sk_buff *skb;
1738 int i;
1739
1740 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
1741
1742
1743 for (i = 0; i < ring->size; i++) {
1744 cb = ring->cbs + i;
1745 skb = bcmgenet_rx_refill(priv, cb);
1746 if (skb)
1747 dev_kfree_skb_any(skb);
1748 if (!cb->skb)
1749 return -ENOMEM;
1750 }
1751
1752 return 0;
1753}
1754
1755static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1756{
1757 struct enet_cb *cb;
1758 int i;
1759
1760 for (i = 0; i < priv->num_rx_bds; i++) {
1761 cb = &priv->rx_cbs[i];
1762
1763 if (dma_unmap_addr(cb, dma_addr)) {
1764 dma_unmap_single(&priv->dev->dev,
1765 dma_unmap_addr(cb, dma_addr),
1766 priv->rx_buf_len, DMA_FROM_DEVICE);
1767 dma_unmap_addr_set(cb, dma_addr, 0);
1768 }
1769
1770 if (cb->skb)
1771 bcmgenet_free_cb(cb);
1772 }
1773}
1774
1775static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
1776{
1777 u32 reg;
1778
1779 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1780 if (enable)
1781 reg |= mask;
1782 else
1783 reg &= ~mask;
1784 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1785
	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed before returning.
	 */
1789 if (enable == 0)
1790 usleep_range(1000, 2000);
1791}
1792
1793static int reset_umac(struct bcmgenet_priv *priv)
1794{
1795 struct device *kdev = &priv->pdev->dev;
1796 unsigned int timeout = 0;
1797 u32 reg;
1798
1799
1800 bcmgenet_rbuf_ctrl_set(priv, 0);
1801 udelay(10);
1802
1803
1804 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1805
	/* issue soft reset, wait for it to complete */
1807 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1808 while (timeout++ < 1000) {
1809 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1810 if (!(reg & CMD_SW_RESET))
1811 return 0;
1812
1813 udelay(1);
1814 }
1815
1816 if (timeout == 1000) {
1817 dev_err(kdev,
1818 "timeout waiting for MAC to come out of reset\n");
1819 return -ETIMEDOUT;
1820 }
1821
1822 return 0;
1823}
1824
1825static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1826{
	/* Mask all interrupts and clear any pending ones */
1828 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1829 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1830 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1831 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1832 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1833 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1834}
1835
1836static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1837{
1838 u32 int0_enable = 0;
1839
	/* Monitor cable plug/unplugged events for internal PHY, external PHY
	 * and MoCA PHY
	 */
1843 if (priv->internal_phy) {
1844 int0_enable |= UMAC_IRQ_LINK_EVENT;
1845 } else if (priv->ext_phy) {
1846 int0_enable |= UMAC_IRQ_LINK_EVENT;
1847 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1848 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1849 int0_enable |= UMAC_IRQ_LINK_EVENT;
1850 }
1851 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1852}
1853
1854static int init_umac(struct bcmgenet_priv *priv)
1855{
1856 struct device *kdev = &priv->pdev->dev;
1857 int ret;
1858 u32 reg;
1859 u32 int0_enable = 0;
1860 u32 int1_enable = 0;
1861 int i;
1862
1863 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1864
1865 ret = reset_umac(priv);
1866 if (ret)
1867 return ret;
1868
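	/* Clear the UniMAC command register so the MAC starts from a known
	 * state before it is reconfigured below.
	 */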
1869 bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* clear tx/rx counters */
1871 bcmgenet_umac_writel(priv,
1872 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1873 UMAC_MIB_CTRL);
1874 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1875
1876 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1877
	/* init rx registers, enable ip header optimization */
1879 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1880 reg |= RBUF_ALIGN_2B;
1881 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1882
1883 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1884 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1885
1886 bcmgenet_intr_disable(priv);
1887
	/* Enable Rx default queue 16 interrupts */
1889 int0_enable |= UMAC_IRQ_RXDMA_DONE;
1890
	/* Enable Tx default queue 16 interrupts */
1892 int0_enable |= UMAC_IRQ_TXDMA_DONE;
1893
	/* Configure backpressure vectors for MoCA */
1895 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1896 reg = bcmgenet_bp_mc_get(priv);
1897 reg |= BIT(priv->hw_params->bp_in_en_shift);
1898
1899
1900 if (netif_is_multiqueue(priv->dev))
1901 reg |= priv->hw_params->bp_in_mask;
1902 else
1903 reg &= ~priv->hw_params->bp_in_mask;
1904 bcmgenet_bp_mc_set(priv, reg);
1905 }
1906
1907
1908 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1909 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1910
	/* Enable Rx priority queue interrupts */
1912 for (i = 0; i < priv->hw_params->rx_queues; ++i)
1913 int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
1914
	/* Enable Tx priority queue interrupts */
1916 for (i = 0; i < priv->hw_params->tx_queues; ++i)
1917 int1_enable |= (1 << i);
1918
1919 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1920 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
1921
1922
1923 dev_dbg(kdev, "done init umac\n");
1924
1925 return 0;
1926}
1927
/* Initialize a Tx ring along with corresponding hardware registers */
1929static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1930 unsigned int index, unsigned int size,
1931 unsigned int start_ptr, unsigned int end_ptr)
1932{
1933 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1934 u32 words_per_bd = WORDS_PER_BD(priv);
1935 u32 flow_period_val = 0;
1936
1937 spin_lock_init(&ring->lock);
1938 ring->priv = priv;
1939 ring->index = index;
1940 if (index == DESC_INDEX) {
1941 ring->queue = 0;
1942 ring->int_enable = bcmgenet_tx_ring16_int_enable;
1943 ring->int_disable = bcmgenet_tx_ring16_int_disable;
1944 } else {
1945 ring->queue = index + 1;
1946 ring->int_enable = bcmgenet_tx_ring_int_enable;
1947 ring->int_disable = bcmgenet_tx_ring_int_disable;
1948 }
1949 ring->cbs = priv->tx_cbs + start_ptr;
1950 ring->size = size;
1951 ring->clean_ptr = start_ptr;
1952 ring->c_index = 0;
1953 ring->free_bds = size;
1954 ring->write_ptr = start_ptr;
1955 ring->cb_ptr = start_ptr;
1956 ring->end_ptr = end_ptr - 1;
1957 ring->prod_index = 0;
1958
1959
1960 if (index != DESC_INDEX)
1961 flow_period_val = ENET_MAX_MTU_SIZE << 16;
1962
1963 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1964 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1965 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1966
1967 bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
1968 TDMA_FLOW_PERIOD);
1969 bcmgenet_tdma_ring_writel(priv, index,
1970 ((size << DMA_RING_SIZE_SHIFT) |
1971 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
1972
	/* Set start and end address, read and write pointers */
1974 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1975 DMA_START_ADDR);
1976 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1977 TDMA_READ_PTR);
1978 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1979 TDMA_WRITE_PTR);
1980 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1981 DMA_END_ADDR);
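
	/* Note that ring addresses and pointers above are expressed in units
	 * of 32-bit words (words_per_bd words per descriptor).
	 */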
1982}
1983
/* Initialize an RDMA ring */
1985static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1986 unsigned int index, unsigned int size,
1987 unsigned int start_ptr, unsigned int end_ptr)
1988{
1989 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
1990 u32 words_per_bd = WORDS_PER_BD(priv);
1991 int ret;
1992
1993 ring->priv = priv;
1994 ring->index = index;
1995 if (index == DESC_INDEX) {
1996 ring->int_enable = bcmgenet_rx_ring16_int_enable;
1997 ring->int_disable = bcmgenet_rx_ring16_int_disable;
1998 } else {
1999 ring->int_enable = bcmgenet_rx_ring_int_enable;
2000 ring->int_disable = bcmgenet_rx_ring_int_disable;
2001 }
2002 ring->cbs = priv->rx_cbs + start_ptr;
2003 ring->size = size;
2004 ring->c_index = 0;
2005 ring->read_ptr = start_ptr;
2006 ring->cb_ptr = start_ptr;
2007 ring->end_ptr = end_ptr - 1;
2008
2009 ret = bcmgenet_alloc_rx_buffers(priv, ring);
2010 if (ret)
2011 return ret;
2012
2013 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2014 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2015 bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2016 bcmgenet_rdma_ring_writel(priv, index,
2017 ((size << DMA_RING_SIZE_SHIFT) |
2018 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2019 bcmgenet_rdma_ring_writel(priv, index,
2020 (DMA_FC_THRESH_LO <<
2021 DMA_XOFF_THRESHOLD_SHIFT) |
2022 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2023
2024
2025 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2026 DMA_START_ADDR);
2027 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2028 RDMA_READ_PTR);
2029 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2030 RDMA_WRITE_PTR);
2031 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2032 DMA_END_ADDR);
2033
2034 return ret;
2035}
2036
2037static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
2038{
2039 unsigned int i;
2040 struct bcmgenet_tx_ring *ring;
2041
2042 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2043 ring = &priv->tx_rings[i];
2044 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2045 }
2046
2047 ring = &priv->tx_rings[DESC_INDEX];
2048 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2049}
2050
2051static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2052{
2053 unsigned int i;
2054 struct bcmgenet_tx_ring *ring;
2055
2056 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2057 ring = &priv->tx_rings[i];
2058 napi_enable(&ring->napi);
2059 }
2060
2061 ring = &priv->tx_rings[DESC_INDEX];
2062 napi_enable(&ring->napi);
2063}
2064
2065static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2066{
2067 unsigned int i;
2068 struct bcmgenet_tx_ring *ring;
2069
2070 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2071 ring = &priv->tx_rings[i];
2072 napi_disable(&ring->napi);
2073 }
2074
2075 ring = &priv->tx_rings[DESC_INDEX];
2076 napi_disable(&ring->napi);
2077}
2078
2079static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2080{
2081 unsigned int i;
2082 struct bcmgenet_tx_ring *ring;
2083
2084 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2085 ring = &priv->tx_rings[i];
2086 netif_napi_del(&ring->napi);
2087 }
2088
2089 ring = &priv->tx_rings[DESC_INDEX];
2090 netif_napi_del(&ring->napi);
2091}
2092
/* Initialize Tx queues
 *
 * Queues 0..(tx_queues - 1) are hardware priority queues, each with
 * tx_bds_per_q descriptors, queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue and takes whatever descriptors
 * remain (GENET_Q16_TX_BD_CNT).
 *
 * The transmit control block pool (tx_cbs[]) is partitioned the same
 * way: each priority queue gets a contiguous slice of tx_bds_per_q
 * control blocks and queue 16 uses the remainder.
 */
2108static void bcmgenet_init_tx_queues(struct net_device *dev)
2109{
2110 struct bcmgenet_priv *priv = netdev_priv(dev);
2111 u32 i, dma_enable;
2112 u32 dma_ctrl, ring_cfg;
2113 u32 dma_priority[3] = {0, 0, 0};
2114
2115 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2116 dma_enable = dma_ctrl & DMA_EN;
2117 dma_ctrl &= ~DMA_EN;
2118 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2119
2120 dma_ctrl = 0;
2121 ring_cfg = 0;
2122
2123
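 /* Enable strict priority arbiter mode */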
2124 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2125
2126
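 /* Initialize Tx priority queues */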
2127 for (i = 0; i < priv->hw_params->tx_queues; i++) {
2128 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2129 i * priv->hw_params->tx_bds_per_q,
2130 (i + 1) * priv->hw_params->tx_bds_per_q);
2131 ring_cfg |= (1 << i);
2132 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2133 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2134 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2135 }
2136
2137
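 /* Initialize Tx default queue 16 */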
2138 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2139 priv->hw_params->tx_queues *
2140 priv->hw_params->tx_bds_per_q,
2141 TOTAL_DESC);
2142 ring_cfg |= (1 << DESC_INDEX);
2143 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2144 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2145 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2146 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2147
2148
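 /* Set Tx queue priorities */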
2149 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2150 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2151 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2152
2153
2154 bcmgenet_init_tx_napi(priv);
2155
2156
2157 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2158
2159
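 /* Enable the ring buffers and restore DMA_EN if Tx DMA was enabled on entry */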
2160 if (dma_enable)
2161 dma_ctrl |= DMA_EN;
2162 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2163}
2164
2165static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2166{
2167 unsigned int i;
2168 struct bcmgenet_rx_ring *ring;
2169
2170 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2171 ring = &priv->rx_rings[i];
2172 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2173 }
2174
2175 ring = &priv->rx_rings[DESC_INDEX];
2176 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2177}
2178
2179static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2180{
2181 unsigned int i;
2182 struct bcmgenet_rx_ring *ring;
2183
2184 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2185 ring = &priv->rx_rings[i];
2186 napi_enable(&ring->napi);
2187 }
2188
2189 ring = &priv->rx_rings[DESC_INDEX];
2190 napi_enable(&ring->napi);
2191}
2192
2193static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2194{
2195 unsigned int i;
2196 struct bcmgenet_rx_ring *ring;
2197
2198 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2199 ring = &priv->rx_rings[i];
2200 napi_disable(&ring->napi);
2201 }
2202
2203 ring = &priv->rx_rings[DESC_INDEX];
2204 napi_disable(&ring->napi);
2205}
2206
2207static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2208{
2209 unsigned int i;
2210 struct bcmgenet_rx_ring *ring;
2211
2212 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2213 ring = &priv->rx_rings[i];
2214 netif_napi_del(&ring->napi);
2215 }
2216
2217 ring = &priv->rx_rings[DESC_INDEX];
2218 netif_napi_del(&ring->napi);
2219}
2220
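/* Initialize Rx queues
 *
 * Queues 0..(rx_queues - 1) are priority queues; the Hardware Filter
 * Block (HFB) can be used to steer selected traffic to them.
 *
 * Queue 16 (DESC_INDEX) is the default Rx queue and uses the remaining
 * GENET_Q16_RX_BD_CNT descriptors.
 */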
2228static int bcmgenet_init_rx_queues(struct net_device *dev)
2229{
2230 struct bcmgenet_priv *priv = netdev_priv(dev);
2231 u32 i;
2232 u32 dma_enable;
2233 u32 dma_ctrl;
2234 u32 ring_cfg;
2235 int ret;
2236
2237 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2238 dma_enable = dma_ctrl & DMA_EN;
2239 dma_ctrl &= ~DMA_EN;
2240 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2241
2242 dma_ctrl = 0;
2243 ring_cfg = 0;
2244
2245
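 /* Initialize Rx priority queues */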
2246 for (i = 0; i < priv->hw_params->rx_queues; i++) {
2247 ret = bcmgenet_init_rx_ring(priv, i,
2248 priv->hw_params->rx_bds_per_q,
2249 i * priv->hw_params->rx_bds_per_q,
2250 (i + 1) *
2251 priv->hw_params->rx_bds_per_q);
2252 if (ret)
2253 return ret;
2254
2255 ring_cfg |= (1 << i);
2256 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2257 }
2258
2259
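 /* Initialize Rx default queue 16 */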
2260 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2261 priv->hw_params->rx_queues *
2262 priv->hw_params->rx_bds_per_q,
2263 TOTAL_DESC);
2264 if (ret)
2265 return ret;
2266
2267 ring_cfg |= (1 << DESC_INDEX);
2268 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2269
2270
2271 bcmgenet_init_rx_napi(priv);
2272
2273
2274 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2275
2276
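 /* Enable the ring buffers and restore DMA_EN if Rx DMA was enabled on entry */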
2277 if (dma_enable)
2278 dma_ctrl |= DMA_EN;
2279 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2280
2281 return 0;
2282}
2283
2284static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2285{
2286 int ret = 0;
2287 int timeout = 0;
2288 u32 reg;
2289 u32 dma_ctrl;
2290 int i;
2291
2292
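 /* Disable TDMA to stop adding more frames to the Tx DMA */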
2293 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2294 reg &= ~DMA_EN;
2295 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2296
2297
2298 while (timeout++ < DMA_TIMEOUT_VAL) {
2299 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2300 if (reg & DMA_DISABLED)
2301 break;
2302
2303 udelay(1);
2304 }
2305
2306 if (timeout == DMA_TIMEOUT_VAL) {
2307 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2308 ret = -ETIMEDOUT;
2309 }
2310
2311
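 /* Give the hardware ~10ms to drain in-flight Tx and Rx packets */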
2312 usleep_range(10000, 20000);
2313
2314
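 /* Disable RDMA */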
2315 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2316 reg &= ~DMA_EN;
2317 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2318
2319 timeout = 0;
2320
2321 while (timeout++ < DMA_TIMEOUT_VAL) {
2322 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2323 if (reg & DMA_DISABLED)
2324 break;
2325
2326 udelay(1);
2327 }
2328
2329 if (timeout == DMA_TIMEOUT_VAL) {
2330 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2331 ret = -ETIMEDOUT;
2332 }
2333
2334 dma_ctrl = 0;
2335 for (i = 0; i < priv->hw_params->rx_queues; i++)
2336 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2337 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2338 reg &= ~dma_ctrl;
2339 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2340
2341 dma_ctrl = 0;
2342 for (i = 0; i < priv->hw_params->tx_queues; i++)
2343 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2344 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2345 reg &= ~dma_ctrl;
2346 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2347
2348 return ret;
2349}
2350
2351static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2352{
2353 int i;
2354
2355 bcmgenet_fini_rx_napi(priv);
2356 bcmgenet_fini_tx_napi(priv);
2357
2358
2359 bcmgenet_dma_teardown(priv);
2360
2361 for (i = 0; i < priv->num_tx_bds; i++) {
2362 if (priv->tx_cbs[i].skb != NULL) {
2363 dev_kfree_skb(priv->tx_cbs[i].skb);
2364 priv->tx_cbs[i].skb = NULL;
2365 }
2366 }
2367
2368 bcmgenet_free_rx_buffers(priv);
2369 kfree(priv->rx_cbs);
2370 kfree(priv->tx_cbs);
2371}
2372
2373
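/* Initialize the DMA control blocks and the Rx and Tx queues */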
2374static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2375{
2376 int ret;
2377 unsigned int i;
2378 struct enet_cb *cb;
2379
2380 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2381
2382
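 /* Initialize common Rx ring structures */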
2383 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2384 priv->num_rx_bds = TOTAL_DESC;
2385 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2386 GFP_KERNEL);
2387 if (!priv->rx_cbs)
2388 return -ENOMEM;
2389
2390 for (i = 0; i < priv->num_rx_bds; i++) {
2391 cb = priv->rx_cbs + i;
2392 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2393 }
2394
2395
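 /* Initialize common Tx ring structures */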
2396 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2397 priv->num_tx_bds = TOTAL_DESC;
2398 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2399 GFP_KERNEL);
2400 if (!priv->tx_cbs) {
2401 kfree(priv->rx_cbs);
2402 return -ENOMEM;
2403 }
2404
2405 for (i = 0; i < priv->num_tx_bds; i++) {
2406 cb = priv->tx_cbs + i;
2407 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2408 }
2409
2410
2411 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2412
2413
2414 ret = bcmgenet_init_rx_queues(priv->dev);
2415 if (ret) {
2416 netdev_err(priv->dev, "failed to initialize Rx queues\n");
2417 bcmgenet_free_rx_buffers(priv);
2418 kfree(priv->rx_cbs);
2419 kfree(priv->tx_cbs);
2420 return ret;
2421 }
2422
2423
2424 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2425
2426
2427 bcmgenet_init_tx_queues(priv->dev);
2428
2429 return 0;
2430}
2431
2432
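/* Interrupt bottom half */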
2433static void bcmgenet_irq_task(struct work_struct *work)
2434{
2435 struct bcmgenet_priv *priv = container_of(
2436 work, struct bcmgenet_priv, bcmgenet_irq_work);
2437
2438 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2439
2440 if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
2441 priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
2442 netif_dbg(priv, wol, priv->dev,
2443 "magic packet detected, waking up\n");
2444 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2445 }
2446
2447
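 /* Link UP/DOWN event */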
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2453 }
2454}
2455
2456
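/* bcmgenet_isr1: handle Rx and Tx priority queue interrupts */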
2457static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2458{
2459 struct bcmgenet_priv *priv = dev_id;
2460 struct bcmgenet_rx_ring *rx_ring;
2461 struct bcmgenet_tx_ring *tx_ring;
2462 unsigned int index;
2463
2464
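 /* Read irq status and keep only the unmasked sources */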
2465 priv->irq1_stat =
2466 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2467 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2468
2469
2470 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2471
2472 netif_dbg(priv, intr, priv->dev,
2473 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2474
2475
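 /* Check Rx priority queue interrupts */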
2476 for (index = 0; index < priv->hw_params->rx_queues; index++) {
2477 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2478 continue;
2479
2480 rx_ring = &priv->rx_rings[index];
2481
2482 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2483 rx_ring->int_disable(rx_ring);
2484 __napi_schedule(&rx_ring->napi);
2485 }
2486 }
2487
2488
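 /* Check Tx priority queue interrupts */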
2489 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2490 if (!(priv->irq1_stat & BIT(index)))
2491 continue;
2492
2493 tx_ring = &priv->tx_rings[index];
2494
2495 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2496 tx_ring->int_disable(tx_ring);
2497 __napi_schedule(&tx_ring->napi);
2498 }
2499 }
2500
2501 return IRQ_HANDLED;
2502}
2503
2504
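/* bcmgenet_isr0: handle the default Rx and Tx queues and miscellaneous events */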
2505static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2506{
2507 struct bcmgenet_priv *priv = dev_id;
2508 struct bcmgenet_rx_ring *rx_ring;
2509 struct bcmgenet_tx_ring *tx_ring;
2510
2511
2512 priv->irq0_stat =
2513 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2514 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2515
2516
2517 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
2518
2519 netif_dbg(priv, intr, priv->dev,
2520 "IRQ=0x%x\n", priv->irq0_stat);
2521
2522 if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
2523 rx_ring = &priv->rx_rings[DESC_INDEX];
2524
2525 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2526 rx_ring->int_disable(rx_ring);
2527 __napi_schedule(&rx_ring->napi);
2528 }
2529 }
2530
2531 if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
2532 tx_ring = &priv->tx_rings[DESC_INDEX];
2533
2534 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2535 tx_ring->int_disable(tx_ring);
2536 __napi_schedule(&tx_ring->napi);
2537 }
2538 }
2539
2540 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2541 UMAC_IRQ_PHY_DET_F |
2542 UMAC_IRQ_LINK_EVENT |
2543 UMAC_IRQ_HFB_SM |
2544 UMAC_IRQ_HFB_MM |
2545 UMAC_IRQ_MPD_R)) {
2546
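 /* Handle link and wake-up events in the bottom half */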
2547 schedule_work(&priv->bcmgenet_irq_work);
2548 }
2549
2550 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2551 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2552 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2553 wake_up(&priv->wq);
2554 }
2555
2556 return IRQ_HANDLED;
2557}
2558
2559static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2560{
2561 struct bcmgenet_priv *priv = dev_id;
2562
2563 pm_wakeup_event(&priv->pdev->dev, 0);
2564
2565 return IRQ_HANDLED;
2566}
2567
2568#ifdef CONFIG_NET_POLL_CONTROLLER
2569static void bcmgenet_poll_controller(struct net_device *dev)
2570{
2571 struct bcmgenet_priv *priv = netdev_priv(dev);
2572
2573
2574 disable_irq(priv->irq0);
2575 bcmgenet_isr0(priv->irq0, priv);
2576 enable_irq(priv->irq0);
2577
2578
2579 disable_irq(priv->irq1);
2580 bcmgenet_isr1(priv->irq1, priv);
2581 enable_irq(priv->irq1);
2582}
2583#endif
2584
2585static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2586{
2587 u32 reg;
2588
2589 reg = bcmgenet_rbuf_ctrl_get(priv);
2590 reg |= BIT(1);
2591 bcmgenet_rbuf_ctrl_set(priv, reg);
2592 udelay(10);
2593
2594 reg &= ~BIT(1);
2595 bcmgenet_rbuf_ctrl_set(priv, reg);
2596 udelay(10);
2597}
2598
2599static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2600 unsigned char *addr)
2601{
2602 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2603 (addr[2] << 8) | addr[3], UMAC_MAC0);
2604 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2605}
2606
2607
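/* Returns a reusable dma control register value */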
2608static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2609{
2610 u32 reg;
2611 u32 dma_ctrl;
2612
2613
2614 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2615 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2616 reg &= ~dma_ctrl;
2617 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2618
2619 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2620 reg &= ~dma_ctrl;
2621 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2622
2623 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2624 udelay(10);
2625 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2626
2627 return dma_ctrl;
2628}
2629
2630static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2631{
2632 u32 reg;
2633
2634 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2635 reg |= dma_ctrl;
2636 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2637
2638 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2639 reg |= dma_ctrl;
2640 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2641}
2642
2643static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2644 u32 f_index)
2645{
2646 u32 offset;
2647 u32 reg;
2648
2649 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2650 reg = bcmgenet_hfb_reg_readl(priv, offset);
2651 return !!(reg & (1 << (f_index % 32)));
2652}
2653
2654static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2655{
2656 u32 offset;
2657 u32 reg;
2658
2659 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2660 reg = bcmgenet_hfb_reg_readl(priv, offset);
2661 reg |= (1 << (f_index % 32));
2662 bcmgenet_hfb_reg_writel(priv, reg, offset);
2663}
2664
2665static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2666 u32 f_index, u32 rx_queue)
2667{
2668 u32 offset;
2669 u32 reg;
2670
2671 offset = f_index / 8;
2672 reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2673 reg &= ~(0xF << (4 * (f_index % 8)));
2674 reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2675 bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2676}
2677
2678static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2679 u32 f_index, u32 f_length)
2680{
2681 u32 offset;
2682 u32 reg;
2683
2684 offset = HFB_FLT_LEN_V3PLUS +
2685 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2686 sizeof(u32);
2687 reg = bcmgenet_hfb_reg_readl(priv, offset);
2688 reg &= ~(0xFF << (8 * (f_index % 4)));
2689 reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2690 bcmgenet_hfb_reg_writel(priv, reg, offset);
2691}
2692
2693static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2694{
2695 u32 f_index;
2696
2697 for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2698 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2699 return f_index;
2700
2701 return -ENOMEM;
2702}
2703
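/* bcmgenet_hfb_add_filter
 *
 * Program a Hardware Filter Block entry to match incoming traffic and
 * steer it to the requested Rx queue: find an unused filter slot, write
 * the f_length match words from f_data into the filter memory, set the
 * filter length (in bytes, two per match word, hence 2 * f_length), map
 * the filter to rx_queue, then enable the filter and the HFB itself.
 */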
2739int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2740 u32 f_length, u32 rx_queue)
2741{
2742 int f_index;
2743 u32 i;
2744
2745 f_index = bcmgenet_hfb_find_unused_filter(priv);
2746 if (f_index < 0)
2747 return -ENOMEM;
2748
2749 if (f_length > priv->hw_params->hfb_filter_size)
2750 return -EINVAL;
2751
2752 for (i = 0; i < f_length; i++)
2753 bcmgenet_hfb_writel(priv, f_data[i],
2754 (f_index * priv->hw_params->hfb_filter_size + i) *
2755 sizeof(u32));
2756
2757 bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2758 bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2759 bcmgenet_hfb_enable_filter(priv, f_index);
2760 bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2761
2762 return 0;
2763}
2764
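/* bcmgenet_hfb_clear
 *
 * Clear the Hardware Filter Block and disable all filtering.
 */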
2769static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2770{
2771 u32 i;
2772
2773 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2774 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2775 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2776
2777 for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2778 bcmgenet_rdma_writel(priv, 0x0, i);
2779
2780 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2781 bcmgenet_hfb_reg_writel(priv, 0x0,
2782 HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2783
2784 for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2785 priv->hw_params->hfb_filter_size; i++)
2786 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2787}
2788
2789static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2790{
2791 if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2792 return;
2793
2794 bcmgenet_hfb_clear(priv);
2795}
2796
2797static void bcmgenet_netif_start(struct net_device *dev)
2798{
2799 struct bcmgenet_priv *priv = netdev_priv(dev);
2800
2801
2802 bcmgenet_enable_rx_napi(priv);
2803 bcmgenet_enable_tx_napi(priv);
2804
2805 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2806
2807 netif_tx_start_all_queues(dev);
2808
2809
2810 bcmgenet_link_intr_enable(priv);
2811
2812 phy_start(priv->phydev);
2813}
2814
2815static int bcmgenet_open(struct net_device *dev)
2816{
2817 struct bcmgenet_priv *priv = netdev_priv(dev);
2818 unsigned long dma_ctrl;
2819 u32 reg;
2820 int ret;
2821
2822 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2823
2824
2825 clk_prepare_enable(priv->clk);
2826
2827
2828
2829
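 /* If this is an internal GPHY, power it back on now, before UniMAC is
  * brought out of reset, as absolutely no UniMAC activity is allowed
  */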
2830 if (priv->internal_phy)
2831 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2832
2833
2834 bcmgenet_umac_reset(priv);
2835
2836 ret = init_umac(priv);
2837 if (ret)
2838 goto err_clk_disable;
2839
2840
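 /* Disable the ethernet MAC while updating its registers */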
2841 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2842
2843
2844 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2845 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2846
2847 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2848
2849 if (priv->internal_phy) {
2850 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2851 reg |= EXT_ENERGY_DET_MASK;
2852 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2853 }
2854
2855
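 /* Disable Rx/Tx DMA and flush the Tx queues */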
2856 dma_ctrl = bcmgenet_dma_disable(priv);
2857
2858
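 /* Reinitialize TDMA and RDMA and SW housekeeping */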
2859 ret = bcmgenet_init_dma(priv);
2860 if (ret) {
2861 netdev_err(dev, "failed to initialize DMA\n");
2862 goto err_clk_disable;
2863 }
2864
2865
2866 bcmgenet_enable_dma(priv, dma_ctrl);
2867
2868
2869 bcmgenet_hfb_init(priv);
2870
2871 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2872 dev->name, priv);
2873 if (ret < 0) {
2874 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2875 goto err_fini_dma;
2876 }
2877
2878 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2879 dev->name, priv);
2880 if (ret < 0) {
2881 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2882 goto err_irq0;
2883 }
2884
2885 ret = bcmgenet_mii_probe(dev);
2886 if (ret) {
2887 netdev_err(dev, "failed to connect to PHY\n");
2888 goto err_irq1;
2889 }
2890
2891 bcmgenet_netif_start(dev);
2892
2893 return 0;
2894
2895err_irq1:
2896 free_irq(priv->irq1, priv);
2897err_irq0:
2898 free_irq(priv->irq0, priv);
2899err_fini_dma:
2900 bcmgenet_fini_dma(priv);
2901err_clk_disable:
2902 clk_disable_unprepare(priv->clk);
2903 return ret;
2904}
2905
2906static void bcmgenet_netif_stop(struct net_device *dev)
2907{
2908 struct bcmgenet_priv *priv = netdev_priv(dev);
2909
2910 netif_tx_stop_all_queues(dev);
2911 phy_stop(priv->phydev);
2912 bcmgenet_intr_disable(priv);
2913 bcmgenet_disable_rx_napi(priv);
2914 bcmgenet_disable_tx_napi(priv);
2915
2916
2917
2918
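 /* Wait for pending work items to complete; since interrupts are
  * disabled, no new work will be scheduled
  */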
2919 cancel_work_sync(&priv->bcmgenet_irq_work);
2920
2921 priv->old_link = -1;
2922 priv->old_speed = -1;
2923 priv->old_duplex = -1;
2924 priv->old_pause = -1;
2925}
2926
2927static int bcmgenet_close(struct net_device *dev)
2928{
2929 struct bcmgenet_priv *priv = netdev_priv(dev);
2930 int ret;
2931
2932 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2933
2934 bcmgenet_netif_stop(dev);
2935
2936
2937 phy_disconnect(priv->phydev);
2938
2939
2940 umac_enable_set(priv, CMD_RX_EN, false);
2941
2942 ret = bcmgenet_dma_teardown(priv);
2943 if (ret)
2944 return ret;
2945
2946
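 /* Disable MAC transmit; Tx DMA must have been disabled beforehand */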
2947 umac_enable_set(priv, CMD_TX_EN, false);
2948
2949
2950 bcmgenet_tx_reclaim_all(dev);
2951 bcmgenet_fini_dma(priv);
2952
2953 free_irq(priv->irq0, priv);
2954 free_irq(priv->irq1, priv);
2955
2956 if (priv->internal_phy)
2957 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2958
2959 clk_disable_unprepare(priv->clk);
2960
2961 return ret;
2962}
2963
2964static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2965{
2966 struct bcmgenet_priv *priv = ring->priv;
2967 u32 p_index, c_index, intsts, intmsk;
2968 struct netdev_queue *txq;
2969 unsigned int free_bds;
2970 unsigned long flags;
2971 bool txq_stopped;
2972
2973 if (!netif_msg_tx_err(priv))
2974 return;
2975
2976 txq = netdev_get_tx_queue(priv->dev, ring->queue);
2977
2978 spin_lock_irqsave(&ring->lock, flags);
2979 if (ring->index == DESC_INDEX) {
2980 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2981 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2982 } else {
2983 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2984 intmsk = 1 << ring->index;
2985 }
2986 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2987 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2988 txq_stopped = netif_tx_queue_stopped(txq);
2989 free_bds = ring->free_bds;
2990 spin_unlock_irqrestore(&ring->lock, flags);
2991
2992 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2993 "TX queue status: %s, interrupts: %s\n"
2994 "(sw)free_bds: %d (sw)size: %d\n"
2995 "(sw)p_index: %d (hw)p_index: %d\n"
2996 "(sw)c_index: %d (hw)c_index: %d\n"
2997 "(sw)clean_p: %d (sw)write_p: %d\n"
2998 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
2999 ring->index, ring->queue,
3000 txq_stopped ? "stopped" : "active",
3001 intsts & intmsk ? "enabled" : "disabled",
3002 free_bds, ring->size,
3003 ring->prod_index, p_index & DMA_P_INDEX_MASK,
3004 ring->c_index, c_index & DMA_C_INDEX_MASK,
3005 ring->clean_ptr, ring->write_ptr,
3006 ring->cb_ptr, ring->end_ptr);
3007}
3008
3009static void bcmgenet_timeout(struct net_device *dev)
3010{
3011 struct bcmgenet_priv *priv = netdev_priv(dev);
3012 u32 int0_enable = 0;
3013 u32 int1_enable = 0;
3014 unsigned int q;
3015
3016 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3017
3018 for (q = 0; q < priv->hw_params->tx_queues; q++)
3019 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3020 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3021
3022 bcmgenet_tx_reclaim_all(dev);
3023
3024 for (q = 0; q < priv->hw_params->tx_queues; q++)
3025 int1_enable |= (1 << q);
3026
3027 int0_enable = UMAC_IRQ_TXDMA_DONE;
3028
3029
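 /* Re-enable the Tx DMA completion interrupts */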
3030 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3031 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3032
3033 dev->trans_start = jiffies;
3034
3035 dev->stats.tx_errors++;
3036
3037 netif_tx_wake_all_queues(dev);
3038}
3039
3040#define MAX_MC_COUNT 16
3041
3042static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3043 unsigned char *addr,
3044 int *i,
3045 int *mc)
3046{
3047 u32 reg;
3048
3049 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3050 UMAC_MDF_ADDR + (*i * 4));
3051 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3052 addr[4] << 8 | addr[5],
3053 UMAC_MDF_ADDR + ((*i + 1) * 4));
3054 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
3055 reg |= (1 << (MAX_MC_COUNT - *mc));
3056 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3057 *i += 2;
3058 (*mc)++;
3059}
3060
3061static void bcmgenet_set_rx_mode(struct net_device *dev)
3062{
3063 struct bcmgenet_priv *priv = netdev_priv(dev);
3064 struct netdev_hw_addr *ha;
3065 int i, mc;
3066 u32 reg;
3067
3068 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3069
3070
3071 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3072 if (dev->flags & IFF_PROMISC) {
3073 reg |= CMD_PROMISC;
3074 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3075 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3076 return;
3077 } else {
3078 reg &= ~CMD_PROMISC;
3079 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3080 }
3081
3082
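 /* There is no hardware all-multicast mode */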
3083 if (dev->flags & IFF_ALLMULTI) {
3084 netdev_warn(dev, "ALLMULTI is not supported\n");
3085 return;
3086 }
3087
3088
3089 i = 0;
3090 mc = 0;
3091
3092 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
3093
3094 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
3095
3096 if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
3097 return;
3098
3099 if (!netdev_uc_empty(dev))
3100 netdev_for_each_uc_addr(ha, dev)
3101 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
3102
3103 if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
3104 return;
3105
3106 netdev_for_each_mc_addr(ha, dev)
3107 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
3108}
3109
3110
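/* Set the hardware MAC address */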
3111static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3112{
3113 struct sockaddr *addr = p;
3114
3115
3116
3117
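 /* Setting the MAC address at the hardware level is not possible
  * without disabling the UniMAC Rx/Tx, so refuse the change while the
  * interface is running
  */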
3118 if (netif_running(dev))
3119 return -EBUSY;
3120
3121 ether_addr_copy(dev->dev_addr, addr->sa_data);
3122
3123 return 0;
3124}
3125
3126static const struct net_device_ops bcmgenet_netdev_ops = {
3127 .ndo_open = bcmgenet_open,
3128 .ndo_stop = bcmgenet_close,
3129 .ndo_start_xmit = bcmgenet_xmit,
3130 .ndo_tx_timeout = bcmgenet_timeout,
3131 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
3132 .ndo_set_mac_address = bcmgenet_set_mac_addr,
3133 .ndo_do_ioctl = bcmgenet_ioctl,
3134 .ndo_set_features = bcmgenet_set_features,
3135#ifdef CONFIG_NET_POLL_CONTROLLER
3136 .ndo_poll_controller = bcmgenet_poll_controller,
3137#endif
3138};
3139
3140
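/* Per-version GENET hardware parameters */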
3141static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3142 [GENET_V1] = {
3143 .tx_queues = 0,
3144 .tx_bds_per_q = 0,
3145 .rx_queues = 0,
3146 .rx_bds_per_q = 0,
3147 .bp_in_en_shift = 16,
3148 .bp_in_mask = 0xffff,
3149 .hfb_filter_cnt = 16,
3150 .qtag_mask = 0x1F,
3151 .hfb_offset = 0x1000,
3152 .rdma_offset = 0x2000,
3153 .tdma_offset = 0x3000,
3154 .words_per_bd = 2,
3155 },
3156 [GENET_V2] = {
3157 .tx_queues = 4,
3158 .tx_bds_per_q = 32,
3159 .rx_queues = 0,
3160 .rx_bds_per_q = 0,
3161 .bp_in_en_shift = 16,
3162 .bp_in_mask = 0xffff,
3163 .hfb_filter_cnt = 16,
3164 .qtag_mask = 0x1F,
3165 .tbuf_offset = 0x0600,
3166 .hfb_offset = 0x1000,
3167 .hfb_reg_offset = 0x2000,
3168 .rdma_offset = 0x3000,
3169 .tdma_offset = 0x4000,
3170 .words_per_bd = 2,
3171 .flags = GENET_HAS_EXT,
3172 },
3173 [GENET_V3] = {
3174 .tx_queues = 4,
3175 .tx_bds_per_q = 32,
3176 .rx_queues = 0,
3177 .rx_bds_per_q = 0,
3178 .bp_in_en_shift = 17,
3179 .bp_in_mask = 0x1ffff,
3180 .hfb_filter_cnt = 48,
3181 .hfb_filter_size = 128,
3182 .qtag_mask = 0x3F,
3183 .tbuf_offset = 0x0600,
3184 .hfb_offset = 0x8000,
3185 .hfb_reg_offset = 0xfc00,
3186 .rdma_offset = 0x10000,
3187 .tdma_offset = 0x11000,
3188 .words_per_bd = 2,
3189 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3190 GENET_HAS_MOCA_LINK_DET,
3191 },
3192 [GENET_V4] = {
3193 .tx_queues = 4,
3194 .tx_bds_per_q = 32,
3195 .rx_queues = 0,
3196 .rx_bds_per_q = 0,
3197 .bp_in_en_shift = 17,
3198 .bp_in_mask = 0x1ffff,
3199 .hfb_filter_cnt = 48,
3200 .hfb_filter_size = 128,
3201 .qtag_mask = 0x3F,
3202 .tbuf_offset = 0x0600,
3203 .hfb_offset = 0x8000,
3204 .hfb_reg_offset = 0xfc00,
3205 .rdma_offset = 0x2000,
3206 .tdma_offset = 0x4000,
3207 .words_per_bd = 3,
3208 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3209 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3210 },
3211};
3212
3213
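/* Infer hardware parameters from the detected GENET version */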
3214static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3215{
3216 struct bcmgenet_hw_params *params;
3217 u32 reg;
3218 u8 major;
3219 u16 gphy_rev;
3220
3221 if (GENET_IS_V4(priv)) {
3222 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3223 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3224 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3225 priv->version = GENET_V4;
3226 } else if (GENET_IS_V3(priv)) {
3227 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3228 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3229 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3230 priv->version = GENET_V3;
3231 } else if (GENET_IS_V2(priv)) {
3232 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3233 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3234 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3235 priv->version = GENET_V2;
3236 } else if (GENET_IS_V1(priv)) {
3237 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3238 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3239 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3240 priv->version = GENET_V1;
3241 }
3242
3243
3244 priv->hw_params = &bcmgenet_hw_params[priv->version];
3245 params = priv->hw_params;
3246
3247
3248 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3249 major = (reg >> 24 & 0x0f);
3250 if (major == 5)
3251 major = 4;
3252 else if (major == 0)
3253 major = 1;
3254 if (major != priv->version) {
3255 dev_err(&priv->pdev->dev,
3256 "GENET version mismatch, got: %d, configured for: %d\n",
3257 major, priv->version);
3258 }
3259
3260
3261 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3262 major, (reg >> 16) & 0x0f, reg & 0xffff);
3263
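 /* The lower 16 bits of SYS_REV_CTRL carry the integrated GPHY
  * revision; store it for the MDIO probing code to hand to the PHY
  * driver, which expects the major revision in bits 15:8.
  */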
3276 gphy_rev = reg & 0xffff;
3277
3278
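 /* Old scheme: the major revision is stored in the low byte, shift it
  * up into bits 15:8
  */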
3279 if ((gphy_rev & 0xf0) != 0)
3280 priv->gphy_rev = gphy_rev << 8;
3281
3282
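 /* New scheme: the revision is already stored in bits 15:8 */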
3283 else if ((gphy_rev & 0xff00) != 0)
3284 priv->gphy_rev = gphy_rev;
3285
3286
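 /* Reserved values: no usable revision information */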
3287 else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3288 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3289 return;
3290 }
3291
3292#ifdef CONFIG_PHYS_ADDR_T_64BIT
3293 if (!(params->flags & GENET_HAS_40BITS))
3294 pr_warn("GENET does not support 40-bits PA\n");
3295#endif
3296
3297 pr_debug("Configuration for version: %d\n"
3298 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3299 "BP << en: %2d, BP msk: 0x%05x\n"
3300 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3301 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3302 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3303 "Words/BD: %d\n",
3304 priv->version,
3305 params->tx_queues, params->tx_bds_per_q,
3306 params->rx_queues, params->rx_bds_per_q,
3307 params->bp_in_en_shift, params->bp_in_mask,
3308 params->hfb_filter_cnt, params->qtag_mask,
3309 params->tbuf_offset, params->hfb_offset,
3310 params->hfb_reg_offset,
3311 params->rdma_offset, params->tdma_offset,
3312 params->words_per_bd);
3313}
3314
3315static const struct of_device_id bcmgenet_match[] = {
3316 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
3317 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3318 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3319 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3320 { },
3321};
3322MODULE_DEVICE_TABLE(of, bcmgenet_match);
3323
3324static int bcmgenet_probe(struct platform_device *pdev)
3325{
3326 struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3327 struct device_node *dn = pdev->dev.of_node;
3328 const struct of_device_id *of_id = NULL;
3329 struct bcmgenet_priv *priv;
3330 struct net_device *dev;
3331 const void *macaddr;
3332 struct resource *r;
3333 int err = -EIO;
3334
3335
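 /* Allocate a netdev with room for GENET_MAX_MQ_CNT priority queues
  * plus the default queue
  */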
3336 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3337 GENET_MAX_MQ_CNT + 1);
3338 if (!dev) {
3339 dev_err(&pdev->dev, "can't allocate net device\n");
3340 return -ENOMEM;
3341 }
3342
3343 if (dn) {
3344 of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id) {
			err = -EINVAL;
			goto err;
		}
3347 }
3348
3349 priv = netdev_priv(dev);
3350 priv->irq0 = platform_get_irq(pdev, 0);
3351 priv->irq1 = platform_get_irq(pdev, 1);
3352 priv->wol_irq = platform_get_irq(pdev, 2);
3353 if (!priv->irq0 || !priv->irq1) {
3354 dev_err(&pdev->dev, "can't find IRQs\n");
3355 err = -EINVAL;
3356 goto err;
3357 }
3358
3359 if (dn) {
3360 macaddr = of_get_mac_address(dn);
3361 if (!macaddr) {
3362 dev_err(&pdev->dev, "can't find MAC address\n");
3363 err = -EINVAL;
3364 goto err;
3365 }
3366 } else {
3367 macaddr = pd->mac_address;
3368 }
3369
3370 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3371 priv->base = devm_ioremap_resource(&pdev->dev, r);
3372 if (IS_ERR(priv->base)) {
3373 err = PTR_ERR(priv->base);
3374 goto err;
3375 }
3376
3377 SET_NETDEV_DEV(dev, &pdev->dev);
3378 dev_set_drvdata(&pdev->dev, dev);
3379 ether_addr_copy(dev->dev_addr, macaddr);
3380 dev->watchdog_timeo = 2 * HZ;
3381 dev->ethtool_ops = &bcmgenet_ethtool_ops;
3382 dev->netdev_ops = &bcmgenet_netdev_ops;
3383
3384 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3385
3386
3387 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3388 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3389
3390
3391 priv->wol_irq_disabled = true;
3392 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3393 dev->name, priv);
3394 if (!err)
3395 device_set_wakeup_capable(&pdev->dev, 1);
3396
3397
3398
3399
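 /* Reserve extra headroom for features that may be enabled or disabled
  * at runtime
  */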
3400 dev->needed_headroom += 64;
3401
3402 netdev_boot_setup_check(dev);
3403
3404 priv->dev = dev;
3405 priv->pdev = pdev;
3406 if (of_id)
3407 priv->version = (enum bcmgenet_version)of_id->data;
3408 else
3409 priv->version = pd->genet_version;
3410
3411 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3412 if (IS_ERR(priv->clk)) {
3413 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3414 priv->clk = NULL;
3415 }
3416
3417 clk_prepare_enable(priv->clk);
3418
3419 bcmgenet_set_hw_params(priv);
3420
3421
3422 init_waitqueue_head(&priv->wq);
3423
3424 priv->rx_buf_len = RX_BUF_LENGTH;
3425 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3426
3427 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3428 if (IS_ERR(priv->clk_wol)) {
3429 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3430 priv->clk_wol = NULL;
3431 }
3432
3433 priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3434 if (IS_ERR(priv->clk_eee)) {
3435 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3436 priv->clk_eee = NULL;
3437 }
3438
3439 err = reset_umac(priv);
3440 if (err)
3441 goto err_clk_disable;
3442
3443 err = bcmgenet_mii_init(dev);
3444 if (err)
3445 goto err_clk_disable;
3446
3447
3448
3449
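 /* Set the real number of queues: the hardware priority queues plus the
  * default ring 16 queue (GENET_V1 only has the default queue)
  */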
3450 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3451 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3452
3453
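 /* libphy will determine and report the link state */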
3454 netif_carrier_off(dev);
3455
3456
3457 clk_disable_unprepare(priv->clk);
3458
3459 err = register_netdev(dev);
3460 if (err)
3461 goto err;
3462
3463 return err;
3464
3465err_clk_disable:
3466 clk_disable_unprepare(priv->clk);
3467err:
3468 free_netdev(dev);
3469 return err;
3470}
3471
3472static int bcmgenet_remove(struct platform_device *pdev)
3473{
3474 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3475
3476 dev_set_drvdata(&pdev->dev, NULL);
3477 unregister_netdev(priv->dev);
3478 bcmgenet_mii_exit(priv->dev);
3479 free_netdev(priv->dev);
3480
3481 return 0;
3482}
3483
3484#ifdef CONFIG_PM_SLEEP
3485static int bcmgenet_suspend(struct device *d)
3486{
3487 struct net_device *dev = dev_get_drvdata(d);
3488 struct bcmgenet_priv *priv = netdev_priv(dev);
3489 int ret;
3490
3491 if (!netif_running(dev))
3492 return 0;
3493
3494 bcmgenet_netif_stop(dev);
3495
3496 phy_suspend(priv->phydev);
3497
3498 netif_device_detach(dev);
3499
3500
3501 umac_enable_set(priv, CMD_RX_EN, false);
3502
3503 ret = bcmgenet_dma_teardown(priv);
3504 if (ret)
3505 return ret;
3506
3507
3508 umac_enable_set(priv, CMD_TX_EN, false);
3509
3510
3511 bcmgenet_tx_reclaim_all(dev);
3512 bcmgenet_fini_dma(priv);
3513
3514
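 /* Prepare the device for Wake-on-LAN and switch to the slow WOL clock
  * if configured
  */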
3515 if (device_may_wakeup(d) && priv->wolopts) {
3516 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3517 clk_prepare_enable(priv->clk_wol);
3518 } else if (priv->internal_phy) {
3519 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3520 }
3521
3522
3523 clk_disable_unprepare(priv->clk);
3524
3525 return ret;
3526}
3527
3528static int bcmgenet_resume(struct device *d)
3529{
3530 struct net_device *dev = dev_get_drvdata(d);
3531 struct bcmgenet_priv *priv = netdev_priv(dev);
3532 unsigned long dma_ctrl;
3533 int ret;
3534 u32 reg;
3535
3536 if (!netif_running(dev))
3537 return 0;
3538
3539
3540 ret = clk_prepare_enable(priv->clk);
3541 if (ret)
3542 return ret;
3543
3544
3545
3546
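 /* If this is an internal GPHY, power it back on now, before UniMAC is
  * brought out of reset, as absolutely no UniMAC activity is allowed
  */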
3547 if (priv->internal_phy)
3548 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3549
3550 bcmgenet_umac_reset(priv);
3551
3552 ret = init_umac(priv);
3553 if (ret)
3554 goto out_clk_disable;
3555
3556
3557 if (priv->wolopts)
3558 clk_disable_unprepare(priv->clk_wol);
3559
3560 phy_init_hw(priv->phydev);
3561
3562 bcmgenet_mii_config(priv->dev);
3563
3564
3565 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
3566
3567 bcmgenet_set_hw_addr(priv, dev->dev_addr);
3568
3569 if (priv->internal_phy) {
3570 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3571 reg |= EXT_ENERGY_DET_MASK;
3572 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3573 }
3574
3575 if (priv->wolopts)
3576 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3577
3578
3579 dma_ctrl = bcmgenet_dma_disable(priv);
3580
3581
3582 ret = bcmgenet_init_dma(priv);
3583 if (ret) {
3584 netdev_err(dev, "failed to initialize DMA\n");
3585 goto out_clk_disable;
3586 }
3587
3588
3589 bcmgenet_enable_dma(priv, dma_ctrl);
3590
3591 netif_device_attach(dev);
3592
3593 phy_resume(priv->phydev);
3594
3595 if (priv->eee.eee_enabled)
3596 bcmgenet_eee_enable_set(dev, true);
3597
3598 bcmgenet_netif_start(dev);
3599
3600 return 0;
3601
3602out_clk_disable:
3603 clk_disable_unprepare(priv->clk);
3604 return ret;
3605}
3606#endif
3607
3608static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3609
3610static struct platform_driver bcmgenet_driver = {
3611 .probe = bcmgenet_probe,
3612 .remove = bcmgenet_remove,
3613 .driver = {
3614 .name = "bcmgenet",
3615 .of_match_table = bcmgenet_match,
3616 .pm = &bcmgenet_pm_ops,
3617 },
3618};
3619module_platform_driver(bcmgenet_driver);
3620
3621MODULE_AUTHOR("Broadcom Corporation");
3622MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3623MODULE_ALIAS("platform:bcmgenet");
3624MODULE_LICENSE("GPL");
3625