1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/phy.h>
35#include <linux/delay.h>
36#include <linux/netdevice.h>
37#include <linux/smp.h>
38#include <linux/ethtool.h>
39#include <linux/module.h>
40#include <linux/etherdevice.h>
41#include <linux/skbuff.h>
42#include <linux/jiffies.h>
43#include <linux/interrupt.h>
44#include <linux/platform_device.h>
45
46#include <asm/mipsregs.h>
47
48
49
50
51
52#include <asm/netlogic/xlr/fmn.h>
53
54#include "platform_net.h"
55#include "xlr_net.h"
56
57
58
59
60
61
62static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val)
63{
64 __raw_writel(val, base + reg);
65}
66
67static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
68{
69 return __raw_readl(base + reg);
70}
71
72static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
73{
74 u32 tmp;
75
76 tmp = xlr_nae_rdreg(base_addr, off);
77 xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask));
78}
79
/* Bytes reserved in front of each RX buffer for the skb back-pointer. */
#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES

/*
 * Hand a receive buffer back to the MAC's free-in ("RFR") FMN station so
 * the hardware can reuse it for incoming frames.
 *
 * @priv: per-port private data (supplies the RFR station id)
 * @addr: kernel virtual address of the buffer being returned
 *
 * Returns 0 on success, or the last nlm_fmn_send() error after ~10000
 * retries (the FMN queue can be transiently full).
 */
static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
{
	struct nlm_fmn_msg msg;
	int ret = 0, num_try = 0, stnid;
	unsigned long paddr, mflags;

	paddr = virt_to_bus(addr);
	/* msg0 carries the bus address, masked to a 32-byte-aligned value */
	msg.msg0 = (u64)paddr & 0xffffffffe0ULL;
	msg.msg1 = 0;
	msg.msg2 = 0;
	msg.msg3 = 0;
	stnid = priv->nd->rfr_station;
	do {
		/* FMN access needs COP2 enabled with interrupts disabled */
		mflags = nlm_cop2_enable_irqsave();
		ret = nlm_fmn_send(1, 0, stnid, &msg);
		nlm_cop2_disable_irqrestore(mflags);
		if (ret == 0)
			return 0;
	} while (++num_try < 10000);

	netdev_err(priv->ndev, "Send to RFR failed in RX path\n");
	return ret;
}
105
/*
 * Allocate an RX buffer for the network engine.
 *
 * The first MAC_SKB_BACK_PTR_SIZE bytes of the skb data area are reserved
 * to hold a pointer back to the skb itself; xlr_net_fmn_handler() reads
 * this pointer to recover the skb when the hardware hands the buffer back.
 *
 * Returns the data pointer just past the back-pointer slot (this is what
 * the hardware receives), or NULL on allocation failure.
 */
static inline unsigned char *xlr_alloc_skb(void)
{
	struct sk_buff *skb;
	int buf_len = sizeof(struct sk_buff *);
	unsigned char *skb_data;

	/* Allocate an skb with enough room for the frame + back-pointer */
	skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_data = skb->data;
	/* put+pull advances skb->data past the back-pointer slot (len stays 0) */
	skb_put(skb, MAC_SKB_BACK_PTR_SIZE);
	skb_pull(skb, MAC_SKB_BACK_PTR_SIZE);
	/* stash the skb pointer at the very start of the raw buffer */
	memcpy(skb_data, &skb, buf_len);

	return skb->data;
}
123
124static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
125 struct nlm_fmn_msg *msg, void *arg)
126{
127 struct sk_buff *skb;
128 void *skb_data = NULL;
129 struct net_device *ndev;
130 struct xlr_net_priv *priv;
131 u32 port, length;
132 unsigned char *addr;
133 struct xlr_adapter *adapter = arg;
134
135 length = (msg->msg0 >> 40) & 0x3fff;
136 if (length == 0) {
137 addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
138 addr = addr - MAC_SKB_BACK_PTR_SIZE;
139 skb = (struct sk_buff *)(*(unsigned long *)addr);
140 dev_kfree_skb_any((struct sk_buff *)addr);
141 } else {
142 addr = (unsigned char *)
143 bus_to_virt(msg->msg0 & 0xffffffffe0ULL);
144 length = length - BYTE_OFFSET - MAC_CRC_LEN;
145 port = ((int)msg->msg0) & 0x0f;
146 addr = addr - MAC_SKB_BACK_PTR_SIZE;
147 skb = (struct sk_buff *)(*(unsigned long *)addr);
148 skb->dev = adapter->netdev[port];
149 if (!skb->dev)
150 return;
151 ndev = skb->dev;
152 priv = netdev_priv(ndev);
153
154
155 skb_reserve(skb, BYTE_OFFSET);
156 skb_put(skb, length);
157 skb->protocol = eth_type_trans(skb, skb->dev);
158 netif_rx(skb);
159
160 skb_data = xlr_alloc_skb();
161 if (skb_data)
162 send_to_rfr_fifo(priv, skb_data);
163 }
164}
165
166static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
167{
168 return mdiobus_get_phy(priv->mii_bus, priv->phy_addr);
169}
170
171
172
173
174static int xlr_get_link_ksettings(struct net_device *ndev,
175 struct ethtool_link_ksettings *ecmd)
176{
177 struct xlr_net_priv *priv = netdev_priv(ndev);
178 struct phy_device *phydev = xlr_get_phydev(priv);
179
180 if (!phydev)
181 return -ENODEV;
182
183 phy_ethtool_ksettings_get(phydev, ecmd);
184
185 return 0;
186}
187
188static int xlr_set_link_ksettings(struct net_device *ndev,
189 const struct ethtool_link_ksettings *ecmd)
190{
191 struct xlr_net_priv *priv = netdev_priv(ndev);
192 struct phy_device *phydev = xlr_get_phydev(priv);
193
194 if (!phydev)
195 return -ENODEV;
196 return phy_ethtool_ksettings_set(phydev, ecmd);
197}
198
/* ethtool hooks: only link-settings get/set are supported. */
static const struct ethtool_ops xlr_ethtool_ops = {
	.get_link_ksettings = xlr_get_link_ksettings,
	.set_link_ksettings = xlr_set_link_ksettings,
};
203
204
205
206
207static int xlr_net_fill_rx_ring(struct net_device *ndev)
208{
209 void *skb_data;
210 struct xlr_net_priv *priv = netdev_priv(ndev);
211 int i;
212
213 for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
214 skb_data = xlr_alloc_skb();
215 if (!skb_data) {
216 netdev_err(ndev, "SKB allocation failed\n");
217 return -ENOMEM;
218 }
219 send_to_rfr_fifo(priv, skb_data);
220 }
221 netdev_info(ndev, "Rx ring setup done\n");
222 return 0;
223}
224
/*
 * ndo_open: bring the interface up.
 *
 * Starts the PHY state machine, kicks off autonegotiation, programs the
 * MAC for the (current) PHY speed and enables the TX queues.
 *
 * Fix: err was declared u32, which hid the sign of the negative errno
 * returned by phy_start_aneg(); use int.  Also undo phy_start() if
 * autonegotiation cannot be started.
 */
static int xlr_net_open(struct net_device *ndev)
{
	int err;
	struct xlr_net_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = xlr_get_phydev(priv);

	/* schedule a link state check */
	phy_start(phydev);

	err = phy_start_aneg(phydev);
	if (err) {
		phy_stop(phydev);
		pr_err("Autoneg failed\n");
		return err;
	}

	xlr_set_gmac_speed(priv);

	netif_tx_start_all_queues(ndev);

	return 0;
}
246
/* ndo_stop: stop the PHY and halt all TX queues. */
static int xlr_net_stop(struct net_device *ndev)
{
	struct xlr_net_priv *priv = netdev_priv(ndev);

	phy_stop(xlr_get_phydev(priv));
	netif_tx_stop_all_queues(ndev);
	return 0;
}
256
/*
 * Build the two-entry FMN TX descriptor for @skb.
 *
 * msg0 describes the frame data: bit 63 set, a field of 127 at bit 54
 * (NOTE(review): presumably the free-back/EOP field - confirm against the
 * XLR NAE manual), the 14-bit frame length at bit 40 and the data address
 * in the low 40 bits.
 *
 * msg1 is a zero-length entry carrying the physical address of the skb
 * struct itself, targeted at this core's free-back station; the hardware
 * returns it on TX completion so xlr_net_fmn_handler() can free the skb.
 */
static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
			     struct sk_buff *skb)
{
	unsigned long physkb = virt_to_phys(skb);
	int cpu_core = nlm_core_id();
	/* eight free-back buckets per core */
	int fr_stn_id = cpu_core * 8 + XLR_FB_STN;

	msg->msg0 = (((u64)1 << 63) |
		((u64)127 << 54) |
		(u64)skb->len << 40 |
		((u64)addr));
	msg->msg1 = (((u64)1 << 63) |
		((u64)fr_stn_id << 54) |
		(u64)0 << 40 |
		((u64)physkb & 0xffffffff));
	msg->msg2 = 0;
	msg->msg3 = 0;
}
275
276static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
277 struct net_device *ndev)
278{
279 struct nlm_fmn_msg msg;
280 struct xlr_net_priv *priv = netdev_priv(ndev);
281 int ret;
282 u32 flags;
283
284 xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
285 flags = nlm_cop2_enable_irqsave();
286 ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg);
287 nlm_cop2_disable_irqrestore(flags);
288 if (ret)
289 dev_kfree_skb_any(skb);
290 return NETDEV_TX_OK;
291}
292
293static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
294 void *accel_priv,
295 select_queue_fallback_t fallback)
296{
297 return (u16)smp_processor_id();
298}
299
/*
 * Program the device's MAC address and unicast filter registers.
 *
 * The six address bytes are split across two 32-bit registers with
 * byte 5 in the top byte of R_MAC_ADDR0 (hardware byte order).  The
 * mask registers are set to match all 48 bits, and the filter is set to
 * accept broadcast, all multicast, and frames matching MAC_ADDR0.
 */
static void xlr_hw_set_mac_addr(struct net_device *ndev)
{
	struct xlr_net_priv *priv = netdev_priv(ndev);

	/* Set the MAC address: bytes 5..2 low word, bytes 1..0 high word */
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
		     ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
		     (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
		     ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));

	/* Match all 48 address bits */
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);

	xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
		     (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
		     (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
		     (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));

	/* RGMII/SGMII ports also need the back-to-back IPG programmed */
	if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
	    priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
		xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
}
325
/* ndo_set_mac_address: validate/store the address, then program the MAC. */
static int xlr_net_set_mac_addr(struct net_device *ndev, void *data)
{
	int err = eth_mac_addr(ndev, data);

	if (!err)
		xlr_hw_set_mac_addr(ndev);
	return err;
}
336
/*
 * ndo_set_rx_mode: update the MAC filter for promiscuous mode.
 *
 * In promiscuous mode, enable broadcast, pause-frame, all-multicast and
 * all-unicast reception; otherwise clear the pause-frame and all-unicast
 * bits (broadcast/multicast acceptance set at init is left alone).
 */
static void xlr_set_rx_mode(struct net_device *ndev)
{
	struct xlr_net_priv *priv = netdev_priv(ndev);
	u32 regval;

	regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG);

	if (ndev->flags & IFF_PROMISC) {
		regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
		(1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		(1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
		(1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
	} else {
		regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
		(1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
	}

	xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval);
}
356
/*
 * ndo_get_stats64: fill @stats from the MAC's hardware statistic counters.
 *
 * All values are read directly from 32-bit hardware counters; note that
 * rx_over_errors and rx_fifo_errors both mirror RX_DROP_PACKET_COUNTER
 * (the hardware exposes only one drop counter), and rx_errors is a sum
 * of the individual RX error categories.
 */
static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct xlr_net_priv *priv = netdev_priv(ndev);

	stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER);
	stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER);
	stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER);
	stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
	stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
	stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
					  RX_DROP_PACKET_COUNTER);
	stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
					  TX_DROP_FRAME_COUNTER);

	stats->multicast = xlr_nae_rdreg(priv->base_addr,
					 RX_MULTICAST_PACKET_COUNTER);
	stats->collisions = xlr_nae_rdreg(priv->base_addr,
					  TX_TOTAL_COLLISION_COUNTER);

	stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
						RX_FRAME_LENGTH_ERROR_COUNTER);
	stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
					      RX_DROP_PACKET_COUNTER);
	stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
					     RX_FCS_ERROR_COUNTER);
	stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
					       RX_ALIGNMENT_ERROR_COUNTER);

	stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
					      RX_DROP_PACKET_COUNTER);
	stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
						RX_CARRIER_SENSE_ERROR_COUNTER);

	/* aggregate of the individual RX error counters read above */
	stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
			    stats->rx_frame_errors + stats->rx_fifo_errors +
			    stats->rx_missed_errors);

	stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
						 TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
	stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
						 TX_DROP_FRAME_COUNTER);
	stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
					      TX_DROP_FRAME_COUNTER);
}
401
/* net_device callbacks implemented by this driver. */
static const struct net_device_ops xlr_netdev_ops = {
	.ndo_open = xlr_net_open,
	.ndo_stop = xlr_net_stop,
	.ndo_start_xmit = xlr_net_start_xmit,
	.ndo_select_queue = xlr_net_select_queue,
	.ndo_set_mac_address = xlr_net_set_mac_addr,
	.ndo_set_rx_mode = xlr_set_rx_mode,
	.ndo_get_stats64 = xlr_stats,
};
411
412
413
414
/*
 * Allocate a cache-aligned spill area for an FMN queue and program its
 * start (split across two registers for the >32-bit physical address)
 * and size registers.
 *
 * Returns the (aligned) spill pointer, or ZERO_SIZE_PTR on allocation
 * failure.
 *
 * NOTE(review): PTR_ALIGN discards the original kmalloc pointer, so the
 * area could never be kfree()d exactly; the driver never frees spill
 * areas, but this would leak if teardown were added.  Also, callers do
 * not check for the ZERO_SIZE_PTR failure value — confirm intent.
 */
static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
			      int reg_start_1, int reg_size, int size)
{
	void *spill;
	u32 *base;
	unsigned long phys_addr;
	u32 spill_size;

	base = priv->base_addr;
	spill_size = size;
	/* over-allocate so the area can be aligned to a cache line */
	spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC);
	if (!spill) {
		pr_err("Unable to allocate memory for spill area!\n");
		return ZERO_SIZE_PTR;
	}

	spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
	phys_addr = virt_to_phys(spill);
	dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
		size, phys_addr);
	/* start_0 takes bits [36:5] of the address, start_1 bits [39:37] */
	xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
	xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
	xlr_nae_wreg(base, reg_size, spill_size);

	return spill;
}
441
442
443
444
445
446
447
/*
 * Configure spill areas for all six FMN queues of this port: the free-in
 * and free-out descriptor queues plus the four RX classification classes.
 */
static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
{
	priv->frin_spill = xlr_config_spill(priv,
					    R_REG_FRIN_SPILL_MEM_START_0,
					    R_REG_FRIN_SPILL_MEM_START_1,
					    R_REG_FRIN_SPILL_MEM_SIZE,
					    MAX_FRIN_SPILL * sizeof(u64));
	priv->frout_spill = xlr_config_spill(priv,
					     R_FROUT_SPILL_MEM_START_0,
					     R_FROUT_SPILL_MEM_START_1,
					     R_FROUT_SPILL_MEM_SIZE,
					     MAX_FROUT_SPILL * sizeof(u64));
	priv->class_0_spill = xlr_config_spill(priv,
					       R_CLASS0_SPILL_MEM_START_0,
					       R_CLASS0_SPILL_MEM_START_1,
					       R_CLASS0_SPILL_MEM_SIZE,
					       MAX_CLASS_0_SPILL * sizeof(u64));
	priv->class_1_spill = xlr_config_spill(priv,
					       R_CLASS1_SPILL_MEM_START_0,
					       R_CLASS1_SPILL_MEM_START_1,
					       R_CLASS1_SPILL_MEM_SIZE,
					       MAX_CLASS_1_SPILL * sizeof(u64));
	priv->class_2_spill = xlr_config_spill(priv,
					       R_CLASS2_SPILL_MEM_START_0,
					       R_CLASS2_SPILL_MEM_START_1,
					       R_CLASS2_SPILL_MEM_SIZE,
					       MAX_CLASS_2_SPILL * sizeof(u64));
	priv->class_3_spill = xlr_config_spill(priv,
					       R_CLASS3_SPILL_MEM_START_0,
					       R_CLASS3_SPILL_MEM_START_1,
					       R_CLASS3_SPILL_MEM_SIZE,
					       MAX_CLASS_3_SPILL * sizeof(u64));
}
481
482
483
484
485
486static void xlr_config_pde(struct xlr_net_priv *priv)
487{
488 int i = 0;
489 u64 bkt_map = 0;
490
491
492 for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
493 bkt_map |= (0xff << (i * 8));
494
495 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
496 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
497 ((bkt_map >> 32) & 0xffffffff));
498
499 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
500 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
501 ((bkt_map >> 32) & 0xffffffff));
502
503 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
504 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
505 ((bkt_map >> 32) & 0xffffffff));
506
507 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
508 xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
509 ((bkt_map >> 32) & 0xffffffff));
510}
511
512
513
514
515
/*
 * One-time setup shared by the ports of a GMAC block: program the RFR
 * bucket sizes, the 8x8 CPU credit matrix, DMA/threshold registers,
 * prime the RX free FIFO and register the FMN receive handler.
 *
 * Returns 0 on success or the error from xlr_net_fill_rx_ring().
 */
static int xlr_config_common(struct xlr_net_priv *priv)
{
	struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
	int start_stn_id = gmac->start_stn_id;
	int end_stn_id = gmac->end_stn_id;
	int *bucket_size = priv->nd->bucket_size;
	int i, j, err;

	/* one bucket-size register per station in this GMAC's range */
	for (i = start_stn_id; i <= end_stn_id; i++) {
		xlr_nae_wreg(priv->base_addr,
			     R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
			     bucket_size[i]);
	}

	/* 8 cores x 8 buckets credit configuration */
	for (i = 0; i < 8; i++) {
		for (j = 0; j < 8; j++)
			xlr_nae_wreg(priv->base_addr,
				     (R_CC_CPU0_0 + (i * 8)) + j,
				     gmac->credit_config[(i * 8) + j]);
	}

	xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
	xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff);
	xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0);

	err = xlr_net_fill_rx_ring(priv->ndev);
	if (err)
		return err;
	nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
				 priv->adapter);
	return 0;
}
556
557static void xlr_config_translate_table(struct xlr_net_priv *priv)
558{
559 u32 cpu_mask;
560 u32 val;
561 int bkts[32];
562 int b1, b2, c1, c2, i, j, k;
563 int use_bkt;
564
565 use_bkt = 0;
566 cpu_mask = priv->nd->cpu_mask;
567
568 pr_info("Using %s-based distribution\n",
569 (use_bkt) ? "bucket" : "class");
570 j = 0;
571 for (i = 0; i < 32; i++) {
572 if ((1 << i) & cpu_mask) {
573
574 bkts[j] = ((i / 4) * 8) + (i % 4);
575 j++;
576 }
577 }
578
579
580 k = 0;
581 c1 = 3;
582 c2 = 0;
583 for (i = 0; i < 64; i++) {
584
585
586
587
588
589
590 c1 = (c1 + 1) & 3;
591 c2 = (c1 + 1) & 3;
592 b1 = bkts[k];
593 k = (k + 1) % j;
594 b2 = bkts[k];
595 k = (k + 1) % j;
596
597 val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
598 (c2 << 7) | (b2 << 1) | (use_bkt << 0));
599 dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
600 i, b1, b2, c1, c2);
601 xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
602 c1 = c2;
603 }
604}
605
/*
 * Configure the hardware packet parser/classifier: Ethernet L2 type,
 * extraction of the IPv4 protocol field and src/dst addresses at L3,
 * and TCP/UDP port extraction at L4, then fill the translate table.
 */
static void xlr_config_parser(struct xlr_net_priv *priv)
{
	u32 val;

	/* L2 is Ethernet */
	xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01);

	/* parser enable, prepad to 0x7f */
	xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
		     ((0x7f << 8) | (1 << 1)));

	/* L3 table: match EtherType 0x0800 (IPv4); extract the protocol
	 * byte and the 4-byte source/destination addresses
	 */
	xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
		     (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
		     (0x0800 << 0));
	xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
		     (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
		     (16 << 4) | 4);

	/* L4 table entries: protocol 6 (TCP) and 17 (UDP) */
	xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
	xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17);
	val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
	xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val);
	xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val);

	xlr_config_translate_table(priv);
}
635
/*
 * Low-level MII write: write @val to register @regnum of PHY @phy_addr
 * through the MII management block at @base_addr.
 *
 * Writing R_MII_MGMT_WRITE_DATA starts the transaction; the function
 * then polls R_MII_MGMT_INDICATORS until the bus goes idle, with a
 * 100 ms timeout.  Returns 0 on success or -EBUSY on timeout.
 */
static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
{
	unsigned long timeout, stoptime, checktime;
	int timedout;

	/* 100 ms timeout */
	timeout = msecs_to_jiffies(100);
	stoptime = jiffies + timeout;
	timedout = 0;

	xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);

	/* Write the data; this kicks off the MII write cycle */
	xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);

	/* poll for the management interface to go idle */
	while (!timedout) {
		checktime = jiffies;
		if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
			break;
		timedout = time_after(checktime, stoptime);
	}
	if (timedout) {
		pr_info("Phy device write err: device busy");
		return -EBUSY;
	}

	return 0;
}
665
/*
 * Low-level MII read: read register @regnum of PHY @phy_addr through the
 * MII management block at @base_addr.
 *
 * Issues a read-status command, polls R_MII_MGMT_INDICATORS until the
 * bus is idle (100 ms timeout), clears the command register and returns
 * the value from R_MII_MGMT_STATUS, or -EBUSY on timeout.
 */
static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
{
	unsigned long timeout, stoptime, checktime;
	int timedout;

	/* 100 ms timeout */
	timeout = msecs_to_jiffies(100);
	stoptime = jiffies + timeout;
	timedout = 0;

	/* setup the phy reg to be used */
	xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
		     (phy_addr << 8) | (regnum << 0));

	/* Issue the read command */
	xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
		     (1 << O_MII_MGMT_COMMAND__rstat));

	/* poll for the read cycle to complete */
	while (!timedout) {
		checktime = jiffies;
		if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
			break;
		timedout = time_after(checktime, stoptime);
	}
	if (timedout) {
		pr_info("Phy device read err: device busy");
		return -EBUSY;
	}

	/* clear the read cycle */
	xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0);

	/* Read the data */
	return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS);
}
702
703static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
704{
705 struct xlr_net_priv *priv = bus->priv;
706 int ret;
707
708 ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
709 dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
710 phy_addr, regnum, val, ret);
711 return ret;
712}
713
714static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
715{
716 struct xlr_net_priv *priv = bus->priv;
717 int ret;
718
719 ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum);
720 dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
721 phy_addr, regnum, ret);
722 return ret;
723}
724
725
726
727
728
729
730
/*
 * Initialize the SGMII serdes/PCS for this port.
 *
 * The serdes register values and GPIO writes below are opaque magic from
 * the vendor BSP; NOTE(review): meanings are undocumented here — confirm
 * against the XLS hardware manual before changing.
 */
static void xlr_sgmii_init(struct xlr_net_priv *priv)
{
	int phy;

	/* serdes configuration (vendor-provided values, device 26) */
	xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0);
	xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF);
	xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0);
	xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF);
	xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000);
	xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000);
	xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005);
	xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001);
	xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000);
	xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000);
	xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000);

	/* program GPIO values for serdes init parameters */
	xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802);
	xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104);

	xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802);
	xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104);

	/* enable autoneg on the PCS: one PCS "phy" per port (27..30) */
	phy = priv->phy_addr % 4 + 27;
	xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000);
	xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200);
}
759
/*
 * Program the GMAC (and, for SGMII, the interface-control and serdes
 * blocks) to match the PHY's current speed.  Caches the applied speed in
 * priv->phy_speed so repeated calls at the same speed are no-ops.
 */
void xlr_set_gmac_speed(struct xlr_net_priv *priv)
{
	struct phy_device *phydev = xlr_get_phydev(priv);
	int speed;

	if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
		xlr_sgmii_init(priv);

	if (phydev->speed != priv->phy_speed) {
		speed = phydev->speed;
		if (speed == SPEED_1000) {
			/* gigabit: full duplex, GMII mode */
			xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
			priv->phy_speed = speed;
		} else if (speed == SPEED_100 || speed == SPEED_10) {
			/* 10/100: MII mode */
			xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
			priv->phy_speed = speed;
		}
		/* SGMII ports also need the interface-control speed bits */
		if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
			if (speed == SPEED_10)
				xlr_nae_wreg(priv->base_addr,
					     R_INTERFACE_CONTROL,
					     SGMII_SPEED_10);
			if (speed == SPEED_100)
				xlr_nae_wreg(priv->base_addr,
					     R_INTERFACE_CONTROL,
					     SGMII_SPEED_100);
			if (speed == SPEED_1000)
				xlr_nae_wreg(priv->base_addr,
					     R_INTERFACE_CONTROL,
					     SGMII_SPEED_1000);
		}
		/* core clock divisor depends on the line speed */
		if (speed == SPEED_10)
			xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
		if (speed == SPEED_100)
			xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1);
		if (speed == SPEED_1000)
			xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0);
	}
	pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed);
}
803
/*
 * PHY link-change callback (registered via phy_connect): reprogram the
 * GMAC when the link comes up at a new speed, or when it goes down.
 *
 * NOTE(review): intreg is read but never used — reading R_INTREG may
 * have hardware side effects (e.g. acknowledging interrupts), so the
 * read is preserved; confirm against the XLR manual before removing.
 */
static void xlr_gmac_link_adjust(struct net_device *ndev)
{
	struct xlr_net_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = xlr_get_phydev(priv);
	u32 intreg;

	intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG);
	if (phydev->link) {
		if (phydev->speed != priv->phy_speed) {
			xlr_set_gmac_speed(priv);
			pr_info("gmac%d : Link up\n", priv->port_id);
		}
	} else {
		xlr_set_gmac_speed(priv);
		pr_info("gmac%d : Link down\n", priv->port_id);
	}
}
821
822static int xlr_mii_probe(struct xlr_net_priv *priv)
823{
824 struct phy_device *phydev = xlr_get_phydev(priv);
825
826 if (!phydev) {
827 pr_err("no PHY found on phy_addr %d\n", priv->phy_addr);
828 return -ENODEV;
829 }
830
831
832 phydev = phy_connect(priv->ndev, phydev_name(phydev),
833 xlr_gmac_link_adjust, priv->nd->phy_interface);
834
835 if (IS_ERR(phydev)) {
836 pr_err("could not attach PHY\n");
837 return PTR_ERR(phydev);
838 }
839 phydev->supported &= (ADVERTISED_10baseT_Full
840 | ADVERTISED_10baseT_Half
841 | ADVERTISED_100baseT_Full
842 | ADVERTISED_100baseT_Half
843 | ADVERTISED_1000baseT_Full
844 | ADVERTISED_Autoneg
845 | ADVERTISED_MII);
846
847 phydev->advertising = phydev->supported;
848 phy_attached_info(phydev);
849 return 0;
850}
851
852static int xlr_setup_mdio(struct xlr_net_priv *priv,
853 struct platform_device *pdev)
854{
855 int err;
856
857 priv->mii_bus = mdiobus_alloc();
858 if (!priv->mii_bus) {
859 pr_err("mdiobus alloc failed\n");
860 return -ENOMEM;
861 }
862
863 priv->mii_bus->priv = priv;
864 priv->mii_bus->name = "xlr-mdio";
865 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
866 priv->mii_bus->name, priv->port_id);
867 priv->mii_bus->read = xlr_mii_read;
868 priv->mii_bus->write = xlr_mii_write;
869 priv->mii_bus->parent = &pdev->dev;
870
871
872 priv->mii_bus->phy_mask = ~(1 << priv->phy_addr);
873
874
875 xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7);
876
877 err = mdiobus_register(priv->mii_bus);
878 if (err) {
879 mdiobus_free(priv->mii_bus);
880 pr_err("mdio bus registration failed\n");
881 return err;
882 }
883
884 pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
885 err = xlr_mii_probe(priv);
886 if (err) {
887 mdiobus_free(priv->mii_bus);
888 return err;
889 }
890 return 0;
891}
892
/*
 * Enable the MAC port: RGMII mode bit (on XLR processor variants), MAC
 * TX/RX and flow control, then the TX and RX control blocks.
 */
static void xlr_port_enable(struct xlr_net_priv *priv)
{
	u32 prid = (read_c0_prid() & 0xf000);

	/* Setup MAC_CONFIG reg if (xls & rgmii) */
	if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
	    priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
		xlr_reg_update(priv->base_addr, R_RX_CONTROL,
			       (1 << O_RX_CONTROL__RGMII),
			       (1 << O_RX_CONTROL__RGMII));

	/* Rx Tx enable */
	xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
		       ((1 << O_MAC_CONFIG_1__rxen) |
			(1 << O_MAC_CONFIG_1__txen) |
			(1 << O_MAC_CONFIG_1__rxfc) |
			(1 << O_MAC_CONFIG_1__txfc)),
		       ((1 << O_MAC_CONFIG_1__rxen) |
			(1 << O_MAC_CONFIG_1__txen) |
			(1 << O_MAC_CONFIG_1__rxfc) |
			(1 << O_MAC_CONFIG_1__txfc)));

	/* Setup tx control reg: enable + 512-byte threshold */
	xlr_reg_update(priv->base_addr, R_TX_CONTROL,
		       ((1 << O_TX_CONTROL__TXENABLE) |
		       (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);

	/* Setup rx control reg */
	xlr_reg_update(priv->base_addr, R_RX_CONTROL,
		       1 << O_RX_CONTROL__RXENABLE,
		       1 << O_RX_CONTROL__RXENABLE);
}
925
/*
 * Disable the MAC port: clear TX/RX enables and flow control in
 * MAC_CONFIG_1, then disable the TX and RX control blocks.
 */
static void xlr_port_disable(struct xlr_net_priv *priv)
{
	/* Disable all RX/TX enables and flow control */
	xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
		       ((1 << O_MAC_CONFIG_1__rxen) |
			(1 << O_MAC_CONFIG_1__txen) |
			(1 << O_MAC_CONFIG_1__rxfc) |
			(1 << O_MAC_CONFIG_1__txfc)), 0x0);

	/* Disable the TX block */
	xlr_reg_update(priv->base_addr, R_TX_CONTROL,
		       ((1 << O_TX_CONTROL__TXENABLE) |
		       (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);

	/* Disable the RX block */
	xlr_reg_update(priv->base_addr, R_RX_CONTROL,
		       1 << O_RX_CONTROL__RXENABLE, 0);
}
945
946
947
948
/*
 * Initialize one GMAC port: descriptor packing, MDIO/PHY setup, port
 * enable, default speed/duplex configuration, interrupt mask and the
 * statistics block.  Returns 0 on success or the xlr_setup_mdio() error.
 */
static int xlr_gmac_init(struct xlr_net_priv *priv,
			 struct platform_device *pdev)
{
	int ret;

	pr_info("Initializing the gmac%d\n", priv->port_id);

	xlr_port_disable(priv);

	/* one entry per descriptor, data offset, 1600-byte regular size */
	xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
		     (1 << O_DESC_PACK_CTRL__MAXENTRY) |
		     (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
		     (1600 << O_DESC_PACK_CTRL__REGULARSIZE));

	ret = xlr_setup_mdio(priv, pdev);
	if (ret)
		return ret;
	xlr_port_enable(priv);

	/* default: full duplex, gigabit */
	xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
	/* speed 2.5Mhz */
	xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
	/* Setup Interrupt mask reg */
	xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
		     (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
		     (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
		     (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));

	/* Clear all stats, then enable the statistics counters */
	xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
	xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
	return 0;
}
983
/*
 * Platform probe: create one net_device per port of this controller
 * (pdev has one MEM + one IRQ resource per port), map its registers,
 * initialize the GMAC and register the netdev.
 *
 * NOTE(review): the error paths only free the netdev of the port that
 * failed — netdevs registered on earlier loop iterations appear to be
 * leaked (and stay registered); likewise platform_set_drvdata() keeps
 * only the last port's priv.  Confirm intended teardown semantics.
 */
static int xlr_net_probe(struct platform_device *pdev)
{
	struct xlr_net_priv *priv = NULL;
	struct net_device *ndev;
	struct resource *res;
	struct xlr_adapter *adapter;
	int err, port;

	pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id);
	/*
	 * Allocate our adapter data structure and attach it to the device.
	 */
	adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	/*
	 * XLR and XLS have 1 and 2 NAE controller respectively
	 * Each controller has 4 gmac ports, mapping each controller
	 * under one parent device, 4 gmac ports under one device.
	 */
	for (port = 0; port < pdev->num_resources / 2; port++) {
		/* 32 TX queues: one per possible CPU thread */
		ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
		if (!ndev) {
			dev_err(&pdev->dev,
				"Allocation of Ethernet device failed\n");
			return -ENOMEM;
		}

		priv = netdev_priv(ndev);
		priv->pdev = pdev;
		priv->ndev = ndev;
		priv->port_id = (pdev->id * 4) + port;
		priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
		res = platform_get_resource(pdev, IORESOURCE_MEM, port);
		priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->base_addr)) {
			err = PTR_ERR(priv->base_addr);
			goto err_gmac;
		}
		priv->adapter = adapter;
		adapter->netdev[port] = ndev;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
		if (!res) {
			dev_err(&pdev->dev, "No irq resource for MAC %d\n",
				priv->port_id);
			err = -ENODEV;
			goto err_gmac;
		}

		ndev->irq = res->start;

		/* per-port parameters from the platform data */
		priv->phy_addr = priv->nd->phy_addr[port];
		priv->tx_stnid = priv->nd->tx_stnid[port];
		priv->mii_addr = priv->nd->mii_addr;
		priv->serdes_addr = priv->nd->serdes_addr;
		priv->pcs_addr = priv->nd->pcs_addr;
		priv->gpio_addr = priv->nd->gpio_addr;

		ndev->netdev_ops = &xlr_netdev_ops;
		ndev->watchdog_timeo = HZ;

		/* Setup Mac address and Rx mode */
		eth_hw_addr_random(ndev);
		xlr_hw_set_mac_addr(ndev);
		xlr_set_rx_mode(ndev);

		priv->num_rx_desc += MAX_NUM_DESC_SPILL;
		ndev->ethtool_ops = &xlr_ethtool_ops;
		SET_NETDEV_DEV(ndev, &pdev->dev);

		xlr_config_fifo_spill_area(priv);
		/* Configure PDE to Round-Robin pkt distribution */
		xlr_config_pde(priv);
		xlr_config_parser(priv);

		/* Call init with respect to port */
		if (strcmp(res->name, "gmac") == 0) {
			err = xlr_gmac_init(priv, pdev);
			if (err) {
				dev_err(&pdev->dev, "gmac%d init failed\n",
					priv->port_id);
				goto err_gmac;
			}
		}

		/* shared block setup: once per 4-port controller */
		if (priv->port_id == 0 || priv->port_id == 4) {
			err = xlr_config_common(priv);
			if (err)
				goto err_netdev;
		}

		err = register_netdev(ndev);
		if (err) {
			dev_err(&pdev->dev,
				"Registering netdev failed for gmac%d\n",
				priv->port_id);
			goto err_netdev;
		}
		platform_set_drvdata(pdev, priv);
	}

	return 0;

err_netdev:
	mdiobus_free(priv->mii_bus);
err_gmac:
	free_netdev(ndev);
	return err;
}
1095
/*
 * Platform remove: tear down the netdev and MDIO bus.
 *
 * NOTE(review): drvdata holds only the priv of the LAST port probed (see
 * xlr_net_probe()); the other ports' netdevs are not unregistered here —
 * confirm whether multi-port teardown is intentionally incomplete.
 */
static int xlr_net_remove(struct platform_device *pdev)
{
	struct xlr_net_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
	free_netdev(priv->ndev);
	return 0;
}
1106
/* Platform driver glue: matches devices named "xlr-net". */
static struct platform_driver xlr_net_driver = {
	.probe = xlr_net_probe,
	.remove = xlr_net_remove,
	.driver = {
		.name = "xlr-net",
	},
};

module_platform_driver(xlr_net_driver);

MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>");
MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:xlr-net");
1121