1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include <common.h>
31#include <net.h>
32#include <malloc.h>
33
34#include "mv_eth.h"
35
36
37
38#undef DEBUG_MV_ETH
39
40#ifdef DEBUG_MV_ETH
41#define DEBUG
42#define DP(x) x
43#else
44#define DP(x)
45#endif
46
47#undef MV64360_CHECKSUM_OFFLOAD
48
49
50
51
52
53
54
55
56
57
58#undef MV64360_RX_QUEUE_FILL_ON_TASK
59
60
61
62#define MAGIC_ETH_RUNNING 8031971
63#define MV64360_INTERNAL_SRAM_SIZE _256K
64#define EXTRA_BYTES 32
65#define WRAP ETH_HLEN + 2 + 4 + 16
66#define BUFFER_MTU dev->mtu + WRAP
67#define INT_CAUSE_UNMASK_ALL 0x0007ffff
68#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
69#ifdef MV64360_RX_FILL_ON_TASK
70#define INT_CAUSE_MASK_ALL 0x00000000
71#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
72#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
73#endif
74
75
76#define MV_REG_READ(offset) my_le32_to_cpu(* (volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset))
77#define MV_REG_WRITE(offset,data) *(volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32 (data)
78#define MV_SET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
79#define MV_RESET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))
80
81
82static int mv64360_eth_real_open (struct eth_device *eth);
83static int mv64360_eth_real_stop (struct eth_device *eth);
84static struct net_device_stats *mv64360_eth_get_stats (struct eth_device
85 *dev);
86static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
87static void mv64360_eth_update_stat (struct eth_device *dev);
88bool db64360_eth_start (struct eth_device *eth);
89unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
90 unsigned int mib_offset);
91int mv64360_eth_receive (struct eth_device *dev);
92
93int mv64360_eth_xmit (struct eth_device *, volatile void *packet, int length);
94
95#ifndef UPDATE_STATS_BY_SOFTWARE
96static void mv64360_eth_print_stat (struct eth_device *dev);
97#endif
98
99extern void NetReceive (volatile uchar *, int);
100
101extern unsigned int INTERNAL_REG_BASE_ADDR;
102
103
104
105
106#ifdef DEBUG_MV_ETH
107void print_globals (struct eth_device *dev)
108{
109 printf ("Ethernet PRINT_Globals-Debug function\n");
110 printf ("Base Address for ETH_PORT_INFO: %08x\n",
111 (unsigned int) dev->priv);
112 printf ("Base Address for mv64360_eth_priv: %08x\n",
113 (unsigned int) &(((ETH_PORT_INFO *) dev->priv)->
114 port_private));
115
116 printf ("GT Internal Base Address: %08x\n",
117 INTERNAL_REG_BASE_ADDR);
118 printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0], MV64360_TX_QUEUE_SIZE);
119 printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0], MV64360_RX_QUEUE_SIZE);
120 printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
121 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
122 p_rx_buffer_base[0],
123 (MV64360_RX_QUEUE_SIZE * MV64360_RX_BUFFER_SIZE) + 32);
124 printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
125 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
126 p_tx_buffer_base[0],
127 (MV64360_TX_QUEUE_SIZE * MV64360_TX_BUFFER_SIZE) + 32);
128}
129#endif
130
131#define my_cpu_to_le32(x) my_le32_to_cpu((x))
132
/*
 * Byte-swap the low 32 bits of x (little-endian <-> CPU order on this
 * big-endian platform).  Only the low 32 bits of the input contribute
 * to the result; the return value always fits in 32 bits.
 */
unsigned long my_le32_to_cpu (unsigned long x)
{
	unsigned long b0 = (x >> 24) & 0x000000ffUL;	/* byte 3 -> byte 0 */
	unsigned long b1 = (x >> 8) & 0x0000ff00UL;	/* byte 2 -> byte 1 */
	unsigned long b2 = (x << 8) & 0x00ff0000UL;	/* byte 1 -> byte 2 */
	unsigned long b3 = (x << 24) & 0xff000000UL;	/* byte 0 -> byte 3 */

	return b3 | b2 | b1 | b0;
}
139
140
141
142
143
144
145
146
147
148
149
150static void mv64360_eth_print_phy_status (struct eth_device *dev)
151{
152 struct mv64360_eth_priv *port_private;
153 unsigned int port_num;
154 ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
155 unsigned int port_status, phy_reg_data;
156
157 port_private =
158 (struct mv64360_eth_priv *) ethernet_private->port_private;
159 port_num = port_private->port_num;
160
161
162 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
163 if (!(phy_reg_data & 0x20)) {
164 printf ("Ethernet port changed link status to DOWN\n");
165 } else {
166 port_status =
167 MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));
168 printf ("Ethernet status port %d: Link up", port_num);
169 printf (", %s",
170 (port_status & BIT2) ? "Full Duplex" : "Half Duplex");
171 if (port_status & BIT4)
172 printf (", Speed 1 Gbps");
173 else
174 printf (", %s",
175 (port_status & BIT5) ? "Speed 100 Mbps" :
176 "Speed 10 Mbps");
177 printf ("\n");
178 }
179}
180
181
182
183
184
/* Board-level init hook: bring the port up via the common start path. */
int db64360_eth_probe (struct eth_device *dev)
{
	return (int) db64360_eth_start (dev);
}
189
/* Board-level receive hook: drain any completed RX descriptors. */
int db64360_eth_poll (struct eth_device *dev)
{
	return mv64360_eth_receive (dev);
}
194
/*
 * Board-level transmit hook.  The xmit path's status is discarded to
 * preserve the original always-returns-0 contract.
 */
int db64360_eth_transmit (struct eth_device *dev, volatile void *packet,
			  int length)
{
	(void) mv64360_eth_xmit (dev, packet, length);
	return 0;
}
201
/* Board-level halt hook: full port shutdown. */
void db64360_eth_disable (struct eth_device *dev)
{
	mv64360_eth_stop (dev);
}
206
207
208void mv6436x_eth_initialize (bd_t * bis)
209{
210 struct eth_device *dev;
211 ETH_PORT_INFO *ethernet_private;
212 struct mv64360_eth_priv *port_private;
213 int devnum, x, temp;
214 char *s, *e, buf[64];
215
216 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
217 dev = calloc (sizeof (*dev), 1);
218 if (!dev) {
219 printf ("%s: mv_enet%d allocation failure, %s\n",
220 __FUNCTION__, devnum, "eth_device structure");
221 return;
222 }
223
224
225 sprintf (dev->name, "mv_enet%d", devnum);
226
227#ifdef DEBUG
228 printf ("Initializing %s\n", dev->name);
229#endif
230
231
232 switch (devnum) {
233 case 0:
234 s = "ethaddr";
235 break;
236
237 case 1:
238 s = "eth1addr";
239 break;
240
241 case 2:
242 s = "eth2addr";
243 break;
244
245 default:
246 printf ("%s: Invalid device number %d\n",
247 __FUNCTION__, devnum);
248 return;
249 }
250
251 temp = getenv_r (s, buf, sizeof (buf));
252 s = (temp > 0) ? buf : NULL;
253
254#ifdef DEBUG
255 printf ("Setting MAC %d to %s\n", devnum, s);
256#endif
257 for (x = 0; x < 6; ++x) {
258 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
259 if (s)
260 s = (*e) ? e + 1 : e;
261 }
262
263 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
264
265 dev->init = (void *) db64360_eth_probe;
266 dev->halt = (void *) ethernet_phy_reset;
267 dev->send = (void *) db64360_eth_transmit;
268 dev->recv = (void *) db64360_eth_poll;
269
270 ethernet_private = calloc (sizeof (*ethernet_private), 1);
271 dev->priv = (void *) ethernet_private;
272
273 if (!ethernet_private) {
274 printf ("%s: %s allocation failure, %s\n",
275 __FUNCTION__, dev->name,
276 "Private Device Structure");
277 free (dev);
278 return;
279 }
280
281 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
282 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
283
284
285 port_private = calloc (sizeof (*ethernet_private), 1);
286 ethernet_private->port_private = (void *)port_private;
287 if (!port_private) {
288 printf ("%s: %s allocation failure, %s\n",
289 __FUNCTION__, dev->name,
290 "Port Private Device Structure");
291
292 free (ethernet_private);
293 free (dev);
294 return;
295 }
296
297 port_private->stats =
298 calloc (sizeof (struct net_device_stats), 1);
299 if (!port_private->stats) {
300 printf ("%s: %s allocation failure, %s\n",
301 __FUNCTION__, dev->name,
302 "Net stat Structure");
303
304 free (port_private);
305 free (ethernet_private);
306 free (dev);
307 return;
308 }
309 memset (ethernet_private->port_private, 0,
310 sizeof (struct mv64360_eth_priv));
311 switch (devnum) {
312 case 0:
313 ethernet_private->port_num = ETH_0;
314 break;
315 case 1:
316 ethernet_private->port_num = ETH_1;
317 break;
318 case 2:
319 ethernet_private->port_num = ETH_2;
320 break;
321 default:
322 printf ("Invalid device number %d\n", devnum);
323 break;
324 };
325
326 port_private->port_num = devnum;
327
328
329
330
331 mv64360_eth_update_stat (dev);
332 memset (port_private->stats, 0,
333 sizeof (struct net_device_stats));
334
335 switch (devnum) {
336 case 0:
337 s = "ethaddr";
338 break;
339
340 case 1:
341 s = "eth1addr";
342 break;
343
344 case 2:
345 s = "eth2addr";
346 break;
347
348 default:
349 printf ("%s: Invalid device number %d\n",
350 __FUNCTION__, devnum);
351 return;
352 }
353
354 temp = getenv_r (s, buf, sizeof (buf));
355 s = (temp > 0) ? buf : NULL;
356
357#ifdef DEBUG
358 printf ("Setting MAC %d to %s\n", devnum, s);
359#endif
360 for (x = 0; x < 6; ++x) {
361 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
362 if (s)
363 s = (*e) ? e + 1 : e;
364 }
365
366 DP (printf ("Allocating descriptor and buffer rings\n"));
367
368 ethernet_private->p_rx_desc_area_base[0] =
369 (ETH_RX_DESC *) memalign (16,
370 RX_DESC_ALIGNED_SIZE *
371 MV64360_RX_QUEUE_SIZE + 1);
372 ethernet_private->p_tx_desc_area_base[0] =
373 (ETH_TX_DESC *) memalign (16,
374 TX_DESC_ALIGNED_SIZE *
375 MV64360_TX_QUEUE_SIZE + 1);
376
377 ethernet_private->p_rx_buffer_base[0] =
378 (char *) memalign (16,
379 MV64360_RX_QUEUE_SIZE *
380 MV64360_TX_BUFFER_SIZE + 1);
381 ethernet_private->p_tx_buffer_base[0] =
382 (char *) memalign (16,
383 MV64360_RX_QUEUE_SIZE *
384 MV64360_TX_BUFFER_SIZE + 1);
385
386#ifdef DEBUG_MV_ETH
387
388 print_globals (dev);
389#endif
390 eth_register (dev);
391
392 }
393 DP (printf ("%s: exit\n", __FUNCTION__));
394
395}
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/* Public entry point; the real bring-up lives in the helper below. */
int mv64360_eth_open (struct eth_device *dev)
{
	return mv64360_eth_real_open (dev);
}
415
416
/*
 * Bring the port fully up:
 *  - latch the MAC address into the per-port state,
 *  - stop RX, clear stale interrupt causes and unmask all causes,
 *  - set the PHY address and (re)initialize the port hardware,
 *  - build and zero one TX and one RX descriptor ring per queue,
 *  - start the port and force the serial-control override bits,
 *  - verify the PHY reports link, resetting it once if it does not.
 *
 * Returns 1 on success, 0 when no link could be brought up.
 */
static int mv64360_eth_real_open (struct eth_device *dev)
{

	unsigned int queue;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	u32 port_status, phy_reg_data;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;

	/* Latch the current MAC address into the per-port state. */
	memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);

	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Stop all RX queues (bits 8..15 are the per-queue disable bits). */
	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
		      0x0000ff00);

	/* Clear any stale interrupt causes ... */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	/* ... then unmask all ordinary and extended causes. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL_EXT);

	/* PHY address assumed to be 0x8 + port number -- TODO confirm
	 * against the board's PHY strapping. */
	ethernet_private->port_phy_addr = 0x8 + port_num;

	/* Reset the port and program its address windows (see
	 * eth_port_init below). */
	eth_port_init (ethernet_private);

	/* Build one TX descriptor ring per TX queue.  NOTE(review): the
	 * ring-init call always passes ETH_Q0, regardless of 'queue'. */
	for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
		unsigned int size;

		port_private->tx_ring_size[queue] = MV64360_TX_QUEUE_SIZE;
		size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
		ethernet_private->tx_desc_area_size[queue] = size;

		/* Start from a clean descriptor area. */
		memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
			0, ethernet_private->tx_desc_area_size[queue]);


		if (ether_init_tx_desc_ring
		    (ethernet_private, ETH_Q0,
		     port_private->tx_ring_size[queue],
		     MV64360_TX_BUFFER_SIZE ,
		     (unsigned int) ethernet_private->
		     p_tx_desc_area_base[queue],
		     (unsigned int) ethernet_private->
		     p_tx_buffer_base[queue]) == false)
			printf ("### Error initializing TX Ring\n");
	}

	/* Likewise one RX descriptor ring per RX queue. */
	for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
		unsigned int size;


		port_private->rx_ring_size[queue] = MV64360_RX_QUEUE_SIZE;
		size = (port_private->rx_ring_size[queue] *
			RX_DESC_ALIGNED_SIZE);
		ethernet_private->rx_desc_area_size[queue] = size;


		memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
			0, ethernet_private->rx_desc_area_size[queue]);
		if ((ether_init_rx_desc_ring
		     (ethernet_private, ETH_Q0,
		      port_private->rx_ring_size[queue],
		      MV64360_RX_BUFFER_SIZE ,
		      (unsigned int) ethernet_private->
		      p_rx_desc_area_base[queue],
		      (unsigned int) ethernet_private->
		      p_rx_buffer_base[queue])) == false)
			printf ("### Error initializing RX Ring\n");
	}

	eth_port_start (ethernet_private);

	/* Overwrite bits 17..19 of the serial control register with 0x5,
	 * preserving all other bits. */
	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num),
		      (0x5 << 17) |
		      (MV_REG_READ
		       (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num))
		       & 0xfff1ffff));

	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
	port_status = MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));

	/* PHY register 1 bit 5 = link status; attempt one PHY reset if
	 * the link is down, then re-check. */
	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {

		if ((ethernet_phy_reset (port_num)) != true) {
			printf ("$$ Warnning: No link on port %d \n",
				port_num);
			return 0;
		} else {
			eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
			if (!(phy_reg_data & 0x20)) {
				printf ("### Error: Phy is not active\n");
				return 0;
			}
		}
	} else {
		mv64360_eth_print_phy_status (dev);
	}
	/* Mark the port as running so the stop path prints statistics. */
	port_private->eth_running = MAGIC_ETH_RUNNING;
	return 1;
}
545
546
547static int mv64360_eth_free_tx_rings (struct eth_device *dev)
548{
549 unsigned int queue;
550 ETH_PORT_INFO *ethernet_private;
551 struct mv64360_eth_priv *port_private;
552 unsigned int port_num;
553 volatile ETH_TX_DESC *p_tx_curr_desc;
554
555 ethernet_private = (ETH_PORT_INFO *) dev->priv;
556 port_private =
557 (struct mv64360_eth_priv *) ethernet_private->port_private;
558 port_num = port_private->port_num;
559
560
561 MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
562 0x0000ff00);
563
564
565 DP (printf ("Clearing previously allocated TX queues... "));
566 for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
567
568 for (p_tx_curr_desc =
569 ethernet_private->p_tx_desc_area_base[queue];
570 ((unsigned int) p_tx_curr_desc <= (unsigned int)
571 ethernet_private->p_tx_desc_area_base[queue] +
572 ethernet_private->tx_desc_area_size[queue]);
573 p_tx_curr_desc =
574 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
575 TX_DESC_ALIGNED_SIZE)) {
576
577 if (p_tx_curr_desc->return_info != 0) {
578 p_tx_curr_desc->return_info = 0;
579 DP (printf ("freed\n"));
580 }
581 }
582 DP (printf ("Done\n"));
583 }
584 return 0;
585}
586
587static int mv64360_eth_free_rx_rings (struct eth_device *dev)
588{
589 unsigned int queue;
590 ETH_PORT_INFO *ethernet_private;
591 struct mv64360_eth_priv *port_private;
592 unsigned int port_num;
593 volatile ETH_RX_DESC *p_rx_curr_desc;
594
595 ethernet_private = (ETH_PORT_INFO *) dev->priv;
596 port_private =
597 (struct mv64360_eth_priv *) ethernet_private->port_private;
598 port_num = port_private->port_num;
599
600
601
602 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
603 0x0000ff00);
604
605
606 DP (printf ("Clearing previously allocated RX queues... "));
607 for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
608
609 for (p_rx_curr_desc =
610 ethernet_private->p_rx_desc_area_base[queue];
611 (((unsigned int) p_rx_curr_desc <
612 ((unsigned int) ethernet_private->
613 p_rx_desc_area_base[queue] +
614 ethernet_private->rx_desc_area_size[queue])));
615 p_rx_curr_desc =
616 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
617 RX_DESC_ALIGNED_SIZE)) {
618 if (p_rx_curr_desc->return_info != 0) {
619 p_rx_curr_desc->return_info = 0;
620 DP (printf ("freed\n"));
621 }
622 }
623 DP (printf ("Done\n"));
624 }
625 return 0;
626}
627
628
629
630
631
632
633
634
635
636
637
638int mv64360_eth_stop (struct eth_device *dev)
639{
640 ETH_PORT_INFO *ethernet_private;
641 struct mv64360_eth_priv *port_private;
642 unsigned int port_num;
643
644 ethernet_private = (ETH_PORT_INFO *) dev->priv;
645 port_private =
646 (struct mv64360_eth_priv *) ethernet_private->port_private;
647 port_num = port_private->port_num;
648
649
650 MV_REG_WRITE (MV64360_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
651 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
652 mv64360_eth_real_stop (dev);
653
654 return 0;
655};
656
657
658
/*
 * Tear the port down: free the TX/RX descriptor rings, reset the port,
 * clear and mask all interrupts, and -- when hardware statistics are
 * in use -- print and reset the accumulated statistics.
 * Always returns 0.
 */
static int mv64360_eth_real_stop (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Drop buffer references held by the descriptor rings. */
	mv64360_eth_free_tx_rings (dev);
	mv64360_eth_free_rx_rings (dev);

	eth_port_reset (ethernet_private->port_num);

	/* Clear any latched interrupt causes ... */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	/* ... and mask everything off. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
	/* Clear this port's bit in the CPU interrupt mask. */
	MV_RESET_REG_BITS (MV64360_CPU_INTERRUPT0_MASK_HIGH,
			   BIT0 << port_num);

#ifndef UPDATE_STATS_BY_SOFTWARE

	/* Print statistics only if the port was actually running --
	 * guards against printing twice on a double stop. */
	if (port_private->eth_running == MAGIC_ETH_RUNNING) {
		port_private->eth_running = 0;
		mv64360_eth_print_stat (dev);
	}
	memset (port_private->stats, 0, sizeof (struct net_device_stats));
#endif
	DP (printf ("\nEthernet stopped ... \n"));
	return 0;
}
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713int mv64360_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
714 int dataSize)
715{
716 ETH_PORT_INFO *ethernet_private;
717 struct mv64360_eth_priv *port_private;
718 unsigned int port_num;
719 PKT_INFO pkt_info;
720 ETH_FUNC_RET_STATUS status;
721 struct net_device_stats *stats;
722 ETH_FUNC_RET_STATUS release_result;
723
724 ethernet_private = (ETH_PORT_INFO *) dev->priv;
725 port_private =
726 (struct mv64360_eth_priv *) ethernet_private->port_private;
727 port_num = port_private->port_num;
728
729 stats = port_private->stats;
730
731
732 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
733 pkt_info.byte_cnt = dataSize;
734 pkt_info.buf_ptr = (unsigned int) dataPtr;
735 pkt_info.return_info = 0;
736
737 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
738 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
739 printf ("Error on transmitting packet ..");
740 if (status == ETH_QUEUE_FULL)
741 printf ("ETH Queue is full. \n");
742 if (status == ETH_QUEUE_LAST_RESOURCE)
743 printf ("ETH Queue: using last available resource. \n");
744 goto error;
745 }
746
747
748 stats->tx_bytes += dataSize;
749 stats->tx_packets++;
750
751
752 do {
753 release_result =
754 eth_tx_return_desc (ethernet_private, ETH_Q0,
755 &pkt_info);
756 switch (release_result) {
757 case ETH_OK:
758 DP (printf ("descriptor released\n"));
759 if (pkt_info.cmd_sts & BIT0) {
760 printf ("Error in TX\n");
761 stats->tx_errors++;
762
763 }
764 break;
765 case ETH_RETRY:
766 DP (printf ("transmission still in process\n"));
767 break;
768
769 case ETH_ERROR:
770 printf ("routine can not access Tx desc ring\n");
771 break;
772
773 case ETH_END_OF_JOB:
774 DP (printf ("the routine has nothing to release\n"));
775 break;
776 default:
777 break;
778 }
779 } while (release_result == ETH_OK);
780
781
782 return 0;
783 error:
784 return 1;
785}
786
787
788
789
790
791
792
793
794
795
796
797
798
/*
 * Poll RX queue 0 and hand every completed frame to the network stack
 * via NetReceive().  Frames that do not fit in a single descriptor or
 * that carry the error-summary bit are dropped; the buffer is recycled
 * back to the ring in every case.  Always returns 1.
 */
int mv64360_eth_receive (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	PKT_INFO pkt_info;
	struct net_device_stats *stats;


	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;
	stats = port_private->stats;

	/* Drain everything the hardware has completed. */
	while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) ==
		ETH_OK)) {

#ifdef DEBUG_MV_ETH
		if (pkt_info.byte_cnt != 0) {
			printf ("%s: Received %d byte Packet @ 0x%x\n",
				__FUNCTION__, pkt_info.byte_cnt,
				pkt_info.buf_ptr);
		}
#endif

		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;

		/* A good frame occupies exactly one descriptor (FIRST and
		 * LAST both set) and has no error summary.
		 * NOTE(review): the "spread on multiple descriptors"
		 * message below is also printed for single-descriptor
		 * frames that merely have the error-summary bit set. */
		if (((pkt_info.
		      cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
		     (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
		    || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;

			printf ("Received packet spread on multiple descriptors\n");

			/* Error-summary frames also count as RX errors. */
			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
				stats->rx_errors++;
			}

			/* Recycle the buffer: 8-byte-align the pointer and
			 * clear the byte count before returning it. */
			pkt_info.buf_ptr &= ~0x7;
			pkt_info.byte_cnt = 0x0000;

			if (eth_rx_return_buff
			    (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
				printf ("Error while returning the RX Desc to Ring\n");
			} else {
				DP (printf ("RX Desc returned to Ring\n"));
			}

		} else {

			/* Good frame: pass it up the stack ... */
#ifdef DEBUG_MV_ETH
			printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
#endif

			NetReceive ((uchar *) pkt_info.buf_ptr,
				    (int) pkt_info.byte_cnt);

			/* ... then recycle the buffer exactly as above. */
			pkt_info.buf_ptr &= ~0x7;
			pkt_info.byte_cnt = 0x0000;
			DP (printf
			    ("RX: pkt_info.buf_ptr = %x\n",
			     pkt_info.buf_ptr));
			if (eth_rx_return_buff
			    (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
				printf ("Error while returning the RX Desc to Ring\n");
			} else {
				DP (printf ("RX Desc returned to Ring\n"));
			}



		}
	}
	/* Fold the hardware MIB counters into the software statistics. */
	mv64360_eth_get_stats (dev);
	return 1;
}
887
888
889
890
891
892
893
894
895
896
897
898static struct net_device_stats *mv64360_eth_get_stats (struct eth_device *dev)
899{
900 ETH_PORT_INFO *ethernet_private;
901 struct mv64360_eth_priv *port_private;
902 unsigned int port_num;
903
904 ethernet_private = (ETH_PORT_INFO *) dev->priv;
905 port_private =
906 (struct mv64360_eth_priv *) ethernet_private->port_private;
907 port_num = port_private->port_num;
908
909 mv64360_eth_update_stat (dev);
910
911 return port_private->stats;
912}
913
914
915
916
917
918
919
920
921
922
923
/*
 * Accumulate the port's hardware MIB counters into the software
 * net_device_stats structure.  The high halves of the 64-bit octet
 * counters are read into a throwaway volatile -- presumably so the
 * counter pair is consumed as a unit (read-to-clear); TODO confirm
 * against the MV64360 MIB counter semantics.
 */
static void mv64360_eth_update_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	struct net_device_stats *stats;
	unsigned int port_num;
	volatile unsigned int dummy;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;
	stats = port_private->stats;

	/* Frame and octet counters (low 32 bits of the octet counts). */
	stats->rx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_RECEIVED);
	stats->tx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_SENT);
	stats->rx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);

	/* Read (and discard) the high 32 bits of the octet counters. */
	dummy = eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
	stats->tx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_SENT_LOW);
	dummy = eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_SENT_HIGH);
	stats->rx_errors += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MAC_RECEIVE_ERROR);

	/* CRC errors are reported as dropped frames here. */
	stats->rx_dropped +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_BAD_CRC_EVENT);
	stats->multicast += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MULTICAST_FRAMES_RECEIVED);
	/* Normal and late collisions are summed into one counter. */
	stats->collisions +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_COLLISION) +
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_LATE_COLLISION);

	/* Undersize and oversize frames both count as length errors. */
	stats->rx_length_errors +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_UNDERSIZE_RECEIVED)
		+
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_OVERSIZE_RECEIVED);

}
995
996#ifndef UPDATE_STATS_BY_SOFTWARE
997
998
999
1000
1001
1002
1003
1004
1005
1006static void mv64360_eth_print_stat (struct eth_device *dev)
1007{
1008 ETH_PORT_INFO *ethernet_private;
1009 struct mv64360_eth_priv *port_private;
1010 struct net_device_stats *stats;
1011 unsigned int port_num;
1012
1013 ethernet_private = (ETH_PORT_INFO *) dev->priv;
1014 port_private =
1015 (struct mv64360_eth_priv *) ethernet_private->port_private;
1016 port_num = port_private->port_num;
1017 stats = port_private->stats;
1018
1019
1020 printf ("\n### Network statistics: ###\n");
1021 printf ("--------------------------\n");
1022 printf (" Packets received: %ld\n", stats->rx_packets);
1023 printf (" Packets send: %ld\n", stats->tx_packets);
1024 printf (" Received bytes: %ld\n", stats->rx_bytes);
1025 printf (" Send bytes: %ld\n", stats->tx_bytes);
1026 if (stats->rx_errors != 0)
1027 printf (" Rx Errors: %ld\n",
1028 stats->rx_errors);
1029 if (stats->rx_dropped != 0)
1030 printf (" Rx dropped (CRC Errors): %ld\n",
1031 stats->rx_dropped);
1032 if (stats->multicast != 0)
1033 printf (" Rx mulicast frames: %ld\n",
1034 stats->multicast);
1035 if (stats->collisions != 0)
1036 printf (" No. of collisions: %ld\n",
1037 stats->collisions);
1038 if (stats->rx_length_errors != 0)
1039 printf (" Rx length errors: %ld\n",
1040 stats->rx_length_errors);
1041}
1042#endif
1043
1044
1045
1046
1047
1048
1049
1050bool db64360_eth_start (struct eth_device *dev)
1051{
1052 return (mv64360_eth_open (dev));
1053}
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1248 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1249
1250#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1251 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1252 (1 << (8 + tx_queue)))
1253
1254#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1255MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1256
1257#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1258MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1259
1260#define CURR_RFD_GET(p_curr_desc, queue) \
1261 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1262
1263#define CURR_RFD_SET(p_curr_desc, queue) \
1264 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1265
1266#define USED_RFD_GET(p_used_desc, queue) \
1267 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1268
1269#define USED_RFD_SET(p_used_desc, queue)\
1270(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1271
1272
1273#define CURR_TFD_GET(p_curr_desc, queue) \
1274 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1275
1276#define CURR_TFD_SET(p_curr_desc, queue) \
1277 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1278
1279#define USED_TFD_GET(p_used_desc, queue) \
1280 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1281
1282#define USED_TFD_SET(p_used_desc, queue) \
1283 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1284
1285#define FIRST_TFD_GET(p_first_desc, queue) \
1286 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1287
1288#define FIRST_TFD_SET(p_first_desc, queue) \
1289 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1290
1291
1292
1293#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1294
1295#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1296
1297#define LINK_UP_TIMEOUT 100000
1298#define PHY_BUSY_TIMEOUT 10000000
1299
1300
1301
1302
1303static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1304static int ethernet_phy_get (ETH_PORT eth_port_num);
1305
1306
1307static void eth_set_access_control (ETH_PORT eth_port_num,
1308 ETH_WIN_PARAM * param);
1309static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1310 ETH_QUEUE queue, int option);
1311#if 0
1312static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1313 unsigned char mc_byte,
1314 ETH_QUEUE queue, int option);
1315static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1316 unsigned char crc8,
1317 ETH_QUEUE queue, int option);
1318#endif
1319
1320static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1321 int byte_count);
1322
1323void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1324
1325
1326typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1327u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1328{
1329 u32 result = 0;
1330 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1331
1332 if (enable & (1 << bank))
1333 return 0;
1334 if (bank == BANK0)
1335 result = MV_REG_READ (MV64360_CS_0_BASE_ADDR);
1336 if (bank == BANK1)
1337 result = MV_REG_READ (MV64360_CS_1_BASE_ADDR);
1338 if (bank == BANK2)
1339 result = MV_REG_READ (MV64360_CS_2_BASE_ADDR);
1340 if (bank == BANK3)
1341 result = MV_REG_READ (MV64360_CS_3_BASE_ADDR);
1342 result &= 0x0000ffff;
1343 result = result << 16;
1344 return result;
1345}
1346
1347u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1348{
1349 u32 result = 0;
1350 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1351
1352 if (enable & (1 << bank))
1353 return 0;
1354 if (bank == BANK0)
1355 result = MV_REG_READ (MV64360_CS_0_SIZE);
1356 if (bank == BANK1)
1357 result = MV_REG_READ (MV64360_CS_1_SIZE);
1358 if (bank == BANK2)
1359 result = MV_REG_READ (MV64360_CS_2_SIZE);
1360 if (bank == BANK3)
1361 result = MV_REG_READ (MV64360_CS_3_SIZE);
1362 result += 1;
1363 result &= 0x0000ffff;
1364 result = result << 16;
1365 return result;
1366}
1367
1368u32 mv_get_internal_sram_base (void)
1369{
1370 u32 result;
1371
1372 result = MV_REG_READ (MV64360_INTEGRATED_SRAM_BASE_ADDR);
1373 result &= 0x0000ffff;
1374 result = result << 16;
1375 return result;
1376}
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1404{
1405 int queue;
1406 ETH_WIN_PARAM win_param;
1407
1408 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1409 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1410 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1411 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1412
1413 p_eth_port_ctrl->port_rx_queue_command = 0;
1414 p_eth_port_ctrl->port_tx_queue_command = 0;
1415
1416
1417 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1418 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1419 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1420 p_eth_port_ctrl->rx_resource_err[queue] = false;
1421 }
1422
1423 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1424 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1425 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1426 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1427 p_eth_port_ctrl->tx_resource_err[queue] = false;
1428 }
1429
1430 eth_port_reset (p_eth_port_ctrl->port_num);
1431
1432
1433 win_param.win = ETH_WIN0;
1434 win_param.target = ETH_TARGET_DRAM;
1435 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1436#ifndef CONFIG_NOT_COHERENT_CACHE
1437 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1438#endif
1439 win_param.high_addr = 0;
1440
1441 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1442 win_param.size = mv_get_dram_bank_size (BANK0);
1443 if (win_param.size == 0)
1444 win_param.enable = 0;
1445 else
1446 win_param.enable = 1;
1447 win_param.access_ctrl = EWIN_ACCESS_FULL;
1448
1449
1450 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1451
1452
1453 win_param.win = ETH_WIN1;
1454 win_param.target = ETH_TARGET_DRAM;
1455 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1456#ifndef CONFIG_NOT_COHERENT_CACHE
1457 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1458#endif
1459 win_param.high_addr = 0;
1460
1461 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1462 win_param.size = mv_get_dram_bank_size (BANK1);
1463 if (win_param.size == 0)
1464 win_param.enable = 0;
1465 else
1466 win_param.enable = 1;
1467 win_param.access_ctrl = EWIN_ACCESS_FULL;
1468
1469
1470 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1471
1472
1473 win_param.win = ETH_WIN2;
1474 win_param.target = ETH_TARGET_DRAM;
1475 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1476#ifndef CONFIG_NOT_COHERENT_CACHE
1477 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1478#endif
1479 win_param.high_addr = 0;
1480
1481 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1482 win_param.size = mv_get_dram_bank_size (BANK2);
1483 if (win_param.size == 0)
1484 win_param.enable = 0;
1485 else
1486 win_param.enable = 1;
1487 win_param.access_ctrl = EWIN_ACCESS_FULL;
1488
1489
1490 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1491
1492
1493 win_param.win = ETH_WIN3;
1494 win_param.target = ETH_TARGET_DRAM;
1495 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1496#ifndef CONFIG_NOT_COHERENT_CACHE
1497 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1498#endif
1499 win_param.high_addr = 0;
1500
1501 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1502 win_param.size = mv_get_dram_bank_size (BANK3);
1503 if (win_param.size == 0)
1504 win_param.enable = 0;
1505 else
1506 win_param.enable = 1;
1507 win_param.access_ctrl = EWIN_ACCESS_FULL;
1508
1509
1510 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1511
1512
1513 win_param.win = ETH_WIN4;
1514 win_param.target = EBAR_TARGET_CBS;
1515 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1516 win_param.high_addr = 0;
1517 win_param.base_addr = mv_get_internal_sram_base ();
1518 win_param.size = MV64360_INTERNAL_SRAM_SIZE;
1519 win_param.enable = 1;
1520 win_param.access_ctrl = EWIN_ACCESS_FULL;
1521
1522
1523 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1524
1525 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1526
1527 ethernet_phy_set (p_eth_port_ctrl->port_num,
1528 p_eth_port_ctrl->port_phy_addr);
1529
1530 return;
1531
1532}
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
/*
 * eth_port_start - program the port hardware from the shadow state and
 * enable it.
 *
 * Writes the per-queue current descriptor pointers, programs the MAC
 * address, port config / serial-control / SDMA registers, sets up the
 * TX queue 0 token bucket, and enables the configured RX queues.
 *
 * Returns: true when the PHY status register reports link up (bit 5 of
 *          register 1), false otherwise.
 *
 * NOTE: the register write order below is significant for the hardware;
 * do not reorder.
 */
static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
{
	int queue;
	volatile ETH_TX_DESC *p_tx_curr_desc;
	volatile ETH_RX_DESC *p_rx_curr_desc;
	unsigned int phy_reg_data;
	ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;

	/* Point each TX queue's hardware "current descriptor" register at
	 * the software ring (registers are 4 bytes apart per queue). */
	for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
		CURR_TFD_GET (p_tx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_tx_curr_desc));

	}

	/* Same for the RX queues; additionally program the unicast MAC
	 * address filter for every queue that has a descriptor ring. */
	for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
		CURR_RFD_GET (p_rx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_rx_curr_desc));

		if (p_rx_curr_desc != NULL)
			/* Route frames for our MAC address to this queue. */
			eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
					      p_eth_port_ctrl->port_mac_addr,
					      queue);
	}

	/* Program the shadowed configuration registers. */
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_config);

	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      p_eth_port_ctrl->port_config_extend);

	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
		      p_eth_port_ctrl->port_serial_control);

	/* Enable the serial port after its configuration is in place. */
	MV_SET_REG_BITS (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
			 ETH_SERIAL_PORT_ENABLE);

	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_sdma_config);

	/* TX queue 0 token bucket: effectively unlimited rate. */
	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
		      (eth_port_num), 0x3fffffff);
	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
		      (eth_port_num), 0x03fffcff);

	/* NOTE(review): MTU register written as 0 — presumably "use
	 * default/maximum"; confirm against the MV64360 datasheet. */
	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);

	/* Enable the RX queues configured in the shadow command word. */
	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
		      p_eth_port_ctrl->port_rx_queue_command);

	/* Check link state: PHY status register (reg 1), bit 5. */
	eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);

	if (!(phy_reg_data & 0x20))
		return false;

	return true;
}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1655 unsigned char *p_addr, ETH_QUEUE queue)
1656{
1657 unsigned int mac_h;
1658 unsigned int mac_l;
1659
1660 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1661 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1662 (p_addr[2] << 8) | (p_addr[3] << 0);
1663
1664 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1665 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1666
1667
1668 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1669
1670 return;
1671}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
/*
 * eth_port_uc_addr - set or clear the unicast DA filter table entry for
 * the low nibble of a MAC address.
 *
 * The table holds 16 one-byte entries packed four per 32-bit register;
 * each entry has a valid bit (bit 0) and a queue number (bits 1..3).
 *
 * @uc_nibble: low nibble of the MAC address' last byte (masked to 0xF)
 * @queue:     RX queue to steer matching frames to
 * @option:    ACCEPT_MAC_ADDR or REJECT_MAC_ADDR
 *
 * Returns: false for an unknown @option, true otherwise.
 */
static bool eth_port_uc_addr (ETH_PORT eth_port_num,
			      unsigned char uc_nibble,
			      ETH_QUEUE queue, int option)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the register (tbl_offset) and byte lane (reg_offset)
	 * holding this nibble's entry. */
	uc_nibble = (0xf & uc_nibble);
	tbl_offset = (uc_nibble / 4) * 4;
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:

		unicast_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
				      (eth_port_num)
				      + tbl_offset));

		/* NOTE(review): this mask keeps only bits 1-3 of the target
		 * entry and zeroes the other three entries in the register;
		 * a plain clear would be &= ~(0xFF << (8 * reg_offset)).
		 * Looks suspicious — confirm against the MV64360 datasheet
		 * before relying on REJECT behavior. */
		unicast_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
			       (eth_port_num)
			       + tbl_offset), unicast_reg);
		break;

	case ACCEPT_MAC_ADDR:

		unicast_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
				      (eth_port_num)
				      + tbl_offset));

		/* Set valid bit and queue number for this entry. */
		unicast_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
			       (eth_port_num)
			       + tbl_offset), unicast_reg);

		break;

	default:
		return false;
	}
	return true;
}
1744
1745#if 0
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
/*
 * eth_port_mc_addr - add/remove a multicast address from the DA filter.
 * (Currently compiled out via #if 0.)
 *
 * Addresses of the form 01:00:5E:00:00:xx go through the "special"
 * multicast table, indexed directly by the last byte.  All other
 * multicast addresses go through the "other" table, indexed by a CRC-8
 * of the 48-bit address computed below as an explicit XOR network over
 * the address bits (one expression per output bit).
 *
 * NOTE(review): the CRC taps are hand-expanded; presumably a standard
 * CRC-8 over the MAC as the hardware computes it — verify against the
 * MV64360 datasheet before modifying.
 */
static void eth_port_mc_addr (ETH_PORT eth_port_num,
			      unsigned char *p_addr,
			      ETH_QUEUE queue, int option)
{
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int mac_array[48];
	int crc[8];
	int i;

	/* Special multicast range 01:00:5E:00:00:xx: direct table lookup. */
	if ((p_addr[0] == 0x01) &&
	    (p_addr[1] == 0x00) &&
	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00))

		eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
	else {
		/* Pack the address into two words, then explode it into
		 * one array element per bit for the CRC computation. */
		mac_h = (p_addr[0] << 8) | (p_addr[1]);
		mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

		for (i = 0; i < 32; i++)
			mac_array[i] = (mac_l >> i) & 0x1;
		for (i = 32; i < 48; i++)
			mac_array[i] = (mac_h >> (i - 32)) & 0x1;

		/* CRC-8: each output bit is the XOR of a fixed tap set. */
		crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
			mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
			mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
			mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
			mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
			mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[0];

		crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
			mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
			mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
			mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
			mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
			mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
			mac_array[0];

		crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
			mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
			mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
			mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
			mac_array[2] ^ mac_array[1] ^ mac_array[0];

		crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
			mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
			mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
			mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
			mac_array[2] ^ mac_array[1];

		crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
			mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
			mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
			mac_array[2];

		crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
			mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
			mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
			mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
			mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
			mac_array[3];

		crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
			mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
			mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
			mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[6] ^ mac_array[5] ^ mac_array[4];

		crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
			mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
			mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
			mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
			mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[5];

		/* Assemble the 8 CRC bits into a byte (bit i -> position i). */
		for (i = 0; i < 8; i++)
			crc_result = crc_result | (crc[i] << i);

		eth_port_omc_addr (eth_port_num, crc_result, queue, option);
	}
	return;
}
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
/*
 * eth_port_smc_addr - set or clear a special multicast table entry.
 * (Currently compiled out via #if 0.)
 *
 * The table holds 256 one-byte entries (valid bit + 3-bit queue), packed
 * four per 32-bit register, indexed by the address' last byte @mc_byte.
 *
 * Returns: false for an unknown @option, true otherwise.
 */
static bool eth_port_smc_addr (ETH_PORT eth_port_num,
			       unsigned char mc_byte,
			       ETH_QUEUE queue, int option)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register index and byte lane for this entry. */
	tbl_offset = (mc_byte / 4) * 4;
	reg_offset = mc_byte % 4;
	queue &= 0x7;

	switch (option) {
	case REJECT_MAC_ADDR:

		smc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		/* NOTE(review): same suspicious mask as eth_port_uc_addr —
		 * this zeroes the other three entries in the register
		 * instead of clearing only the target entry; verify. */
		smc_table_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
		break;

	case ACCEPT_MAC_ADDR:

		smc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		/* Set valid bit and queue number for this entry. */
		smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
		break;

	default:
		return false;
	}
	return true;
}
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
/*
 * eth_port_omc_addr - set or clear an "other" multicast table entry.
 * (Currently compiled out via #if 0.)
 *
 * The table is indexed by @crc8, the CRC-8 of the multicast address
 * computed in eth_port_mc_addr(); layout matches the special multicast
 * table (four one-byte entries per 32-bit register).
 *
 * Returns: false for an unknown @option, true otherwise.
 */
static bool eth_port_omc_addr (ETH_PORT eth_port_num,
			       unsigned char crc8,
			       ETH_QUEUE queue, int option)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register index and byte lane for this entry. */
	tbl_offset = (crc8 / 4) * 4;
	reg_offset = crc8 % 4;
	queue &= 0x7;

	switch (option) {
	case REJECT_MAC_ADDR:

		omc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		/* NOTE(review): same suspicious mask as eth_port_uc_addr —
		 * zeroes the other three entries in the register; verify. */
		omc_table_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
		break;

	case ACCEPT_MAC_ADDR:

		omc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		/* Set valid bit and queue number for this entry. */
		omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
		break;

	default:
		return false;
	}
	return true;
}
2012#endif
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
2032{
2033 int table_index;
2034
2035
2036 for (table_index = 0; table_index <= 0xC; table_index += 4)
2037 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
2038 (eth_port_num) + table_index), 0);
2039
2040 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2041
2042 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2043
2044 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2045 }
2046}
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2066{
2067 int i;
2068 unsigned int dummy;
2069
2070
2071 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2072 i += 4)
2073 dummy = MV_REG_READ ((MV64360_ETH_MIB_COUNTERS_BASE
2074 (eth_port_num) + i));
2075
2076 return;
2077}
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2101 unsigned int mib_offset)
2102{
2103 return (MV_REG_READ (MV64360_ETH_MIB_COUNTERS_BASE (eth_port_num)
2104 + mib_offset));
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2125{
2126 unsigned int reg_data;
2127
2128 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2129
2130 reg_data &= ~(0x1F << (5 * eth_port_num));
2131 reg_data |= (phy_addr << (5 * eth_port_num));
2132
2133 MV_REG_WRITE (MV64360_ETH_PHY_ADDR_REG, reg_data);
2134
2135 return;
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154static int ethernet_phy_get (ETH_PORT eth_port_num)
2155{
2156 unsigned int reg_data;
2157
2158 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2159
2160 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2161}
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2181{
2182 unsigned int time_out = 50;
2183 unsigned int phy_reg_data;
2184
2185
2186 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2187 phy_reg_data |= 0x8000;
2188 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2189
2190
2191 do {
2192 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2193
2194 if (time_out-- == 0)
2195 return false;
2196 }
2197 while (!(phy_reg_data & 0x20));
2198
2199 return true;
2200}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220static void eth_port_reset (ETH_PORT eth_port_num)
2221{
2222 unsigned int reg_data;
2223
2224
2225 reg_data =
2226 MV_REG_READ (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2227 (eth_port_num));
2228
2229 if (reg_data & 0xFF) {
2230
2231 MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2232 (eth_port_num), (reg_data << 8));
2233
2234
2235 do {
2236
2237 reg_data =
2238 MV_REG_READ
2239 (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2240 (eth_port_num));
2241 }
2242 while (reg_data & 0xFF);
2243 }
2244
2245
2246 reg_data =
2247 MV_REG_READ (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2248 (eth_port_num));
2249
2250 if (reg_data & 0xFF) {
2251
2252 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2253 (eth_port_num), (reg_data << 8));
2254
2255
2256 do {
2257
2258 reg_data =
2259 MV_REG_READ
2260 (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2261 (eth_port_num));
2262 }
2263 while (reg_data & 0xFF);
2264 }
2265
2266
2267
2268 eth_clear_mib_counters (eth_port_num);
2269
2270
2271 reg_data =
2272 MV_REG_READ (MV64360_ETH_PORT_SERIAL_CONTROL_REG
2273 (eth_port_num));
2274 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2275 MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
2276 reg_data);
2277
2278 return;
2279}
2280
2281#if 0
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2302 unsigned int value)
2303{
2304 unsigned int eth_config_reg;
2305
2306 eth_config_reg =
2307 MV_REG_READ (MV64360_ETH_PORT_CONFIG_REG (eth_port_num));
2308 eth_config_reg |= value;
2309 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
2310 eth_config_reg);
2311
2312 return;
2313}
2314#endif
2315
2316#if 0
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
/*
 * ethernet_reset_config_reg - clear bits in a port configuration
 * register.  (Currently compiled out via #if 0.)
 *
 * NOTE(review): despite the name, this reads/writes the CONFIG_EXTEND
 * register, while ethernet_set_config_reg() touches the plain CONFIG
 * register — the pair is asymmetric.  Presumably a copy/paste slip;
 * confirm intended register before re-enabling this code.
 */
static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
				       unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	eth_config_reg &= ~value;
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      eth_config_reg);

	return;
}
2349#endif
2350
2351#if 0
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
/*
 * ethernet_get_config_reg - read a port configuration register.
 * (Currently compiled out via #if 0.)
 *
 * NOTE(review): like ethernet_reset_config_reg(), this reads the
 * CONFIG_EXTEND register even though the name suggests the plain
 * CONFIG register — confirm before re-enabling.
 */
static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	return eth_config_reg;
}
2377
2378#endif
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2401 unsigned int phy_reg, unsigned int *value)
2402{
2403 unsigned int reg_value;
2404 unsigned int time_out = PHY_BUSY_TIMEOUT;
2405 int phy_addr;
2406
2407 phy_addr = ethernet_phy_get (eth_port_num);
2408
2409
2410
2411 do {
2412 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2413 if (time_out-- == 0) {
2414 return false;
2415 }
2416 }
2417 while (reg_value & ETH_SMI_BUSY);
2418
2419
2420
2421 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2422 (phy_addr << 16) | (phy_reg << 21) |
2423 ETH_SMI_OPCODE_READ);
2424
2425 time_out = PHY_BUSY_TIMEOUT;
2426
2427 do {
2428 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2429 if (time_out-- == 0) {
2430 return false;
2431 }
2432 }
2433 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2434
2435
2436#define PHY_UPDATE_TIMEOUT 10000
2437 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2438
2439 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2440
2441 *value = reg_value & 0xffff;
2442
2443 return true;
2444}
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
2467 unsigned int phy_reg, unsigned int value)
2468{
2469 unsigned int reg_value;
2470 unsigned int time_out = PHY_BUSY_TIMEOUT;
2471 int phy_addr;
2472
2473 phy_addr = ethernet_phy_get (eth_port_num);
2474
2475
2476 do {
2477 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2478 if (time_out-- == 0) {
2479 return false;
2480 }
2481 }
2482 while (reg_value & ETH_SMI_BUSY);
2483
2484
2485 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2486 (phy_addr << 16) | (phy_reg << 21) |
2487 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2488 return true;
2489}
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
/*
 * eth_set_access_control - program one port address-decoding window.
 *
 * Sets the window's access protection (2 bits per window), size, base
 * address/target/attributes, high-address remap (windows 0-3 only) and
 * finally its enable bit.
 *
 * @param: fully filled window descriptor.
 *
 * NOTE(review): when param->size is 0 the size register is still
 * written with ((0/0x10000) - 1) << 16, i.e. an all-ones field; the
 * window is disabled in that case so it presumably doesn't matter —
 * confirm against the MV64360 datasheet.
 */
static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param)
{
	unsigned int access_prot_reg;

	/* Replace this window's 2-bit access-control field. */
	access_prot_reg = MV_REG_READ (MV64360_ETH_ACCESS_PROTECTION_REG
				       (eth_port_num));
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MV_REG_WRITE (MV64360_ETH_ACCESS_PROTECTION_REG (eth_port_num),
		      access_prot_reg);

	/* Window size register encodes (size/64K - 1) in bits 16..31. */
	MV_REG_WRITE ((MV64360_ETH_SIZE_REG_0 +
		       (ETH_SIZE_REG_GAP * param->win)),
		      (((param->size / 0x10000) - 1) << 16));

	/* Base address register: target | attributes | base. */
	MV_REG_WRITE ((MV64360_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
		      (param->target | param->attributes | param->base_addr));

	/* Only windows 0-3 have a high-address remap register. */
	if (param->win < 4)
		MV_REG_WRITE ((MV64360_ETH_HIGH_ADDR_REMAP_REG_0 +
			       (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
			      param->high_addr);

	/* Enable register is active-low: a cleared bit enables the window. */
	if (param->enable == 1)
		MV_RESET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				   (1 << param->win));
	else
		MV_SET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				 (1 << param->win));
}
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2576 ETH_QUEUE rx_queue,
2577 int rx_desc_num,
2578 int rx_buff_size,
2579 unsigned int rx_desc_base_addr,
2580 unsigned int rx_buff_base_addr)
2581{
2582 ETH_RX_DESC *p_rx_desc;
2583 ETH_RX_DESC *p_rx_prev_desc;
2584 unsigned int buffer_addr;
2585 int ix;
2586
2587
2588 p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
2589 p_rx_prev_desc = p_rx_desc;
2590 buffer_addr = rx_buff_base_addr;
2591
2592
2593 if (rx_buff_base_addr & 0xF)
2594 return false;
2595
2596
2597 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
2598 return false;
2599
2600
2601 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
2602 return false;
2603
2604
2605 for (ix = 0; ix < rx_desc_num; ix++) {
2606 p_rx_desc->buf_size = rx_buff_size;
2607 p_rx_desc->byte_cnt = 0x0000;
2608 p_rx_desc->cmd_sts =
2609 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2610 p_rx_desc->next_desc_ptr =
2611 ((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
2612 p_rx_desc->buf_ptr = buffer_addr;
2613 p_rx_desc->return_info = 0x00000000;
2614 D_CACHE_FLUSH_LINE (p_rx_desc, 0);
2615 buffer_addr += rx_buff_size;
2616 p_rx_prev_desc = p_rx_desc;
2617 p_rx_desc = (ETH_RX_DESC *)
2618 ((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
2619 }
2620
2621
2622 p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
2623 D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);
2624
2625
2626 CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2627 USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2628
2629 p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
2630 (ETH_RX_DESC *) rx_desc_base_addr;
2631 p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
2632 rx_desc_num * RX_DESC_ALIGNED_SIZE;
2633
2634 p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);
2635
2636 return true;
2637}
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2670 ETH_QUEUE tx_queue,
2671 int tx_desc_num,
2672 int tx_buff_size,
2673 unsigned int tx_desc_base_addr,
2674 unsigned int tx_buff_base_addr)
2675{
2676
2677 ETH_TX_DESC *p_tx_desc;
2678 ETH_TX_DESC *p_tx_prev_desc;
2679 unsigned int buffer_addr;
2680 int ix;
2681
2682
2683
2684 p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
2685 p_tx_prev_desc = p_tx_desc;
2686 buffer_addr = tx_buff_base_addr;
2687
2688
2689 if (tx_buff_base_addr & 0xF)
2690 return false;
2691
2692
2693 if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
2694 || (tx_buff_size < TX_BUFFER_MIN_SIZE))
2695 return false;
2696
2697
2698 for (ix = 0; ix < tx_desc_num; ix++) {
2699 p_tx_desc->byte_cnt = 0x0000;
2700 p_tx_desc->l4i_chk = 0x0000;
2701 p_tx_desc->cmd_sts = 0x00000000;
2702 p_tx_desc->next_desc_ptr =
2703 ((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
2704
2705 p_tx_desc->buf_ptr = buffer_addr;
2706 p_tx_desc->return_info = 0x00000000;
2707 D_CACHE_FLUSH_LINE (p_tx_desc, 0);
2708 buffer_addr += tx_buff_size;
2709 p_tx_prev_desc = p_tx_desc;
2710 p_tx_desc = (ETH_TX_DESC *)
2711 ((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
2712
2713 }
2714
2715 p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
2716 D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);
2717
2718 CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2719 USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2720
2721
2722 p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
2723 (ETH_TX_DESC *) tx_desc_base_addr;
2724 p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
2725 (tx_desc_num * TX_DESC_ALIGNED_SIZE);
2726
2727
2728 p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);
2729
2730 return true;
2731}
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
2762 ETH_QUEUE tx_queue,
2763 PKT_INFO * p_pkt_info)
2764{
2765 volatile ETH_TX_DESC *p_tx_desc_first;
2766 volatile ETH_TX_DESC *p_tx_desc_curr;
2767 volatile ETH_TX_DESC *p_tx_next_desc_curr;
2768 volatile ETH_TX_DESC *p_tx_desc_used;
2769 unsigned int command_status;
2770
2771
2772 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
2773 return ETH_QUEUE_FULL;
2774
2775
2776 CURR_TFD_GET (p_tx_desc_curr, tx_queue);
2777 USED_TFD_GET (p_tx_desc_used, tx_queue);
2778
2779 if (p_tx_desc_curr == NULL)
2780 return ETH_ERROR;
2781
2782
2783 p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
2784 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2785
2786 if (command_status & (ETH_TX_FIRST_DESC)) {
2787
2788 FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
2789 p_tx_desc_first = p_tx_desc_curr;
2790 } else {
2791 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
2792 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2793 }
2794
2795
2796
2797
2798 if (p_pkt_info->byte_cnt <= 8) {
2799 printf ("You have failed in the < 8 bytes errata - fixme\n");
2800 return ETH_ERROR;
2801
2802 p_tx_desc_curr->buf_ptr =
2803 (unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
2804 eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
2805 p_pkt_info->byte_cnt);
2806 } else
2807 p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;
2808
2809 p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
2810 p_tx_desc_curr->return_info = p_pkt_info->return_info;
2811
2812 if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
2813
2814 p_tx_desc_curr->cmd_sts = command_status |
2815 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2816
2817 if (p_tx_desc_curr != p_tx_desc_first)
2818 p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2819
2820
2821
2822 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2823 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
2824 CPU_PIPE_FLUSH;
2825
2826
2827 ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);
2828
2829
2830 p_tx_desc_first = p_tx_next_desc_curr;
2831 FIRST_TFD_SET (p_tx_desc_first, tx_queue);
2832
2833 } else {
2834 p_tx_desc_curr->cmd_sts = command_status;
2835 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2836 }
2837
2838
2839 if (p_tx_next_desc_curr == p_tx_desc_used) {
2840
2841 CURR_TFD_SET (p_tx_desc_first, tx_queue);
2842
2843 p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
2844 return ETH_QUEUE_LAST_RESOURCE;
2845 } else {
2846
2847 CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
2848 return ETH_OK;
2849 }
2850}
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
/*
 * eth_tx_return_desc - reclaim one transmitted descriptor.
 *
 * Checks the oldest in-flight ("used") descriptor of @tx_queue; if the
 * hardware has released it, copies its status and return_info into
 * @p_pkt_info, advances the used pointer and clears any resource-error
 * state.
 *
 * Returns: ETH_OK          a descriptor was reclaimed
 *          ETH_RETRY       hardware still owns the descriptor
 *          ETH_END_OF_JOB  nothing left to reclaim
 *          ETH_ERROR       no used descriptor pointer set
 */
static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE tx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_used = NULL;
	volatile ETH_TX_DESC *p_tx_desc_first = NULL;
	unsigned int command_status;

	USED_TFD_GET (p_tx_desc_used, tx_queue);
	FIRST_TFD_GET (p_tx_desc_first, tx_queue);

	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still owned by DMA: not transmitted yet. */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_RETRY;
	}

	/* used == first with no resource error means the ring is empty —
	 * everything already reclaimed. */
	if ((p_tx_desc_used == p_tx_desc_first) &&
	    (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_END_OF_JOB;
	}

	/* Report transmit status and buffer handle back to the caller. */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = p_tx_desc_used->return_info;
	p_tx_desc_used->return_info = 0;

	/* Advance the used pointer past the reclaimed descriptor. */
	USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);

	/* Reclaiming a descriptor clears any previous ring-full state. */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		p_eth_port_ctrl->tx_resource_err[tx_queue] = false;

	D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);

	return ETH_OK;

}
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
/*
 * eth_port_receive - fetch one received packet from an RX queue.
 *
 * Checks the queue's current descriptor; if the hardware has handed it
 * back, copies the packet metadata (length, status, buffer pointer,
 * return_info) into @p_pkt_info and advances the current pointer.  The
 * descriptor itself is recycled later via eth_rx_return_buff().
 *
 * Returns: ETH_OK          one packet delivered in @p_pkt_info
 *          ETH_END_OF_JOB  no packet pending (DMA still owns descriptor)
 *          ETH_QUEUE_FULL  all descriptors outstanding to the caller
 *          ETH_ERROR       no current descriptor pointer set
 */
static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
					     ETH_QUEUE rx_queue,
					     PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_rx_curr_desc;
	volatile ETH_RX_DESC *p_rx_next_curr_desc;
	volatile ETH_RX_DESC *p_rx_used_desc;
	unsigned int command_status;

	/* Every descriptor is out with the caller; nothing to hand over. */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
		printf ("\nRx Queue is full ...\n");
		return ETH_QUEUE_FULL;
	}

	CURR_RFD_GET (p_rx_curr_desc, rx_queue);
	USED_RFD_GET (p_rx_used_desc, rx_queue);

	if (p_rx_curr_desc == NULL)
		return ETH_ERROR;

	p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
	command_status = p_rx_curr_desc->cmd_sts;

	/* DMA still owns the descriptor: no packet has arrived yet. */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);

		return ETH_END_OF_JOB;
	}

	/* Hand the packet to the caller; RX_BUF_OFFSET skips the
	 * hardware-prepended header area. */
	p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = p_rx_curr_desc->return_info;
	p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;

	/* The buffer handle now belongs to the caller. */
	p_rx_curr_desc->return_info = 0;

	CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);

	/* Current catching up with used means the ring is exhausted. */
	if (p_rx_next_curr_desc == p_rx_used_desc)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = true;

	D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
	CPU_PIPE_FLUSH;
	return ETH_OK;
}
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
/*
 * eth_rx_return_buff - Return a receive buffer to the Rx descriptor ring.
 *
 * Refills the queue's "used" descriptor with the buffer described by
 * p_pkt_info, hands the descriptor back to the SDMA engine by setting the
 * ownership bit, and advances the "used" pointer.  Clears the queue's
 * resource-error flag since at least one descriptor is available again.
 *
 * Returns ETH_OK on success, ETH_ERROR if the queue's used-descriptor
 * pointer is not initialized.
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE rx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_used_rx_desc;	/* descriptor being refilled */

	/* Get 'used' Rx descriptor (macro reads p_eth_port_ctrl implicitly) */
	USED_RFD_GET (p_used_rx_desc, rx_queue);

	/* Sanity check: ring must have been initialized */
	if (p_used_rx_desc == NULL)
		return ETH_ERROR;

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->return_info = p_pkt_info->return_info;
	p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
	p_used_rx_desc->buf_size = MV64360_RX_BUFFER_SIZE;

	/* Flush the write pipe so the fields above are committed before
	 * ownership is transferred */
	CPU_PIPE_FLUSH;

	/* Hand the descriptor to the SDMA - cmd_sts must be written last,
	 * after every other field is valid */
	p_used_rx_desc->cmd_sts =
		ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;

	/* Flush descriptor and CPU pipe so the SDMA sees the update */
	D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
	CPU_PIPE_FLUSH;

	/* Advance the 'used' descriptor pointer */
	USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);

	/* A returned buffer cancels any previous resource-error condition */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = false;

	return ETH_OK;
}
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
#if 0
/*
 * eth_port_set_rx_coal - Program the Rx interrupt coalescing delay.
 *
 * Converts the requested delay into units of 64 t_clk cycles and writes
 * the value into bits [21:8] of the port's SDMA configuration register,
 * preserving all other bits.
 *
 * Returns the coalescing value actually programmed (in 64-cycle units).
 * Currently compiled out (#if 0) - kept for reference.
 */
static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal_ticks;
	unsigned int sdma_cfg;

	/* delay (us) -> units of 64 t_clk cycles */
	coal_ticks = ((t_clk / 1000000) * delay) / 64;

	/* Read-modify-write the coalescing field, bits [21:8] */
	sdma_cfg = MV_REG_READ (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num));
	sdma_cfg &= 0xffc000ff;
	sdma_cfg |= (coal_ticks & 0x3fff) << 8;
	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num), sdma_cfg);

	return coal_ticks;
}

#endif
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
#if 0
/*
 * eth_port_set_tx_coal - Program the Tx interrupt coalescing delay.
 *
 * Converts the requested delay into units of 64 t_clk cycles and writes
 * it (shifted into position) to the port's Tx FIFO urgent threshold
 * register.
 *
 * Returns the coalescing value actually programmed (in 64-cycle units).
 * Currently compiled out (#if 0) - kept for reference.
 */
static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	/* delay (us) -> units of 64 t_clk cycles */
	unsigned int coal_ticks = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64360_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
		      coal_ticks << 4);

	return coal_ticks;
}
#endif
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
/*
 * eth_b_copy - Copy byte_count bytes from src_addr to dst_addr.
 *
 * Addresses are passed as unsigned int because callers hand in raw
 * buffer addresses taken from the Rx/Tx descriptors.  The copy is done
 * bytewise, so neither address needs any particular alignment.
 *
 * Fixes vs. the previous version:
 *  - Removed the unconditional "*(unsigned int *) dst_addr = 0x0;"
 *    priming store: it was redundant (the loop overwrites those bytes
 *    whenever byte_count >= 4), could fault on CPUs that trap unaligned
 *    word accesses, and overran the destination when byte_count < 4.
 *  - Loop condition changed from "!= 0" to "> 0" so a negative
 *    byte_count no longer runs (nearly) forever.
 */
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	while (byte_count > 0) {
		*(char *) dst_addr = *(char *) src_addr;
		dst_addr++;
		src_addr++;
		byte_count--;
	}
}
3185