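/*
 * mv_eth.c - network driver for the gigabit ethernet ports of the Marvell
 * MV64460 system controller, used by the U-Boot network code on the
 * db64460 board.  Board code registers up to three ports through
 * mv6446x_eth_initialize(); transmit, receive and halt are reached through
 * the db64460_eth_* wrappers further below.
 */
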
#include <common.h>
#include <net.h>
#include <malloc.h>

#include "mv_eth.h"

#undef DEBUG_MV_ETH

#ifdef DEBUG_MV_ETH
#define DEBUG
#define DP(x) x
#else
#define DP(x)
#endif

#undef MV64460_CHECKSUM_OFFLOAD

#undef MV64460_RX_QUEUE_FILL_ON_TASK

#define MAGIC_ETH_RUNNING 8031971
#define MV64460_INTERNAL_SRAM_SIZE _256K
#define EXTRA_BYTES 32
#define WRAP ETH_HLEN + 2 + 4 + 16
#define BUFFER_MTU dev->mtu + WRAP
#define INT_CAUSE_UNMASK_ALL 0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
#ifdef MV64460_RX_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL 0x00000000
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
#endif

#define MV_REG_READ(offset) my_le32_to_cpu(* (volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset))
#define MV_REG_WRITE(offset,data) *(volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32 (data)
#define MV_SET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
#define MV_RESET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))

static int mv64460_eth_real_open (struct eth_device *eth);
static int mv64460_eth_real_stop (struct eth_device *eth);
static struct net_device_stats *mv64460_eth_get_stats (struct eth_device *dev);
static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
static void mv64460_eth_update_stat (struct eth_device *dev);
bool db64460_eth_start (struct eth_device *eth);
unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
				   unsigned int mib_offset);
int mv64460_eth_receive (struct eth_device *dev);

int mv64460_eth_xmit (struct eth_device *, volatile void *packet, int length);

#ifndef UPDATE_STATS_BY_SOFTWARE
static void mv64460_eth_print_stat (struct eth_device *dev);
#endif

extern void NetReceive (volatile uchar *, int);

extern unsigned int INTERNAL_REG_BASE_ADDR;
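
/* Debug helper: dump the descriptor and buffer base addresses of a port. */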
#ifdef DEBUG_MV_ETH
void print_globals (struct eth_device *dev)
{
	printf ("Ethernet PRINT_Globals-Debug function\n");
	printf ("Base Address for ETH_PORT_INFO: %08x\n",
		(unsigned int) dev->priv);
	printf ("Base Address for mv64460_eth_priv: %08x\n",
		(unsigned int) &(((ETH_PORT_INFO *) dev->priv)->port_private));

	printf ("GT Internal Base Address: %08x\n",
		INTERNAL_REG_BASE_ADDR);
	printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0],
		MV64460_TX_QUEUE_SIZE);
	printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0],
		MV64460_RX_QUEUE_SIZE);
	printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_buffer_base[0],
		(MV64460_RX_QUEUE_SIZE * MV64460_RX_BUFFER_SIZE) + 32);
	printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_buffer_base[0],
		(MV64460_TX_QUEUE_SIZE * MV64460_TX_BUFFER_SIZE) + 32);
}
#endif
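
/*
 * The MV64460 registers are little-endian while the CPU on this board is
 * big-endian, so both directions of the conversion are the same
 * unconditional 32-bit byte swap.
 */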
#define my_cpu_to_le32(x) my_le32_to_cpu((x))

unsigned long my_le32_to_cpu (unsigned long x)
{
	return (((x & 0x000000ffU) << 24) |
		((x & 0x0000ff00U) << 8) |
		((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24));
}
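
/*
 * Read PHY status register 1 and the port status register and print the
 * current link state (up/down, duplex, speed) of the given port.
 */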
static void mv64460_eth_print_phy_status (struct eth_device *dev)
{
	struct mv64460_eth_priv *port_private;
	unsigned int port_num;
	ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
	unsigned int port_status, phy_reg_data;

	port_private =
		(struct mv64460_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		printf ("Ethernet port changed link status to DOWN\n");
	} else {
		port_status =
			MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
		printf ("Ethernet status port %d: Link up", port_num);
		printf (", %s",
			(port_status & BIT2) ? "Full Duplex" : "Half Duplex");
		if (port_status & BIT4)
			printf (", Speed 1 Gbps");
		else
			printf (", %s",
				(port_status & BIT5) ? "Speed 100 Mbps" :
				"Speed 10 Mbps");
		printf ("\n");
	}
}
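
/*
 * Thin wrappers that adapt the U-Boot eth_device callbacks (init, recv,
 * send, halt) to the mv64460_* driver entry points below.
 */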
int db64460_eth_probe (struct eth_device *dev)
{
	return ((int) db64460_eth_start (dev));
}

int db64460_eth_poll (struct eth_device *dev)
{
	return mv64460_eth_receive (dev);
}

int db64460_eth_transmit (struct eth_device *dev, volatile void *packet,
			  int length)
{
	mv64460_eth_xmit (dev, packet, length);
	return 0;
}

void db64460_eth_disable (struct eth_device *dev)
{
	mv64460_eth_stop (dev);
}
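
/*
 * Allocate and register one eth_device per MV64460 port: read the MAC
 * address from the ethaddr/eth1addr/eth2addr environment variables, hook
 * up the db64460_eth_* callbacks, allocate the per-port private and
 * statistics structures plus the descriptor and buffer areas, and hand
 * each device to eth_register().
 */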
208void mv6446x_eth_initialize (bd_t * bis)
209{
210 struct eth_device *dev;
211 ETH_PORT_INFO *ethernet_private;
212 struct mv64460_eth_priv *port_private;
213 int devnum, x, temp;
214 char *s, *e, buf[64];
215
216 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
217 dev = calloc (sizeof (*dev), 1);
218 if (!dev) {
219 printf ("%s: mv_enet%d allocation failure, %s\n",
220 __FUNCTION__, devnum, "eth_device structure");
221 return;
222 }
223
224
225 sprintf (dev->name, "mv_enet%d", devnum);
226
227#ifdef DEBUG
228 printf ("Initializing %s\n", dev->name);
229#endif
230
231
232 switch (devnum) {
233 case 0:
234 s = "ethaddr";
235 break;
236
237 case 1:
238 s = "eth1addr";
239 break;
240
241 case 2:
242 s = "eth2addr";
243 break;
244
245 default:
246 printf ("%s: Invalid device number %d\n",
247 __FUNCTION__, devnum);
248 return;
249 }
250
251 temp = getenv_f(s, buf, sizeof (buf));
252 s = (temp > 0) ? buf : NULL;
253
254#ifdef DEBUG
255 printf ("Setting MAC %d to %s\n", devnum, s);
256#endif
257 for (x = 0; x < 6; ++x) {
258 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
259 if (s)
260 s = (*e) ? e + 1 : e;
261 }
262
263 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
264
265 dev->init = (void *) db64460_eth_probe;
266 dev->halt = (void *) ethernet_phy_reset;
267 dev->send = (void *) db64460_eth_transmit;
268 dev->recv = (void *) db64460_eth_poll;
269
270 ethernet_private = calloc (sizeof (*ethernet_private), 1);
271 dev->priv = (void *)ethernet_private;
272 if (!ethernet_private) {
273 printf ("%s: %s allocation failure, %s\n",
274 __FUNCTION__, dev->name,
275 "Private Device Structure");
276 free (dev);
277 return;
278 }
279
280 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
281 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
282
283
284 port_private = calloc (sizeof (*ethernet_private), 1);
285 ethernet_private->port_private = (void *)port_private;
286 if (!port_private) {
287 printf ("%s: %s allocation failure, %s\n",
288 __FUNCTION__, dev->name,
289 "Port Private Device Structure");
290
291 free (ethernet_private);
292 free (dev);
293 return;
294 }
295
296 port_private->stats =
297 calloc (sizeof (struct net_device_stats), 1);
298 if (!port_private->stats) {
299 printf ("%s: %s allocation failure, %s\n",
300 __FUNCTION__, dev->name,
301 "Net stat Structure");
302
303 free (port_private);
304 free (ethernet_private);
305 free (dev);
306 return;
307 }
308 memset (ethernet_private->port_private, 0,
309 sizeof (struct mv64460_eth_priv));
310 switch (devnum) {
311 case 0:
312 ethernet_private->port_num = ETH_0;
313 break;
314 case 1:
315 ethernet_private->port_num = ETH_1;
316 break;
317 case 2:
318 ethernet_private->port_num = ETH_2;
319 break;
320 default:
321 printf ("Invalid device number %d\n", devnum);
322 break;
323 };
324
325 port_private->port_num = devnum;
326
327
328
329
330 mv64460_eth_update_stat (dev);
331 memset (port_private->stats, 0,
332 sizeof (struct net_device_stats));
333
334 switch (devnum) {
335 case 0:
336 s = "ethaddr";
337 break;
338
339 case 1:
340 s = "eth1addr";
341 break;
342
343 case 2:
344 s = "eth2addr";
345 break;
346
347 default:
348 printf ("%s: Invalid device number %d\n",
349 __FUNCTION__, devnum);
350 return;
351 }
352
353 temp = getenv_f(s, buf, sizeof (buf));
354 s = (temp > 0) ? buf : NULL;
355
356#ifdef DEBUG
357 printf ("Setting MAC %d to %s\n", devnum, s);
358#endif
359 for (x = 0; x < 6; ++x) {
360 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
361 if (s)
362 s = (*e) ? e + 1 : e;
363 }
364
365 DP (printf ("Allocating descriptor and buffer rings\n"));
366
367 ethernet_private->p_rx_desc_area_base[0] =
368 (ETH_RX_DESC *) memalign (16,
369 RX_DESC_ALIGNED_SIZE *
370 MV64460_RX_QUEUE_SIZE + 1);
371 ethernet_private->p_tx_desc_area_base[0] =
372 (ETH_TX_DESC *) memalign (16,
373 TX_DESC_ALIGNED_SIZE *
374 MV64460_TX_QUEUE_SIZE + 1);
375
376 ethernet_private->p_rx_buffer_base[0] =
377 (char *) memalign (16,
378 MV64460_RX_QUEUE_SIZE *
379 MV64460_TX_BUFFER_SIZE + 1);
380 ethernet_private->p_tx_buffer_base[0] =
381 (char *) memalign (16,
382 MV64460_RX_QUEUE_SIZE *
383 MV64460_TX_BUFFER_SIZE + 1);
384
385#ifdef DEBUG_MV_ETH
386
387 print_globals (dev);
388#endif
389 eth_register (dev);
390
391 }
392 DP (printf ("%s: exit\n", __FUNCTION__));
393
394}
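
/*
 * Bring a port up: stop its RX queues, clear the interrupt cause registers
 * and unmask the port interrupts, initialize the port and its TX/RX
 * descriptor rings, start the port and check the PHY for link.
 * Returns 1 on success, 0 if no link could be established.
 */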
410int mv64460_eth_open (struct eth_device *dev)
411{
412 return (mv64460_eth_real_open (dev));
413}
414
415
416static int mv64460_eth_real_open (struct eth_device *dev)
417{
418
419 unsigned int queue;
420 ETH_PORT_INFO *ethernet_private;
421 struct mv64460_eth_priv *port_private;
422 unsigned int port_num;
423 u32 port_status, phy_reg_data;
424
425 ethernet_private = (ETH_PORT_INFO *) dev->priv;
426
427
428 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
429
430 port_private =
431 (struct mv64460_eth_priv *) ethernet_private->port_private;
432 port_num = port_private->port_num;
433
434
435 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
436 0x0000ff00);
437
438
439 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
440 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
441
442
443 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num),
444 INT_CAUSE_UNMASK_ALL);
445
446
447 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
448 INT_CAUSE_UNMASK_ALL_EXT);
449
450
451 ethernet_private->port_phy_addr = 0x8 + port_num;
452
453
454 eth_port_init (ethernet_private);
455
456
457
458
459 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
460 unsigned int size;
461
462 port_private->tx_ring_size[queue] = MV64460_TX_QUEUE_SIZE;
463 size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
464 ethernet_private->tx_desc_area_size[queue] = size;
465
466
467 memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
468 0, ethernet_private->tx_desc_area_size[queue]);
469
470
471 if (ether_init_tx_desc_ring
472 (ethernet_private, ETH_Q0,
473 port_private->tx_ring_size[queue],
474 MV64460_TX_BUFFER_SIZE ,
475 (unsigned int) ethernet_private->
476 p_tx_desc_area_base[queue],
477 (unsigned int) ethernet_private->
478 p_tx_buffer_base[queue]) == false)
479 printf ("### Error initializing TX Ring\n");
480 }
481
482
483 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
484 unsigned int size;
485
486
487 port_private->rx_ring_size[queue] = MV64460_RX_QUEUE_SIZE;
488 size = (port_private->rx_ring_size[queue] *
489 RX_DESC_ALIGNED_SIZE);
490 ethernet_private->rx_desc_area_size[queue] = size;
491
492
493 memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
494 0, ethernet_private->rx_desc_area_size[queue]);
495 if ((ether_init_rx_desc_ring
496 (ethernet_private, ETH_Q0,
497 port_private->rx_ring_size[queue],
498 MV64460_RX_BUFFER_SIZE ,
499 (unsigned int) ethernet_private->
500 p_rx_desc_area_base[queue],
501 (unsigned int) ethernet_private->
502 p_rx_buffer_base[queue])) == false)
503 printf ("### Error initializing RX Ring\n");
504 }
505
506 eth_port_start (ethernet_private);
507
508
509 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num),
510 (0x5 << 17) |
511 (MV_REG_READ
512 (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num))
513 & 0xfff1ffff));
514
515
516
517
518
519
520 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
521 port_status = MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
522
523
524 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
525 if (!(phy_reg_data & 0x20)) {
526
527 if ((ethernet_phy_reset (port_num)) != true) {
			printf ("$$ Warning: No link on port %d\n",
529 port_num);
530 return 0;
531 } else {
532 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
533 if (!(phy_reg_data & 0x20)) {
534 printf ("### Error: Phy is not active\n");
535 return 0;
536 }
537 }
538 } else {
539 mv64460_eth_print_phy_status (dev);
540 }
541 port_private->eth_running = MAGIC_ETH_RUNNING;
542 return 1;
543}
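
/*
 * Stop all TX queues of the port and walk the TX descriptor area, clearing
 * the return_info field of every descriptor that still holds a buffer.
 */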
546static int mv64460_eth_free_tx_rings (struct eth_device *dev)
547{
548 unsigned int queue;
549 ETH_PORT_INFO *ethernet_private;
550 struct mv64460_eth_priv *port_private;
551 unsigned int port_num;
552 volatile ETH_TX_DESC *p_tx_curr_desc;
553
554 ethernet_private = (ETH_PORT_INFO *) dev->priv;
555 port_private =
556 (struct mv64460_eth_priv *) ethernet_private->port_private;
557 port_num = port_private->port_num;
558
559
560 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
561 0x0000ff00);
562
563
564 DP (printf ("Clearing previously allocated TX queues... "));
565 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
566
567 for (p_tx_curr_desc =
568 ethernet_private->p_tx_desc_area_base[queue];
569 ((unsigned int) p_tx_curr_desc <= (unsigned int)
570 ethernet_private->p_tx_desc_area_base[queue] +
571 ethernet_private->tx_desc_area_size[queue]);
572 p_tx_curr_desc =
573 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
574 TX_DESC_ALIGNED_SIZE)) {
575
576 if (p_tx_curr_desc->return_info != 0) {
577 p_tx_curr_desc->return_info = 0;
578 DP (printf ("freed\n"));
579 }
580 }
581 DP (printf ("Done\n"));
582 }
583 return 0;
584}
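
/*
 * Stop all RX queues of the port and clear the return_info field of every
 * RX descriptor in the ring.
 */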
586static int mv64460_eth_free_rx_rings (struct eth_device *dev)
587{
588 unsigned int queue;
589 ETH_PORT_INFO *ethernet_private;
590 struct mv64460_eth_priv *port_private;
591 unsigned int port_num;
592 volatile ETH_RX_DESC *p_rx_curr_desc;
593
594 ethernet_private = (ETH_PORT_INFO *) dev->priv;
595 port_private =
596 (struct mv64460_eth_priv *) ethernet_private->port_private;
597 port_num = port_private->port_num;
598
599
600
601 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
602 0x0000ff00);
603
604
605 DP (printf ("Clearing previously allocated RX queues... "));
606 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
607
608 for (p_rx_curr_desc =
609 ethernet_private->p_rx_desc_area_base[queue];
610 (((unsigned int) p_rx_curr_desc <
611 ((unsigned int) ethernet_private->
612 p_rx_desc_area_base[queue] +
613 ethernet_private->rx_desc_area_size[queue])));
614 p_rx_curr_desc =
615 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
616 RX_DESC_ALIGNED_SIZE)) {
617 if (p_rx_curr_desc->return_info != 0) {
618 p_rx_curr_desc->return_info = 0;
619 DP (printf ("freed\n"));
620 }
621 }
622 DP (printf ("Done\n"));
623 }
624 return 0;
625}
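
/*
 * U-Boot "halt" path: disable the port's address decode windows and shut
 * the port down via mv64460_eth_real_stop().
 */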
637int mv64460_eth_stop (struct eth_device *dev)
638{
639 ETH_PORT_INFO *ethernet_private;
640 struct mv64460_eth_priv *port_private;
641 unsigned int port_num;
642
643 ethernet_private = (ETH_PORT_INFO *) dev->priv;
644 port_private =
645 (struct mv64460_eth_priv *) ethernet_private->port_private;
646 port_num = port_private->port_num;
647
648
649 MV_REG_WRITE (MV64460_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
650 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
651 mv64460_eth_real_stop (dev);
652
653 return 0;
654};
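
/*
 * Release the TX/RX rings, reset the port, mask and clear its interrupts
 * and (unless statistics are kept in software) print the accumulated MIB
 * statistics once.
 */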
658static int mv64460_eth_real_stop (struct eth_device *dev)
659{
660 ETH_PORT_INFO *ethernet_private;
661 struct mv64460_eth_priv *port_private;
662 unsigned int port_num;
663
664 ethernet_private = (ETH_PORT_INFO *) dev->priv;
665 port_private =
666 (struct mv64460_eth_priv *) ethernet_private->port_private;
667 port_num = port_private->port_num;
668
669
670 mv64460_eth_free_tx_rings (dev);
671 mv64460_eth_free_rx_rings (dev);
672
673 eth_port_reset (ethernet_private->port_num);
674
675 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
676 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
677
678 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num), 0);
679
680 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
681 MV_RESET_REG_BITS (MV64460_CPU_INTERRUPT0_MASK_HIGH,
682 BIT0 << port_num);
683
684#ifndef UPDATE_STATS_BY_SOFTWARE
685
686
687
688
689 if (port_private->eth_running == MAGIC_ETH_RUNNING) {
690 port_private->eth_running = 0;
691 mv64460_eth_print_stat (dev);
692 }
693 memset (port_private->stats, 0, sizeof (struct net_device_stats));
694#endif
695 DP (printf ("\nEthernet stopped ... \n"));
696 return 0;
697}
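
/*
 * Transmit one packet: hand the buffer to eth_port_send() as a single
 * first+last descriptor, update the statistics and then collect any TX
 * descriptors the SDMA engine has already released via
 * eth_tx_return_desc().  Returns 0 on success, 1 on error.
 */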
712int mv64460_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
713 int dataSize)
714{
715 ETH_PORT_INFO *ethernet_private;
716 struct mv64460_eth_priv *port_private;
717 unsigned int port_num;
718 PKT_INFO pkt_info;
719 ETH_FUNC_RET_STATUS status;
720 struct net_device_stats *stats;
721 ETH_FUNC_RET_STATUS release_result;
722
723 ethernet_private = (ETH_PORT_INFO *) dev->priv;
724 port_private =
725 (struct mv64460_eth_priv *) ethernet_private->port_private;
726 port_num = port_private->port_num;
727
728 stats = port_private->stats;
729
730
731 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
732 pkt_info.byte_cnt = dataSize;
733 pkt_info.buf_ptr = (unsigned int) dataPtr;
734 pkt_info.return_info = 0;
735
736 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
737 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
738 printf ("Error on transmitting packet ..");
739 if (status == ETH_QUEUE_FULL)
740 printf ("ETH Queue is full. \n");
741 if (status == ETH_QUEUE_LAST_RESOURCE)
742 printf ("ETH Queue: using last available resource. \n");
743 goto error;
744 }
745
746
747 stats->tx_bytes += dataSize;
748 stats->tx_packets++;
749
750
751 do {
752 release_result =
753 eth_tx_return_desc (ethernet_private, ETH_Q0,
754 &pkt_info);
755 switch (release_result) {
756 case ETH_OK:
757 DP (printf ("descriptor released\n"));
758 if (pkt_info.cmd_sts & BIT0) {
759 printf ("Error in TX\n");
760 stats->tx_errors++;
761
762 }
763 break;
764 case ETH_RETRY:
765 DP (printf ("transmission still in process\n"));
766 break;
767
768 case ETH_ERROR:
769 printf ("routine can not access Tx desc ring\n");
770 break;
771
772 case ETH_END_OF_JOB:
773 DP (printf ("the routine has nothing to release\n"));
774 break;
775 default:
776 break;
777 }
778 } while (release_result == ETH_OK);
779
780
781 return 0;
782 error:
783 return 1;
784}
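
/*
 * Poll entry point: drain all pending RX descriptors of queue 0.  Good
 * single-descriptor frames are passed to NetReceive(); fragmented or
 * errored frames are only counted.  In both cases the buffer is returned
 * to the RX ring afterwards.
 */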
798int mv64460_eth_receive (struct eth_device *dev)
799{
800 ETH_PORT_INFO *ethernet_private;
801 struct mv64460_eth_priv *port_private;
802 unsigned int port_num;
803 PKT_INFO pkt_info;
804 struct net_device_stats *stats;
805
806
807 ethernet_private = (ETH_PORT_INFO *) dev->priv;
808 port_private =
809 (struct mv64460_eth_priv *) ethernet_private->port_private;
810 port_num = port_private->port_num;
811 stats = port_private->stats;
812
813 while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) ==
814 ETH_OK)) {
815
816#ifdef DEBUG_MV_ETH
817 if (pkt_info.byte_cnt != 0) {
818 printf ("%s: Received %d byte Packet @ 0x%x\n",
819 __FUNCTION__, pkt_info.byte_cnt,
820 pkt_info.buf_ptr);
821 }
822#endif
823
824 stats->rx_packets++;
825 stats->rx_bytes += pkt_info.byte_cnt;
826
827
828
829
830
831 if (((pkt_info.
832 cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
833 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
834 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
835 stats->rx_dropped++;
836
837 printf ("Received packet spread on multiple descriptors\n");
838
839
840 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
841 stats->rx_errors++;
842 }
843
844
845 pkt_info.buf_ptr &= ~0x7;
846 pkt_info.byte_cnt = 0x0000;
847
848 if (eth_rx_return_buff
849 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
850 printf ("Error while returning the RX Desc to Ring\n");
851 } else {
852 DP (printf ("RX Desc returned to Ring\n"));
853 }
854
855 } else {
856
857
858#ifdef DEBUG_MV_ETH
859 printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
860#endif
861
862 NetReceive ((uchar *) pkt_info.buf_ptr,
863 (int) pkt_info.byte_cnt);
864
865
866
867 pkt_info.buf_ptr &= ~0x7;
868 pkt_info.byte_cnt = 0x0000;
869 DP (printf
870 ("RX: pkt_info.buf_ptr = %x\n",
871 pkt_info.buf_ptr));
872 if (eth_rx_return_buff
873 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
874 printf ("Error while returning the RX Desc to Ring\n");
875 } else {
876 DP (printf ("RX Desc returned to Ring\n"));
877 }
878
879
880
881 }
882 }
883 mv64460_eth_get_stats (dev);
884 return 1;
885}
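
/* Refresh the software statistics from the hardware MIB counters. */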
897static struct net_device_stats *mv64460_eth_get_stats (struct eth_device *dev)
898{
899 ETH_PORT_INFO *ethernet_private;
900 struct mv64460_eth_priv *port_private;
901 unsigned int port_num;
902
903 ethernet_private = (ETH_PORT_INFO *) dev->priv;
904 port_private =
905 (struct mv64460_eth_priv *) ethernet_private->port_private;
906 port_num = port_private->port_num;
907
908 mv64460_eth_update_stat (dev);
909
910 return port_private->stats;
911}
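
/*
 * Add the port's MIB counters to the software net_device_stats.  The MIB
 * registers clear on read (see eth_clear_mib_counters()), so each value is
 * accumulated rather than copied.
 */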
923static void mv64460_eth_update_stat (struct eth_device *dev)
924{
925 ETH_PORT_INFO *ethernet_private;
926 struct mv64460_eth_priv *port_private;
927 struct net_device_stats *stats;
928 unsigned int port_num;
929 volatile unsigned int dummy;
930
931 ethernet_private = (ETH_PORT_INFO *) dev->priv;
932 port_private =
933 (struct mv64460_eth_priv *) ethernet_private->port_private;
934 port_num = port_private->port_num;
935 stats = port_private->stats;
936
937
938 stats->rx_packets += (unsigned long)
939 eth_read_mib_counter (ethernet_private->port_num,
940 ETH_MIB_GOOD_FRAMES_RECEIVED);
941 stats->tx_packets += (unsigned long)
942 eth_read_mib_counter (ethernet_private->port_num,
943 ETH_MIB_GOOD_FRAMES_SENT);
944 stats->rx_bytes += (unsigned long)
945 eth_read_mib_counter (ethernet_private->port_num,
946 ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
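
	/*
	 * The high halves of the octet counters are read only for the side
	 * effect of advancing/clearing the hardware counters; the 32-bit
	 * software counters ignore them.
	 */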
957 dummy = eth_read_mib_counter (ethernet_private->port_num,
958 ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
959 stats->tx_bytes += (unsigned long)
960 eth_read_mib_counter (ethernet_private->port_num,
961 ETH_MIB_GOOD_OCTETS_SENT_LOW);
962 dummy = eth_read_mib_counter (ethernet_private->port_num,
963 ETH_MIB_GOOD_OCTETS_SENT_HIGH);
964 stats->rx_errors += (unsigned long)
965 eth_read_mib_counter (ethernet_private->port_num,
966 ETH_MIB_MAC_RECEIVE_ERROR);
967
968
969 stats->rx_dropped +=
970 (unsigned long) eth_read_mib_counter (ethernet_private->
971 port_num,
972 ETH_MIB_BAD_CRC_EVENT);
973 stats->multicast += (unsigned long)
974 eth_read_mib_counter (ethernet_private->port_num,
975 ETH_MIB_MULTICAST_FRAMES_RECEIVED);
976 stats->collisions +=
977 (unsigned long) eth_read_mib_counter (ethernet_private->
978 port_num,
979 ETH_MIB_COLLISION) +
980 (unsigned long) eth_read_mib_counter (ethernet_private->
981 port_num,
982 ETH_MIB_LATE_COLLISION);
983
984 stats->rx_length_errors +=
985 (unsigned long) eth_read_mib_counter (ethernet_private->
986 port_num,
987 ETH_MIB_UNDERSIZE_RECEIVED)
988 +
989 (unsigned long) eth_read_mib_counter (ethernet_private->
990 port_num,
991 ETH_MIB_OVERSIZE_RECEIVED);
992
993}
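
/* Print the accumulated statistics when the port is stopped. */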
#ifndef UPDATE_STATS_BY_SOFTWARE
static void mv64460_eth_print_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64460_eth_priv *port_private;
	struct net_device_stats *stats;
	unsigned int port_num;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64460_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;
	stats = port_private->stats;

	printf ("\n### Network statistics: ###\n");
	printf ("--------------------------\n");
	printf (" Packets received: %ld\n", stats->rx_packets);
	printf (" Packets sent: %ld\n", stats->tx_packets);
	printf (" Received bytes: %ld\n", stats->rx_bytes);
	printf (" Sent bytes: %ld\n", stats->tx_bytes);
	if (stats->rx_errors != 0)
		printf (" Rx Errors: %ld\n",
			stats->rx_errors);
	if (stats->rx_dropped != 0)
		printf (" Rx dropped (CRC Errors): %ld\n",
			stats->rx_dropped);
	if (stats->multicast != 0)
		printf (" Rx multicast frames: %ld\n",
			stats->multicast);
	if (stats->collisions != 0)
		printf (" No. of collisions: %ld\n",
			stats->collisions);
	if (stats->rx_length_errors != 0)
		printf (" Rx length errors: %ld\n",
			stats->rx_length_errors);
}
#endif
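
/* Exported start hook, used by db64460_eth_probe(). */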
bool db64460_eth_start (struct eth_device *dev)
{
	return (mv64460_eth_open (dev));
}
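
/*
 * Low-level port helpers.  The macros below issue the per-queue enable and
 * disable commands and maintain the driver's current/used/first descriptor
 * pointers for the RX and TX rings; p_eth_port_ctrl is the ETH_PORT_INFO
 * of the port being operated on.
 */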
1246#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1247 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1248
1249#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1250 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1251 (1 << (8 + tx_queue)))
1252
1253#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1254MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1255
1256#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1257MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1258
1259#define CURR_RFD_GET(p_curr_desc, queue) \
1260 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1261
1262#define CURR_RFD_SET(p_curr_desc, queue) \
1263 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1264
1265#define USED_RFD_GET(p_used_desc, queue) \
1266 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1267
1268#define USED_RFD_SET(p_used_desc, queue)\
1269(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1270
1271
1272#define CURR_TFD_GET(p_curr_desc, queue) \
1273 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1274
1275#define CURR_TFD_SET(p_curr_desc, queue) \
1276 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1277
1278#define USED_TFD_GET(p_used_desc, queue) \
1279 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1280
1281#define USED_TFD_SET(p_used_desc, queue) \
1282 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1283
1284#define FIRST_TFD_GET(p_first_desc, queue) \
1285 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1286
1287#define FIRST_TFD_SET(p_first_desc, queue) \
1288 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1289
1290
1291
1292#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1293
1294#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1295
1296#define LINK_UP_TIMEOUT 100000
1297#define PHY_BUSY_TIMEOUT 10000000
1298
1299
1300
1301
1302static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1303static int ethernet_phy_get (ETH_PORT eth_port_num);
1304
1305
1306static void eth_set_access_control (ETH_PORT eth_port_num,
1307 ETH_WIN_PARAM * param);
1308static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1309 ETH_QUEUE queue, int option);
1310#if 0
1311static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1312 unsigned char mc_byte,
1313 ETH_QUEUE queue, int option);
1314static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1315 unsigned char crc8,
1316 ETH_QUEUE queue, int option);
1317#endif
1318
1319static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1320 int byte_count);
1321
1322void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1323
1324
1325typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1326u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1327{
1328 u32 result = 0;
1329 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1330
1331 if (enable & (1 << bank))
1332 return 0;
1333 if (bank == BANK0)
1334 result = MV_REG_READ (MV64460_CS_0_BASE_ADDR);
1335 if (bank == BANK1)
1336 result = MV_REG_READ (MV64460_CS_1_BASE_ADDR);
1337 if (bank == BANK2)
1338 result = MV_REG_READ (MV64460_CS_2_BASE_ADDR);
1339 if (bank == BANK3)
1340 result = MV_REG_READ (MV64460_CS_3_BASE_ADDR);
1341 result &= 0x0000ffff;
1342 result = result << 16;
1343 return result;
1344}
1345
1346u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1347{
1348 u32 result = 0;
1349 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1350
1351 if (enable & (1 << bank))
1352 return 0;
1353 if (bank == BANK0)
1354 result = MV_REG_READ (MV64460_CS_0_SIZE);
1355 if (bank == BANK1)
1356 result = MV_REG_READ (MV64460_CS_1_SIZE);
1357 if (bank == BANK2)
1358 result = MV_REG_READ (MV64460_CS_2_SIZE);
1359 if (bank == BANK3)
1360 result = MV_REG_READ (MV64460_CS_3_SIZE);
1361 result += 1;
1362 result &= 0x0000ffff;
1363 result = result << 16;
1364 return result;
1365}
1366
1367u32 mv_get_internal_sram_base (void)
1368{
1369 u32 result;
1370
1371 result = MV_REG_READ (MV64460_INTEGRATED_SRAM_BASE_ADDR);
1372 result &= 0x0000ffff;
1373 result = result << 16;
1374 return result;
1375}
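
/*
 * Per-port init: load the default configuration values, reset the
 * descriptor pointer bookkeeping, reset the port, program the ethernet
 * address decode windows (DRAM banks 0-3 plus the internal SRAM), clear
 * the MAC address filter tables and set the port's PHY address.
 */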
1402static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1403{
1404 int queue;
1405 ETH_WIN_PARAM win_param;
1406
1407 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1408 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1409 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1410 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1411
1412 p_eth_port_ctrl->port_rx_queue_command = 0;
1413 p_eth_port_ctrl->port_tx_queue_command = 0;
1414
1415
1416 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1417 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1418 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1419 p_eth_port_ctrl->rx_resource_err[queue] = false;
1420 }
1421
1422 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1423 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1424 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1425 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1426 p_eth_port_ctrl->tx_resource_err[queue] = false;
1427 }
1428
1429 eth_port_reset (p_eth_port_ctrl->port_num);
1430
1431
1432 win_param.win = ETH_WIN0;
1433 win_param.target = ETH_TARGET_DRAM;
1434 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1435#ifndef CONFIG_NOT_COHERENT_CACHE
1436 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1437#endif
1438 win_param.high_addr = 0;
1439
1440 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1441 win_param.size = mv_get_dram_bank_size (BANK0);
1442 if (win_param.size == 0)
1443 win_param.enable = 0;
1444 else
1445 win_param.enable = 1;
1446 win_param.access_ctrl = EWIN_ACCESS_FULL;
1447
1448
1449 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1450
1451
1452 win_param.win = ETH_WIN1;
1453 win_param.target = ETH_TARGET_DRAM;
1454 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1455#ifndef CONFIG_NOT_COHERENT_CACHE
1456 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1457#endif
1458 win_param.high_addr = 0;
1459
1460 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1461 win_param.size = mv_get_dram_bank_size (BANK1);
1462 if (win_param.size == 0)
1463 win_param.enable = 0;
1464 else
1465 win_param.enable = 1;
1466 win_param.access_ctrl = EWIN_ACCESS_FULL;
1467
1468
1469 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1470
1471
1472 win_param.win = ETH_WIN2;
1473 win_param.target = ETH_TARGET_DRAM;
1474 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1475#ifndef CONFIG_NOT_COHERENT_CACHE
1476 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1477#endif
1478 win_param.high_addr = 0;
1479
1480 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1481 win_param.size = mv_get_dram_bank_size (BANK2);
1482 if (win_param.size == 0)
1483 win_param.enable = 0;
1484 else
1485 win_param.enable = 1;
1486 win_param.access_ctrl = EWIN_ACCESS_FULL;
1487
1488
1489 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1490
1491
1492 win_param.win = ETH_WIN3;
1493 win_param.target = ETH_TARGET_DRAM;
1494 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1495#ifndef CONFIG_NOT_COHERENT_CACHE
1496 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1497#endif
1498 win_param.high_addr = 0;
1499
1500 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1501 win_param.size = mv_get_dram_bank_size (BANK3);
1502 if (win_param.size == 0)
1503 win_param.enable = 0;
1504 else
1505 win_param.enable = 1;
1506 win_param.access_ctrl = EWIN_ACCESS_FULL;
1507
1508
1509 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1510
1511
1512 win_param.win = ETH_WIN4;
1513 win_param.target = EBAR_TARGET_CBS;
1514 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1515 win_param.high_addr = 0;
1516 win_param.base_addr = mv_get_internal_sram_base ();
1517 win_param.size = MV64460_INTERNAL_SRAM_SIZE;
1518 win_param.enable = 1;
1519 win_param.access_ctrl = EWIN_ACCESS_FULL;
1520
1521
1522 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1523
1524 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1525
1526 ethernet_phy_set (p_eth_port_ctrl->port_num,
1527 p_eth_port_ctrl->port_phy_addr);
1528
1529 return;
1530
1531}
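
/*
 * Start a previously initialized port: program the current descriptor
 * pointers, the unicast address, the port/SDMA configuration and the
 * serial control register, then enable the configured RX queues.
 * Returns false if the PHY reports no link.
 */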
1563static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
1564{
1565 int queue;
1566 volatile ETH_TX_DESC *p_tx_curr_desc;
1567 volatile ETH_RX_DESC *p_rx_curr_desc;
1568 unsigned int phy_reg_data;
1569 ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;
1570
1571
1572
1573 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1574 CURR_TFD_GET (p_tx_curr_desc, queue);
1575 MV_REG_WRITE ((MV64460_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
1576 (eth_port_num)
1577 + (4 * queue)),
1578 ((unsigned int) p_tx_curr_desc));
1579
1580 }
1581
1582
1583 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1584 CURR_RFD_GET (p_rx_curr_desc, queue);
1585 MV_REG_WRITE ((MV64460_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
1586 (eth_port_num)
1587 + (4 * queue)),
1588 ((unsigned int) p_rx_curr_desc));
1589
1590 if (p_rx_curr_desc != NULL)
1591
1592 eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
1593 p_eth_port_ctrl->port_mac_addr,
1594 queue);
1595 }
1596
1597
1598 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
1599 p_eth_port_ctrl->port_config);
1600
1601 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
1602 p_eth_port_ctrl->port_config_extend);
1603
1604 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1605 p_eth_port_ctrl->port_serial_control);
1606
1607 MV_SET_REG_BITS (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1608 ETH_SERIAL_PORT_ENABLE);
1609
1610
1611 MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
1612 p_eth_port_ctrl->port_sdma_config);
1613
1614 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
1615 (eth_port_num), 0x3fffffff);
1616 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
1617 (eth_port_num), 0x03fffcff);
1618
1619 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);
1620
1621
1622 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
1623 p_eth_port_ctrl->port_rx_queue_command);
1624
1625
1626 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
1627
1628 if (!(phy_reg_data & 0x20))
1629 return false;
1630
1631 return true;
1632}
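
/*
 * Program the port's unicast MAC address registers and accept the address
 * in the DA filter table for the given RX queue.
 */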
1653static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1654 unsigned char *p_addr, ETH_QUEUE queue)
1655{
1656 unsigned int mac_h;
1657 unsigned int mac_l;
1658
1659 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1660 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1661 (p_addr[2] << 8) | (p_addr[3] << 0);
1662
1663 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1664 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1665
1666
1667 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1668
1669 return;
1670}
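
/*
 * Set or clear the unicast DA filter table entry selected by the low
 * nibble of the MAC address; option is ACCEPT_MAC_ADDR or REJECT_MAC_ADDR.
 */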
1695static bool eth_port_uc_addr (ETH_PORT eth_port_num,
1696 unsigned char uc_nibble,
1697 ETH_QUEUE queue, int option)
1698{
1699 unsigned int unicast_reg;
1700 unsigned int tbl_offset;
1701 unsigned int reg_offset;
1702
1703
1704 uc_nibble = (0xf & uc_nibble);
1705 tbl_offset = (uc_nibble / 4) * 4;
1706 reg_offset = uc_nibble % 4;
1707
1708 switch (option) {
1709 case REJECT_MAC_ADDR:
1710
1711 unicast_reg =
1712 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1713 (eth_port_num)
1714 + tbl_offset));
1715
1716 unicast_reg &= (0x0E << (8 * reg_offset));
1717
1718 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1719 (eth_port_num)
1720 + tbl_offset), unicast_reg);
1721 break;
1722
1723 case ACCEPT_MAC_ADDR:
1724
1725 unicast_reg =
1726 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1727 (eth_port_num)
1728 + tbl_offset));
1729
1730 unicast_reg |= ((0x01 | queue) << (8 * reg_offset));
1731
1732 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1733 (eth_port_num)
1734 + tbl_offset), unicast_reg);
1735
1736 break;
1737
1738 default:
1739 return false;
1740 }
1741 return true;
1742}
1743
1744#if 0
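
/*
 * Multicast filter helpers (currently compiled out): special multicast
 * addresses 01-00-5E-00-00-XX go into the special multicast table, all
 * other addresses are hashed with a CRC-8 into the other-multicast table.
 */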
1776static void eth_port_mc_addr (ETH_PORT eth_port_num,
1777 unsigned char *p_addr,
1778 ETH_QUEUE queue, int option)
1779{
1780 unsigned int mac_h;
1781 unsigned int mac_l;
1782 unsigned char crc_result = 0;
1783 int mac_array[48];
1784 int crc[8];
1785 int i;
1786
1787
1788 if ((p_addr[0] == 0x01) &&
1789 (p_addr[1] == 0x00) &&
1790 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00))
1791
1792 eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
1793 else {
1794
1795 mac_h = (p_addr[0] << 8) | (p_addr[1]);
1796 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
1797 (p_addr[4] << 8) | (p_addr[5] << 0);
1798
1799 for (i = 0; i < 32; i++)
1800 mac_array[i] = (mac_l >> i) & 0x1;
1801 for (i = 32; i < 48; i++)
1802 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
1803
1804
1805 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
1806 mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
1807 mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
1808 mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
1809 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
1810 mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
1811 mac_array[6] ^ mac_array[0];
1812
1813 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1814 mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
1815 mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
1816 mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
1817 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
1818 mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
1819 mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
1820 mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
1821 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
1822 mac_array[0];
1823
1824 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
1825 mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
1826 mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
1827 mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
1828 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
1829 mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
1830 mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
1831 mac_array[2] ^ mac_array[1] ^ mac_array[0];
1832
1833 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
1834 mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
1835 mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
1836 mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
1837 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
1838 mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
1839 mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
1840 mac_array[2] ^ mac_array[1];
1841
1842 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1843 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
1844 mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
1845 mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
1846 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
1847 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1848 mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
1849 mac_array[2];
1850
1851 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
1852 mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
1853 mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
1854 mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
1855 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
1856 mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
1857 mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
1858 mac_array[3];
1859
1860 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
1861 mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
1862 mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
1863 mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
1864 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
1865 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1866 mac_array[6] ^ mac_array[5] ^ mac_array[4];
1867
1868 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
1869 mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
1870 mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
1871 mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
1872 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
1873 mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
1874 mac_array[6] ^ mac_array[5];
1875
1876 for (i = 0; i < 8; i++)
1877 crc_result = crc_result | (crc[i] << i);
1878
1879 eth_port_omc_addr (eth_port_num, crc_result, queue, option);
1880 }
1881 return;
1882}
1910static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1911 unsigned char mc_byte,
1912 ETH_QUEUE queue, int option)
1913{
1914 unsigned int smc_table_reg;
1915 unsigned int tbl_offset;
1916 unsigned int reg_offset;
1917
1918
1919 tbl_offset = (mc_byte / 4) * 4;
1920 reg_offset = mc_byte % 4;
1921 queue &= 0x7;
1922
1923 switch (option) {
1924 case REJECT_MAC_ADDR:
1925
1926 smc_table_reg =
1927 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1928 smc_table_reg &= (0x0E << (8 * reg_offset));
1929
1930 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1931 break;
1932
1933 case ACCEPT_MAC_ADDR:
1934
1935 smc_table_reg =
1936 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1937 smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
1938
1939 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1940 break;
1941
1942 default:
1943 return false;
1944 }
1945 return true;
1946}
1974static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1975 unsigned char crc8,
1976 ETH_QUEUE queue, int option)
1977{
1978 unsigned int omc_table_reg;
1979 unsigned int tbl_offset;
1980 unsigned int reg_offset;
1981
1982
1983 tbl_offset = (crc8 / 4) * 4;
1984 reg_offset = crc8 % 4;
1985 queue &= 0x7;
1986
1987 switch (option) {
1988 case REJECT_MAC_ADDR:
1989
1990 omc_table_reg =
1991 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1992 omc_table_reg &= (0x0E << (8 * reg_offset));
1993
1994 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
1995 break;
1996
1997 case ACCEPT_MAC_ADDR:
1998
1999 omc_table_reg =
2000 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2001 omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
2002
2003 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2004 break;
2005
2006 default:
2007 return false;
2008 }
2009 return true;
2010}
2011#endif
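
/* Clear the unicast and both multicast DA filter tables of the port. */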
2030static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
2031{
2032 int table_index;
2033
2034
2035 for (table_index = 0; table_index <= 0xC; table_index += 4)
2036 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
2037 (eth_port_num) + table_index), 0);
2038
2039 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2040
2041 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2042
2043 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2044 }
2045}
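
/* The MIB counters are clear-on-read, so reading every counter resets it. */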
2064static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2065{
2066 int i;
2067 unsigned int dummy;
2068
2069
2070 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2071 i += 4)
2072 dummy = MV_REG_READ ((MV64460_ETH_MIB_COUNTERS_BASE
2073 (eth_port_num) + i));
2074
2075 return;
2076}
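
/* Read a single MIB counter of the port at the given offset. */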
2099unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2100 unsigned int mib_offset)
2101{
2102 return (MV_REG_READ (MV64460_ETH_MIB_COUNTERS_BASE (eth_port_num)
2103 + mib_offset));
2104}
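
/* Set the 5-bit PHY address of the port in the PHY address register. */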
2123static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2124{
2125 unsigned int reg_data;
2126
2127 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2128
2129 reg_data &= ~(0x1F << (5 * eth_port_num));
2130 reg_data |= (phy_addr << (5 * eth_port_num));
2131
2132 MV_REG_WRITE (MV64460_ETH_PHY_ADDR_REG, reg_data);
2133
2134 return;
2135}
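
/* Return the PHY address currently assigned to the port. */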
2153static int ethernet_phy_get (ETH_PORT eth_port_num)
2154{
2155 unsigned int reg_data;
2156
2157 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2158
2159 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2160}
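
/*
 * Soft-reset the PHY (register 0, bit 15) and poll status register 1 until
 * bit 5 is set or the retry count expires; returns false on timeout.
 */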
2179static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2180{
2181 unsigned int time_out = 50;
2182 unsigned int phy_reg_data;
2183
2184
2185 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2186 phy_reg_data |= 0x8000;
2187 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2188
2189
2190 do {
2191 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2192
2193 if (time_out-- == 0)
2194 return false;
2195 }
2196 while (!(phy_reg_data & 0x20));
2197
2198 return true;
2199}
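
/*
 * Stop all enabled TX and RX queues, wait until the queue command
 * registers report them idle, clear the MIB counters and disable the
 * serial port.
 */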
2219static void eth_port_reset (ETH_PORT eth_port_num)
2220{
2221 unsigned int reg_data;
2222
2223
2224 reg_data =
2225 MV_REG_READ (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2226 (eth_port_num));
2227
2228 if (reg_data & 0xFF) {
2229
2230 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2231 (eth_port_num), (reg_data << 8));
2232
2233
2234 do {
2235
2236 reg_data =
2237 MV_REG_READ
2238 (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2239 (eth_port_num));
2240 }
2241 while (reg_data & 0xFF);
2242 }
2243
2244
2245 reg_data =
2246 MV_REG_READ (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2247 (eth_port_num));
2248
2249 if (reg_data & 0xFF) {
2250
2251 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2252 (eth_port_num), (reg_data << 8));
2253
2254
2255 do {
2256
2257 reg_data =
2258 MV_REG_READ
2259 (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2260 (eth_port_num));
2261 }
2262 while (reg_data & 0xFF);
2263 }
2264
2265
2266
2267 eth_clear_mib_counters (eth_port_num);
2268
2269
2270 reg_data =
2271 MV_REG_READ (MV64460_ETH_PORT_SERIAL_CONTROL_REG
2272 (eth_port_num));
2273 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2274 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
2275 reg_data);
2276
2277 return;
2278}
2279
2280#if 0
2300static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2301 unsigned int value)
2302{
2303 unsigned int eth_config_reg;
2304
2305 eth_config_reg =
2306 MV_REG_READ (MV64460_ETH_PORT_CONFIG_REG (eth_port_num));
2307 eth_config_reg |= value;
2308 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
2309 eth_config_reg);
2310
2311 return;
2312}
2313#endif
2314
2315#if 0
2335static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
2336 unsigned int value)
2337{
2338 unsigned int eth_config_reg;
2339
2340 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2341 (eth_port_num));
2342 eth_config_reg &= ~value;
2343 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
2344 eth_config_reg);
2345
2346 return;
2347}
2348#endif
2349
2350#if 0
2368static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
2369{
2370 unsigned int eth_config_reg;
2371
2372 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2373 (eth_port_num));
2374 return eth_config_reg;
2375}
2376
2377#endif
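
/*
 * Read a PHY register over the SMI interface: wait for the SMI unit to go
 * non-busy, issue the read opcode, wait for the read-valid bit and return
 * the 16-bit result in *value.  Returns false on timeout.
 */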
2399static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2400 unsigned int phy_reg, unsigned int *value)
2401{
2402 unsigned int reg_value;
2403 unsigned int time_out = PHY_BUSY_TIMEOUT;
2404 int phy_addr;
2405
2406 phy_addr = ethernet_phy_get (eth_port_num);
2407
2408
2409
2410 do {
2411 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2412 if (time_out-- == 0) {
2413 return false;
2414 }
2415 }
2416 while (reg_value & ETH_SMI_BUSY);
2417
2418
2419
2420 MV_REG_WRITE (MV64460_ETH_SMI_REG,
2421 (phy_addr << 16) | (phy_reg << 21) |
2422 ETH_SMI_OPCODE_READ);
2423
2424 time_out = PHY_BUSY_TIMEOUT;
2425
2426 do {
2427 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2428 if (time_out-- == 0) {
2429 return false;
2430 }
2431 }
2432 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2433
2434
2435#define PHY_UPDATE_TIMEOUT 10000
2436 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2437
2438 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2439
2440 *value = reg_value & 0xffff;
2441
2442 return true;
2443}
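
/*
 * Write a 16-bit value to a PHY register over the SMI interface; returns
 * false if the SMI unit stays busy too long.
 */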
2465static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
2466 unsigned int phy_reg, unsigned int value)
2467{
2468 unsigned int reg_value;
2469 unsigned int time_out = PHY_BUSY_TIMEOUT;
2470 int phy_addr;
2471
2472 phy_addr = ethernet_phy_get (eth_port_num);
2473
2474
2475 do {
2476 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2477 if (time_out-- == 0) {
2478 return false;
2479 }
2480 }
2481 while (reg_value & ETH_SMI_BUSY);
2482
2483
2484 MV_REG_WRITE (MV64460_ETH_SMI_REG,
2485 (phy_addr << 16) | (phy_reg << 21) |
2486 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2487 return true;
2488}
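
/*
 * Program one ethernet address decode window: access protection, window
 * size, base/target/attributes, high remap address and the window
 * enable/disable bit.
 */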
2508static void eth_set_access_control (ETH_PORT eth_port_num,
2509 ETH_WIN_PARAM * param)
2510{
2511 unsigned int access_prot_reg;
2512
2513
2514 access_prot_reg = MV_REG_READ (MV64460_ETH_ACCESS_PROTECTION_REG
2515 (eth_port_num));
2516 access_prot_reg &= (~(3 << (param->win * 2)));
2517 access_prot_reg |= (param->access_ctrl << (param->win * 2));
2518 MV_REG_WRITE (MV64460_ETH_ACCESS_PROTECTION_REG (eth_port_num),
2519 access_prot_reg);
2520
2521
2522 MV_REG_WRITE ((MV64460_ETH_SIZE_REG_0 +
2523 (ETH_SIZE_REG_GAP * param->win)),
2524 (((param->size / 0x10000) - 1) << 16));
2525
2526
2527 MV_REG_WRITE ((MV64460_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
2528 (param->target | param->attributes | param->base_addr));
2529
2530 if (param->win < 4)
2531 MV_REG_WRITE ((MV64460_ETH_HIGH_ADDR_REMAP_REG_0 +
2532 (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
2533 param->high_addr);
2534
2535
2536 if (param->enable == 1)
2537 MV_RESET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
2538 (1 << param->win));
2539 else
2540 MV_SET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
2541 (1 << param->win));
2542}
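
/*
 * Build the RX descriptor ring for a queue: chain rx_desc_num descriptors
 * into a circle, give each one an rx_buff_size buffer carved out of
 * rx_buff_base_addr and hand ownership to the SDMA.  Buffer base and size
 * must satisfy the controller's alignment rules or false is returned.
 */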
2574static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2575 ETH_QUEUE rx_queue,
2576 int rx_desc_num,
2577 int rx_buff_size,
2578 unsigned int rx_desc_base_addr,
2579 unsigned int rx_buff_base_addr)
2580{
2581 ETH_RX_DESC *p_rx_desc;
2582 ETH_RX_DESC *p_rx_prev_desc;
2583 unsigned int buffer_addr;
2584 int ix;
2585
2586
2587 p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
2588 p_rx_prev_desc = p_rx_desc;
2589 buffer_addr = rx_buff_base_addr;
2590
2591
2592 if (rx_buff_base_addr & 0xF)
2593 return false;
2594
2595
2596 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
2597 return false;
2598
2599
2600 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
2601 return false;
2602
2603
2604 for (ix = 0; ix < rx_desc_num; ix++) {
2605 p_rx_desc->buf_size = rx_buff_size;
2606 p_rx_desc->byte_cnt = 0x0000;
2607 p_rx_desc->cmd_sts =
2608 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2609 p_rx_desc->next_desc_ptr =
2610 ((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
2611 p_rx_desc->buf_ptr = buffer_addr;
2612 p_rx_desc->return_info = 0x00000000;
2613 D_CACHE_FLUSH_LINE (p_rx_desc, 0);
2614 buffer_addr += rx_buff_size;
2615 p_rx_prev_desc = p_rx_desc;
2616 p_rx_desc = (ETH_RX_DESC *)
2617 ((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
2618 }
2619
2620
2621 p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
2622 D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);
2623
2624
2625 CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2626 USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2627
2628 p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
2629 (ETH_RX_DESC *) rx_desc_base_addr;
2630 p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
2631 rx_desc_num * RX_DESC_ALIGNED_SIZE;
2632
2633 p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);
2634
2635 return true;
2636}
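
/*
 * ether_init_tx_desc_ring - Initialize a port's Tx descriptor ring.
 *
 * Builds tx_desc_num chained descriptors starting at tx_desc_base_addr,
 * assigns each one a buffer of tx_buff_size bytes from the area at
 * tx_buff_base_addr and closes the chain into a ring.  Unlike the Rx ring,
 * the descriptors are created with the CPU as owner.  Returns false on an
 * alignment or size violation, true otherwise.
 */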
static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE tx_queue,
				     int tx_desc_num,
				     int tx_buff_size,
				     unsigned int tx_desc_base_addr,
				     unsigned int tx_buff_base_addr)
{
	ETH_TX_DESC *p_tx_desc;
	ETH_TX_DESC *p_tx_prev_desc;
	unsigned int buffer_addr;
	int ix;

	p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
	p_tx_prev_desc = p_tx_desc;
	buffer_addr = tx_buff_base_addr;

	/* Tx buffer area must be 16-byte aligned */
	if (tx_buff_base_addr & 0xF)
		return false;

	/* Tx buffer size must lie between TX_BUFFER_MIN_SIZE and TX_BUFFER_MAX_SIZE */
	if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
	    || (tx_buff_size < TX_BUFFER_MIN_SIZE))
		return false;

	/* Initialize the Tx descriptor ring */
	for (ix = 0; ix < tx_desc_num; ix++) {
		p_tx_desc->byte_cnt = 0x0000;
		p_tx_desc->l4i_chk = 0x0000;
		p_tx_desc->cmd_sts = 0x00000000;
		p_tx_desc->next_desc_ptr =
			((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
		p_tx_desc->buf_ptr = buffer_addr;
		p_tx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_tx_desc, 0);
		buffer_addr += tx_buff_size;
		p_tx_prev_desc = p_tx_desc;
		p_tx_desc = (ETH_TX_DESC *)
			((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
	}

	/* Close the Tx descriptor ring */
	p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
	D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);

	/* Save the ring pointers in the driver structure */
	CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
	USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);

	p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
		(ETH_TX_DESC *) tx_desc_base_addr;
	p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
		(tx_desc_num * TX_DESC_ALIGNED_SIZE);

	/* Enable the queue in the port's Tx queue command bitmap */
	p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);

	return true;
}
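
/*
 * eth_port_send - Queue a packet on a port's Tx descriptor ring.
 *
 * Fills the next free Tx descriptor of tx_queue from the PKT_INFO
 * structure.  For the last fragment of a packet the descriptor is handed
 * to the DMA with interrupt enable and the Tx queue is kicked; otherwise
 * DMA ownership of the first descriptor is deferred until the whole chain
 * is set up.  Returns ETH_OK, ETH_QUEUE_FULL on a pending resource error,
 * ETH_QUEUE_LAST_RESOURCE when this descriptor fills the ring, or
 * ETH_ERROR.
 */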
static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
					  ETH_QUEUE tx_queue,
					  PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_first;
	volatile ETH_TX_DESC *p_tx_desc_curr;
	volatile ETH_TX_DESC *p_tx_next_desc_curr;
	volatile ETH_TX_DESC *p_tx_desc_used;
	unsigned int command_status;

	/* Do not process the Tx ring in case of a Tx resource error */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		return ETH_QUEUE_FULL;

	/* Get the Tx descriptor ring indexes */
	CURR_TFD_GET (p_tx_desc_curr, tx_queue);
	USED_TFD_GET (p_tx_desc_used, tx_queue);

	if (p_tx_desc_curr == NULL)
		return ETH_ERROR;

	p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;

	if (command_status & (ETH_TX_FIRST_DESC)) {
		/* New packet: remember its first descriptor */
		FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
		p_tx_desc_first = p_tx_desc_curr;
	} else {
		FIRST_TFD_GET (p_tx_desc_first, tx_queue);
		command_status |= ETH_BUFFER_OWNED_BY_DMA;
	}

	/*
	 * Packets of 8 bytes or less trip a hardware errata; the
	 * copy-into-descriptor workaround below is unreachable because of
	 * the early return, so such packets are currently rejected.
	 */
	if (p_pkt_info->byte_cnt <= 8) {
		printf ("You have failed in the < 8 bytes errata - fixme\n");
		return ETH_ERROR;

		p_tx_desc_curr->buf_ptr =
			(unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
		eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
			    p_pkt_info->byte_cnt);
	} else
		p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;

	p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
	p_tx_desc_curr->return_info = p_pkt_info->return_info;

	if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
		/* Hand the last descriptor to the DMA with interrupt enable */
		p_tx_desc_curr->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

		if (p_tx_desc_curr != p_tx_desc_first)
			p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;

		/* Flush the descriptors and the CPU pipe */
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
		CPU_PIPE_FLUSH;

		/* Apply the send command */
		ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);

		/* Finish this packet; the next descriptor starts a new one */
		p_tx_desc_first = p_tx_next_desc_curr;
		FIRST_TFD_SET (p_tx_desc_first, tx_queue);

	} else {
		p_tx_desc_curr->cmd_sts = command_status;
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
	}

	/* Check for ring index overlap in the Tx descriptor ring */
	if (p_tx_next_desc_curr == p_tx_desc_used) {
		CURR_TFD_SET (p_tx_desc_first, tx_queue);

		p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
		return ETH_QUEUE_LAST_RESOURCE;
	} else {
		CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
		return ETH_OK;
	}
}
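
/*
 * eth_tx_return_desc - Release a transmitted Tx descriptor.
 *
 * Returns the oldest "used" Tx descriptor of tx_queue to the caller via
 * p_pkt_info once the DMA has finished with it.  Returns ETH_RETRY while
 * the descriptor is still owned by the DMA, ETH_END_OF_JOB when there is
 * nothing left to release, ETH_ERROR on a bad descriptor pointer and
 * ETH_OK otherwise.  A successful return also clears the queue's Tx
 * resource error flag.
 */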
static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE tx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_used = NULL;
	volatile ETH_TX_DESC *p_tx_desc_first = NULL;
	unsigned int command_status;

	/* Get the Tx descriptor ring indexes */
	USED_TFD_GET (p_tx_desc_used, tx_queue);
	FIRST_TFD_GET (p_tx_desc_first, tx_queue);

	/* Sanity check */
	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still transmitting... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_RETRY;
	}

	/* Stop releasing: about to overlap the current available descriptor */
	if ((p_tx_desc_used == p_tx_desc_first) &&
	    (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_END_OF_JOB;
	}

	/* Pass the packet information back to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = p_tx_desc_used->return_info;
	p_tx_desc_used->return_info = 0;

	/* Advance the 'used' index to the next descriptor */
	USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);

	/* Any Tx return cancels the Tx resource error status */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		p_eth_port_ctrl->tx_resource_err[tx_queue] = false;

	D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);

	return ETH_OK;
}
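
/*
 * eth_port_receive - Hand a received packet to the caller.
 *
 * Copies the information of the current Rx descriptor of rx_queue into
 * p_pkt_info (byte count and buffer pointer adjusted by RX_BUF_OFFSET),
 * advances the 'curr' ring index, and flags a resource error when the
 * ring wraps onto the 'used' index.  Returns ETH_OK on success,
 * ETH_END_OF_JOB when the descriptor is still owned by the DMA,
 * ETH_QUEUE_FULL on a pending resource error and ETH_ERROR otherwise.
 */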
static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
					     ETH_QUEUE rx_queue,
					     PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_rx_curr_desc;
	volatile ETH_RX_DESC *p_rx_next_curr_desc;
	volatile ETH_RX_DESC *p_rx_used_desc;
	unsigned int command_status;

	/* Do not process the Rx ring in case of an Rx resource error */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
		printf ("\nRx Queue is full ...\n");
		return ETH_QUEUE_FULL;
	}

	/* Get the Rx descriptor ring 'curr' and 'used' indexes */
	CURR_RFD_GET (p_rx_curr_desc, rx_queue);
	USED_RFD_GET (p_rx_used_desc, rx_queue);

	/* Sanity check */
	if (p_rx_curr_desc == NULL)
		return ETH_ERROR;

	p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
	command_status = p_rx_curr_desc->cmd_sts;

	/* Nothing to receive... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
		return ETH_END_OF_JOB;
	}

	p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = p_rx_curr_desc->return_info;
	p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;

	/* Clear return_info to indicate the packet has been passed up */
	p_rx_curr_desc->return_info = 0;

	/* Update 'curr' in the driver structure */
	CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);

	/* Rx descriptors exhausted: set the Rx resource error flag */
	if (p_rx_next_curr_desc == p_rx_used_desc)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = true;

	D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
	CPU_PIPE_FLUSH;
	return ETH_OK;
}
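
/*
 * eth_rx_return_buff - Return a buffer to the Rx descriptor ring.
 *
 * Attaches the buffer described by p_pkt_info to the 'used' Rx descriptor
 * of rx_queue, hands the descriptor back to the DMA and advances the
 * 'used' index.  Returning a buffer also clears the queue's Rx resource
 * error flag.  Returns ETH_ERROR on a bad descriptor pointer, ETH_OK
 * otherwise.
 */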
static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE rx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_used_rx_desc;

	/* Get the 'used' Rx descriptor */
	USED_RFD_GET (p_used_rx_desc, rx_queue);

	/* Sanity check */
	if (p_used_rx_desc == NULL)
		return ETH_ERROR;

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->return_info = p_pkt_info->return_info;
	p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
	p_used_rx_desc->buf_size = MV64460_RX_BUFFER_SIZE;

	/* Flush the write pipe */
	CPU_PIPE_FLUSH;

	/* Return the descriptor to DMA ownership */
	p_used_rx_desc->cmd_sts =
		ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;

	/* Flush the descriptor and the CPU pipe */
	D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
	CPU_PIPE_FLUSH;

	/* Advance the 'used' index to the next descriptor */
	USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);

	/* Any Rx return cancels the Rx resource error status */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = false;

	return ETH_OK;
}
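
/*
 * eth_port_set_rx_coal - Set the Rx interrupt coalescing delay (currently
 * compiled out with #if 0).
 *
 * The delay, presumably given in microseconds judging by the t_clk /
 * 1000000 scaling, is converted into units of 64 T-CLK cycles and written
 * into the coalescing field of the port's SDMA configuration register.
 * Returns the value that was programmed.
 */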
#if 0
static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
		      ((coal & 0x3fff) << 8) |
		      (MV_REG_READ
		       (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num))
		       & 0xffc000ff));
	return coal;
}
#endif
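
/*
 * eth_port_set_tx_coal - Set the Tx interrupt coalescing delay (currently
 * compiled out with #if 0).
 *
 * Uses the same delay-to-64-cycle conversion as eth_port_set_rx_coal and
 * writes the result into the port's Tx FIFO urgent threshold register.
 * Returns the value that was programmed.
 */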
#if 0
static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64460_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
		      coal << 4);
	return coal;
}
#endif
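
/*
 * eth_b_copy - Simple byte copy helper.
 *
 * Copies byte_count bytes from src_addr to dst_addr one byte at a time.
 * The first word at dst_addr is cleared before the copy starts, which
 * assumes dst_addr is at least 32-bit aligned; it is used by the
 * small-packet path in eth_port_send.
 */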
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	/* Clear the first destination word (dst_addr assumed word aligned) */
	*(unsigned int *) dst_addr = 0x0;

	/* Plain byte-by-byte copy */
	while (byte_count != 0) {
		*(char *) dst_addr = *(char *) src_addr;
		dst_addr++;
		src_addr++;
		byte_count--;
	}
}