1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include <common.h>
31#include <net.h>
32#include <malloc.h>
33
34#include "mv_eth.h"
35
36
37
38#undef DEBUG_MV_ETH
39
40#ifdef DEBUG_MV_ETH
41#define DEBUG
42#define DP(x) x
43#else
44#define DP(x)
45#endif
46
47#undef MV64360_CHECKSUM_OFFLOAD
48
49
50
51
52
53
54
55
56
57
58#undef MV64360_RX_QUEUE_FILL_ON_TASK
59
60
61
62#define MAGIC_ETH_RUNNING 8031971
63#define MV64360_INTERNAL_SRAM_SIZE _256K
64#define EXTRA_BYTES 32
65#define WRAP ETH_HLEN + 2 + 4 + 16
66#define BUFFER_MTU dev->mtu + WRAP
67#define INT_CAUSE_UNMASK_ALL 0x0007ffff
68#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
69#ifdef MV64360_RX_FILL_ON_TASK
70#define INT_CAUSE_MASK_ALL 0x00000000
71#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
72#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
73#endif
74
75
76#define MV_REG_READ(offset) my_le32_to_cpu(* (volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset))
77#define MV_REG_WRITE(offset,data) *(volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32 (data)
78#define MV_SET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
79#define MV_RESET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))
80
81
82static int mv64360_eth_real_open (struct eth_device *eth);
83static int mv64360_eth_real_stop (struct eth_device *eth);
84static struct net_device_stats *mv64360_eth_get_stats (struct eth_device
85 *dev);
86static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
87static void mv64360_eth_update_stat (struct eth_device *dev);
88bool db64360_eth_start (struct eth_device *eth);
89unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
90 unsigned int mib_offset);
91int mv64360_eth_receive (struct eth_device *dev);
92
93int mv64360_eth_xmit (struct eth_device *, volatile void *packet, int length);
94
95#ifndef UPDATE_STATS_BY_SOFTWARE
96static void mv64360_eth_print_stat (struct eth_device *dev);
97#endif
98
99extern void NetReceive (volatile uchar *, int);
100
101extern unsigned int INTERNAL_REG_BASE_ADDR;
102
103
104
105
106#ifdef DEBUG_MV_ETH
/*
 * Debug-only helper: dump the layout of this device's private data.
 *
 * Prints the addresses of the ETH_PORT_INFO block, its embedded
 * mv64360_eth_priv pointer, the MV64360 internal register window, and
 * the queue-0 RX/TX descriptor rings and packet buffer pools together
 * with their allocated sizes.
 */
void print_globals (struct eth_device *dev)
{
	printf ("Ethernet PRINT_Globals-Debug function\n");
	printf ("Base Address for ETH_PORT_INFO: %08x\n",
		(unsigned int) dev->priv);
	printf ("Base Address for mv64360_eth_priv: %08x\n",
		(unsigned int) &(((ETH_PORT_INFO *) dev->priv)->
				 port_private));

	printf ("GT Internal Base Address: %08x\n",
		INTERNAL_REG_BASE_ADDR);
	/* NOTE(review): the "+ 32" below presumably mirrors alignment
	 * slack reserved at allocation time (cf. EXTRA_BYTES) -- confirm
	 * against the allocation code. */
	printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0], MV64360_TX_QUEUE_SIZE);
	printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0], MV64360_RX_QUEUE_SIZE);
	printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->
		p_rx_buffer_base[0],
		(MV64360_RX_QUEUE_SIZE * MV64360_RX_BUFFER_SIZE) + 32);
	printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->
		p_tx_buffer_base[0],
		(MV64360_TX_QUEUE_SIZE * MV64360_TX_BUFFER_SIZE) + 32);
}
129#endif
130
131#define my_cpu_to_le32(x) my_le32_to_cpu((x))
132
/*
 * Swap the byte order of a 32-bit value (little-endian <-> CPU order
 * on this big-endian platform).  Only the low 32 bits are considered.
 */
unsigned long my_le32_to_cpu (unsigned long x)
{
	unsigned long b0 = (x >> 24) & 0x000000ffU;
	unsigned long b1 = (x >> 8) & 0x0000ff00U;
	unsigned long b2 = (x << 8) & 0x00ff0000U;
	unsigned long b3 = (x << 24) & 0xff000000U;

	return b3 | b2 | b1 | b0;
}
139
140
141
142
143
144
145
146
147
148
149
/*
 * Print the current link state of the port on the console.
 *
 * PHY status register 1, bit 5 (0x20) is used as the "link up"
 * indication.  When the link is up, duplex and speed are decoded from
 * the port status register: BIT2 = full duplex, BIT4 = 1 Gbps,
 * BIT5 = 100 Mbps (otherwise 10 Mbps).
 */
static void mv64360_eth_print_phy_status (struct eth_device *dev)
{
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
	unsigned int port_status, phy_reg_data;

	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Check link status on the PHY (status register 1). */
	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		printf ("Ethernet port changed link status to DOWN\n");
	} else {
		port_status =
			MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));
		printf ("Ethernet status port %d: Link up", port_num);
		printf (", %s",
			(port_status & BIT2) ? "Full Duplex" : "Half Duplex");
		if (port_status & BIT4)
			printf (", Speed 1 Gbps");
		else
			printf (", %s",
				(port_status & BIT5) ? "Speed 100 Mbps" :
				"Speed 10 Mbps");
		printf ("\n");
	}
}
180
181
182
183
184
185int db64360_eth_probe (struct eth_device *dev)
186{
187 return ((int) db64360_eth_start (dev));
188}
189
/* eth_device 'recv' hook: drain queue 0 and pass frames upstream. */
int db64360_eth_poll (struct eth_device *dev)
{
	return mv64360_eth_receive (dev);
}
194
/*
 * eth_device 'send' hook.  The transmit status from the low-level
 * routine is deliberately ignored here; always reports success (0).
 */
int db64360_eth_transmit (struct eth_device *dev, volatile void *packet,
			  int length)
{
	(void) mv64360_eth_xmit (dev, packet, length);
	return 0;
}
201
/* eth_device 'halt' hook: shut the port down. */
void db64360_eth_disable (struct eth_device *dev)
{
	mv64360_eth_stop (dev);
}
206
207
208void mv6436x_eth_initialize (bd_t * bis)
209{
210 struct eth_device *dev;
211 ETH_PORT_INFO *ethernet_private;
212 struct mv64360_eth_priv *port_private;
213 int devnum, x, temp;
214 char *s, *e, buf[64];
215
216 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
217 dev = calloc (sizeof (*dev), 1);
218 if (!dev) {
219 printf ("%s: mv_enet%d allocation failure, %s\n",
220 __FUNCTION__, devnum, "eth_device structure");
221 return;
222 }
223
224
225 sprintf (dev->name, "mv_enet%d", devnum);
226
227#ifdef DEBUG
228 printf ("Initializing %s\n", dev->name);
229#endif
230
231
232 switch (devnum) {
233 case 0:
234 s = "ethaddr";
235 break;
236
237 case 1:
238 s = "eth1addr";
239 break;
240
241 case 2:
242 s = "eth2addr";
243 break;
244
245 default:
246 printf ("%s: Invalid device number %d\n",
247 __FUNCTION__, devnum);
248 return;
249 }
250
251 temp = getenv_r (s, buf, sizeof (buf));
252 s = (temp > 0) ? buf : NULL;
253
254#ifdef DEBUG
255 printf ("Setting MAC %d to %s\n", devnum, s);
256#endif
257 for (x = 0; x < 6; ++x) {
258 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
259 if (s)
260 s = (*e) ? e + 1 : e;
261 }
262
263 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
264
265 dev->init = (void *) db64360_eth_probe;
266 dev->halt = (void *) ethernet_phy_reset;
267 dev->send = (void *) db64360_eth_transmit;
268 dev->recv = (void *) db64360_eth_poll;
269
270 ethernet_private =
271 calloc (sizeof (*ethernet_private), 1);
272 dev->priv = (void *) ethernet_private;
273 if (!ethernet_private) {
274 printf ("%s: %s allocation failure, %s\n",
275 __FUNCTION__, dev->name,
276 "Private Device Structure");
277 free (dev);
278 return;
279 }
280
281 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
282 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
283
284
285 port_private =
286 calloc (sizeof (*ethernet_private), 1);
287 ethernet_private->port_private = (void *)port_private;
288 if (!port_private) {
289 printf ("%s: %s allocation failure, %s\n",
290 __FUNCTION__, dev->name,
291 "Port Private Device Structure");
292
293 free (ethernet_private);
294 free (dev);
295 return;
296 }
297
298 port_private->stats =
299 calloc (sizeof (struct net_device_stats), 1);
300 if (!port_private->stats) {
301 printf ("%s: %s allocation failure, %s\n",
302 __FUNCTION__, dev->name,
303 "Net stat Structure");
304
305 free (port_private);
306 free (ethernet_private);
307 free (dev);
308 return;
309 }
310 memset (ethernet_private->port_private, 0,
311 sizeof (struct mv64360_eth_priv));
312 switch (devnum) {
313 case 0:
314 ethernet_private->port_num = ETH_0;
315 break;
316 case 1:
317 ethernet_private->port_num = ETH_1;
318 break;
319 case 2:
320 ethernet_private->port_num = ETH_2;
321 break;
322 default:
323 printf ("Invalid device number %d\n", devnum);
324 break;
325 };
326
327 port_private->port_num = devnum;
328
329
330
331
332 mv64360_eth_update_stat (dev);
333 memset (port_private->stats, 0,
334 sizeof (struct net_device_stats));
335
336 switch (devnum) {
337 case 0:
338 s = "ethaddr";
339 break;
340
341 case 1:
342 s = "eth1addr";
343 break;
344
345 case 2:
346 s = "eth2addr";
347 break;
348
349 default:
350 printf ("%s: Invalid device number %d\n",
351 __FUNCTION__, devnum);
352 return;
353 }
354
355 temp = getenv_r (s, buf, sizeof (buf));
356 s = (temp > 0) ? buf : NULL;
357
358#ifdef DEBUG
359 printf ("Setting MAC %d to %s\n", devnum, s);
360#endif
361 for (x = 0; x < 6; ++x) {
362 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
363 if (s)
364 s = (*e) ? e + 1 : e;
365 }
366
367 DP (printf ("Allocating descriptor and buffer rings\n"));
368
369 ethernet_private->p_rx_desc_area_base[0] =
370 (ETH_RX_DESC *) memalign (16,
371 RX_DESC_ALIGNED_SIZE *
372 MV64360_RX_QUEUE_SIZE + 1);
373 ethernet_private->p_tx_desc_area_base[0] =
374 (ETH_TX_DESC *) memalign (16,
375 TX_DESC_ALIGNED_SIZE *
376 MV64360_TX_QUEUE_SIZE + 1);
377
378 ethernet_private->p_rx_buffer_base[0] =
379 (char *) memalign (16,
380 MV64360_RX_QUEUE_SIZE *
381 MV64360_TX_BUFFER_SIZE + 1);
382 ethernet_private->p_tx_buffer_base[0] =
383 (char *) memalign (16,
384 MV64360_RX_QUEUE_SIZE *
385 MV64360_TX_BUFFER_SIZE + 1);
386
387#ifdef DEBUG_MV_ETH
388
389 print_globals (dev);
390#endif
391 eth_register (dev);
392
393 }
394 DP (printf ("%s: exit\n", __FUNCTION__));
395
396}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
/* Public open entry point: delegate to the real open routine. */
int mv64360_eth_open (struct eth_device *dev)
{
	return mv64360_eth_real_open (dev);
}
416
417
/*
 * Bring the port fully up: clear/unmask its interrupts, initialize the
 * port hardware and the queue-0 RX/TX descriptor rings, start the
 * port, and verify that the PHY reports link.
 *
 * Returns 1 when the port is up and running (eth_running is then set
 * to MAGIC_ETH_RUNNING), 0 when the PHY has no link even after a
 * reset attempt.
 */
static int mv64360_eth_real_open (struct eth_device *dev)
{

	unsigned int queue;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	u32 port_status, phy_reg_data;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;

	/* The MAC may have been changed in the environment meanwhile. */
	memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);

	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Stop all RX queues (disable bits live in the upper byte). */
	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
		      0x0000ff00);

	/* Clear any pending port interrupt causes. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	/* Unmask all ordinary port interrupt sources. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL);

	/* Unmask the extended (PHY / link change) interrupt sources. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL_EXT);

	/* NOTE(review): PHY addresses are assumed to start at 0x8 and be
	 * consecutive per port -- confirm for the board in use. */
	ethernet_private->port_phy_addr = 0x8 + port_num;

	/* Program address windows, reset the port, clear MAC tables. */
	eth_port_init (ethernet_private);

	/* Set up every TX queue: record the ring size, zero the
	 * descriptor area and initialize the ring.
	 * NOTE(review): the ring is always initialized as ETH_Q0
	 * regardless of 'queue' -- only one TX queue is really used. */
	for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
		unsigned int size;

		port_private->tx_ring_size[queue] = MV64360_TX_QUEUE_SIZE;
		size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
		ethernet_private->tx_desc_area_size[queue] = size;


		memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
			0, ethernet_private->tx_desc_area_size[queue]);


		if (ether_init_tx_desc_ring
		    (ethernet_private, ETH_Q0,
		     port_private->tx_ring_size[queue],
		     MV64360_TX_BUFFER_SIZE ,
		     (unsigned int) ethernet_private->
		     p_tx_desc_area_base[queue],
		     (unsigned int) ethernet_private->
		     p_tx_buffer_base[queue]) == false)
			printf ("### Error initializing TX Ring\n");
	}

	/* Same for the RX queues (again always ETH_Q0). */
	for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
		unsigned int size;


		port_private->rx_ring_size[queue] = MV64360_RX_QUEUE_SIZE;
		size = (port_private->rx_ring_size[queue] *
			RX_DESC_ALIGNED_SIZE);
		ethernet_private->rx_desc_area_size[queue] = size;


		memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
			0, ethernet_private->rx_desc_area_size[queue]);
		if ((ether_init_rx_desc_ring
		     (ethernet_private, ETH_Q0,
		      port_private->rx_ring_size[queue],
		      MV64360_RX_BUFFER_SIZE ,
		      (unsigned int) ethernet_private->
		      p_rx_desc_area_base[queue],
		      (unsigned int) ethernet_private->
		      p_rx_buffer_base[queue])) == false)
			printf ("### Error initializing RX Ring\n");
	}

	eth_port_start (ethernet_private);

	/* NOTE(review): writes 0b101 into bits 17..19 of the serial
	 * control register (other bits preserved) -- presumably the
	 * MRU/frame-size field; confirm with the MV64360 spec. */
	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num),
		      (0x5 << 17) |
		      (MV_REG_READ
		       (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num))
		       & 0xfff1ffff));

	/* NOTE(review): 0 written to the MTU register presumably selects
	 * the hardware default -- confirm. */
	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
	port_status = MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));

	/* Check link: PHY status register 1 bit 5 (0x20) = link up. */
	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		/* Try to recover the link with a PHY reset. */
		if ((ethernet_phy_reset (port_num)) != true) {
			printf ("$$ Warnning: No link on port %d \n",
				port_num);
			return 0;
		} else {
			eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
			if (!(phy_reg_data & 0x20)) {
				printf ("### Error: Phy is not active\n");
				return 0;
			}
		}
	} else {
		mv64360_eth_print_phy_status (dev);
	}
	port_private->eth_running = MAGIC_ETH_RUNNING;
	return 1;
}
546
547
548static int mv64360_eth_free_tx_rings (struct eth_device *dev)
549{
550 unsigned int queue;
551 ETH_PORT_INFO *ethernet_private;
552 struct mv64360_eth_priv *port_private;
553 unsigned int port_num;
554 volatile ETH_TX_DESC *p_tx_curr_desc;
555
556 ethernet_private = (ETH_PORT_INFO *) dev->priv;
557 port_private =
558 (struct mv64360_eth_priv *) ethernet_private->port_private;
559 port_num = port_private->port_num;
560
561
562 MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
563 0x0000ff00);
564
565
566 DP (printf ("Clearing previously allocated TX queues... "));
567 for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
568
569 for (p_tx_curr_desc =
570 ethernet_private->p_tx_desc_area_base[queue];
571 ((unsigned int) p_tx_curr_desc <= (unsigned int)
572 ethernet_private->p_tx_desc_area_base[queue] +
573 ethernet_private->tx_desc_area_size[queue]);
574 p_tx_curr_desc =
575 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
576 TX_DESC_ALIGNED_SIZE)) {
577
578 if (p_tx_curr_desc->return_info != 0) {
579 p_tx_curr_desc->return_info = 0;
580 DP (printf ("freed\n"));
581 }
582 }
583 DP (printf ("Done\n"));
584 }
585 return 0;
586}
587
588static int mv64360_eth_free_rx_rings (struct eth_device *dev)
589{
590 unsigned int queue;
591 ETH_PORT_INFO *ethernet_private;
592 struct mv64360_eth_priv *port_private;
593 unsigned int port_num;
594 volatile ETH_RX_DESC *p_rx_curr_desc;
595
596 ethernet_private = (ETH_PORT_INFO *) dev->priv;
597 port_private =
598 (struct mv64360_eth_priv *) ethernet_private->port_private;
599 port_num = port_private->port_num;
600
601
602
603 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
604 0x0000ff00);
605
606
607 DP (printf ("Clearing previously allocated RX queues... "));
608 for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
609
610 for (p_rx_curr_desc =
611 ethernet_private->p_rx_desc_area_base[queue];
612 (((unsigned int) p_rx_curr_desc <
613 ((unsigned int) ethernet_private->
614 p_rx_desc_area_base[queue] +
615 ethernet_private->rx_desc_area_size[queue])));
616 p_rx_curr_desc =
617 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
618 RX_DESC_ALIGNED_SIZE)) {
619 if (p_rx_curr_desc->return_info != 0) {
620 p_rx_curr_desc->return_info = 0;
621 DP (printf ("freed\n"));
622 }
623 }
624 DP (printf ("Done\n"));
625 }
626 return 0;
627}
628
629
630
631
632
633
634
635
636
637
638
639int mv64360_eth_stop (struct eth_device *dev)
640{
641 ETH_PORT_INFO *ethernet_private;
642 struct mv64360_eth_priv *port_private;
643 unsigned int port_num;
644
645 ethernet_private = (ETH_PORT_INFO *) dev->priv;
646 port_private =
647 (struct mv64360_eth_priv *) ethernet_private->port_private;
648 port_num = port_private->port_num;
649
650
651 MV_REG_WRITE (MV64360_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
652 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
653 mv64360_eth_real_stop (dev);
654
655 return 0;
656};
657
658
659
/*
 * Tear the port down: release the RX/TX descriptor rings, reset the
 * port hardware, and clear/mask all of its interrupt sources.
 *
 * When statistics come from hardware MIB counters
 * (UPDATE_STATS_BY_SOFTWARE not defined), the accumulated statistics
 * are printed once per running session -- guarded by the
 * MAGIC_ETH_RUNNING marker -- and then cleared.  Always returns 0.
 */
static int mv64360_eth_real_stop (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Release the ring buffers first ... */
	mv64360_eth_free_tx_rings (dev);
	mv64360_eth_free_rx_rings (dev);

	/* ... then reset the port hardware itself. */
	eth_port_reset (ethernet_private->port_num);

	/* Clear pending causes and mask every port interrupt source. */
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
	/* Drop this port's bit from the CPU interrupt mask as well. */
	MV_RESET_REG_BITS (MV64360_CPU_INTERRUPT0_MASK_HIGH,
			   BIT0 << port_num);

#ifndef UPDATE_STATS_BY_SOFTWARE
	/* Print statistics only if the port was actually running, so a
	 * double stop does not print twice. */
	if (port_private->eth_running == MAGIC_ETH_RUNNING) {
		port_private->eth_running = 0;
		mv64360_eth_print_stat (dev);
	}
	memset (port_private->stats, 0, sizeof (struct net_device_stats));
#endif
	DP (printf ("\nEthernet stopped ... \n"));
	return 0;
}
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714int mv64360_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
715 int dataSize)
716{
717 ETH_PORT_INFO *ethernet_private;
718 struct mv64360_eth_priv *port_private;
719 unsigned int port_num;
720 PKT_INFO pkt_info;
721 ETH_FUNC_RET_STATUS status;
722 struct net_device_stats *stats;
723 ETH_FUNC_RET_STATUS release_result;
724
725 ethernet_private = (ETH_PORT_INFO *) dev->priv;
726 port_private =
727 (struct mv64360_eth_priv *) ethernet_private->port_private;
728 port_num = port_private->port_num;
729
730 stats = port_private->stats;
731
732
733 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
734 pkt_info.byte_cnt = dataSize;
735 pkt_info.buf_ptr = (unsigned int) dataPtr;
736 pkt_info.return_info = 0;
737
738 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
739 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
740 printf ("Error on transmitting packet ..");
741 if (status == ETH_QUEUE_FULL)
742 printf ("ETH Queue is full. \n");
743 if (status == ETH_QUEUE_LAST_RESOURCE)
744 printf ("ETH Queue: using last available resource. \n");
745 goto error;
746 }
747
748
749 stats->tx_bytes += dataSize;
750 stats->tx_packets++;
751
752
753 do {
754 release_result =
755 eth_tx_return_desc (ethernet_private, ETH_Q0,
756 &pkt_info);
757 switch (release_result) {
758 case ETH_OK:
759 DP (printf ("descriptor released\n"));
760 if (pkt_info.cmd_sts & BIT0) {
761 printf ("Error in TX\n");
762 stats->tx_errors++;
763
764 }
765 break;
766 case ETH_RETRY:
767 DP (printf ("transmission still in process\n"));
768 break;
769
770 case ETH_ERROR:
771 printf ("routine can not access Tx desc ring\n");
772 break;
773
774 case ETH_END_OF_JOB:
775 DP (printf ("the routine has nothing to release\n"));
776 break;
777 default:
778 break;
779 }
780 } while (release_result == ETH_OK);
781
782
783 return 0;
784 error:
785 return 1;
786}
787
788
789
790
791
792
793
794
795
796
797
798
799
800int mv64360_eth_receive (struct eth_device *dev)
801{
802 ETH_PORT_INFO *ethernet_private;
803 struct mv64360_eth_priv *port_private;
804 unsigned int port_num;
805 PKT_INFO pkt_info;
806 struct net_device_stats *stats;
807
808
809 ethernet_private = (ETH_PORT_INFO *) dev->priv;
810 port_private =
811 (struct mv64360_eth_priv *) ethernet_private->port_private;
812 port_num = port_private->port_num;
813 stats = port_private->stats;
814
815 while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) ==
816 ETH_OK)) {
817
818#ifdef DEBUG_MV_ETH
819 if (pkt_info.byte_cnt != 0) {
820 printf ("%s: Received %d byte Packet @ 0x%x\n",
821 __FUNCTION__, pkt_info.byte_cnt,
822 pkt_info.buf_ptr);
823 }
824#endif
825
826 stats->rx_packets++;
827 stats->rx_bytes += pkt_info.byte_cnt;
828
829
830
831
832
833 if (((pkt_info.
834 cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
835 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
836 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
837 stats->rx_dropped++;
838
839 printf ("Received packet spread on multiple descriptors\n");
840
841
842 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
843 stats->rx_errors++;
844 }
845
846
847 pkt_info.buf_ptr &= ~0x7;
848 pkt_info.byte_cnt = 0x0000;
849
850 if (eth_rx_return_buff
851 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
852 printf ("Error while returning the RX Desc to Ring\n");
853 } else {
854 DP (printf ("RX Desc returned to Ring\n"));
855 }
856
857 } else {
858
859
860#ifdef DEBUG_MV_ETH
861 printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
862#endif
863
864 NetReceive ((uchar *) pkt_info.buf_ptr,
865 (int) pkt_info.byte_cnt);
866
867
868
869 pkt_info.buf_ptr &= ~0x7;
870 pkt_info.byte_cnt = 0x0000;
871 DP (printf
872 ("RX: pkt_info.buf_ptr = %x\n",
873 pkt_info.buf_ptr));
874 if (eth_rx_return_buff
875 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
876 printf ("Error while returning the RX Desc to Ring\n");
877 } else {
878 DP (printf ("RX Desc returned to Ring\n"));
879 }
880
881
882
883 }
884 }
885 mv64360_eth_get_stats (dev);
886 return 1;
887}
888
889
890
891
892
893
894
895
896
897
898
899static struct net_device_stats *mv64360_eth_get_stats (struct eth_device *dev)
900{
901 ETH_PORT_INFO *ethernet_private;
902 struct mv64360_eth_priv *port_private;
903 unsigned int port_num;
904
905 ethernet_private = (ETH_PORT_INFO *) dev->priv;
906 port_private =
907 (struct mv64360_eth_priv *) ethernet_private->port_private;
908 port_num = port_private->port_num;
909
910 mv64360_eth_update_stat (dev);
911
912 return port_private->stats;
913}
914
915
916
917
918
919
920
921
922
923
924
/*
 * Accumulate the port's hardware MIB counters into the software
 * net_device_stats block.
 *
 * NOTE(review): eth_read_mib_counter() is presumably read-to-clear on
 * this hardware -- the *_HIGH octet counters are read into 'dummy'
 * apparently only to clear them; confirm against the MV64360 spec.
 */
static void mv64360_eth_update_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	struct net_device_stats *stats;
	unsigned int port_num;
	volatile unsigned int dummy;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;
	stats = port_private->stats;

	/* Good frame / octet counters. */
	stats->rx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_RECEIVED);
	stats->tx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_SENT);
	stats->rx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);

	/* Read the HIGH halves only to clear them (value discarded). */
	dummy = eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
	stats->tx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_SENT_LOW);
	dummy = eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_SENT_HIGH);
	stats->rx_errors += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MAC_RECEIVE_ERROR);

	/* CRC errors are accounted as dropped frames. */
	stats->rx_dropped +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_BAD_CRC_EVENT);
	stats->multicast += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MULTICAST_FRAMES_RECEIVED);
	stats->collisions +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_COLLISION) +
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_LATE_COLLISION);
	/* Undersize + oversize frames count as length errors. */
	stats->rx_length_errors +=
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_UNDERSIZE_RECEIVED)
		+
		(unsigned long) eth_read_mib_counter (ethernet_private->
						      port_num,
						      ETH_MIB_OVERSIZE_RECEIVED);

}
996
997#ifndef UPDATE_STATS_BY_SOFTWARE
998
999
1000
1001
1002
1003
1004
1005
1006
/*
 * Print the accumulated network statistics on the console.
 *
 * Error, drop, multicast, collision, and length-error lines are only
 * printed when their counters are nonzero.  Only used when statistics
 * come from the hardware MIB counters (UPDATE_STATS_BY_SOFTWARE not
 * defined); called once from mv64360_eth_real_stop().
 */
static void mv64360_eth_print_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	struct net_device_stats *stats;
	unsigned int port_num;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;	/* currently unused here */
	stats = port_private->stats;

	/* Print received/sent totals unconditionally. */
	printf ("\n### Network statistics: ###\n");
	printf ("--------------------------\n");
	printf (" Packets received:             %ld\n", stats->rx_packets);
	printf (" Packets send:                 %ld\n", stats->tx_packets);
	printf (" Received bytes:               %ld\n", stats->rx_bytes);
	printf (" Send bytes:                   %ld\n", stats->tx_bytes);
	if (stats->rx_errors != 0)
		printf (" Rx Errors:                    %ld\n",
			stats->rx_errors);
	if (stats->rx_dropped != 0)
		printf (" Rx dropped (CRC Errors):      %ld\n",
			stats->rx_dropped);
	if (stats->multicast != 0)
		printf (" Rx mulicast frames:           %ld\n",
			stats->multicast);
	if (stats->collisions != 0)
		printf (" No. of collisions:            %ld\n",
			stats->collisions);
	if (stats->rx_length_errors != 0)
		printf (" Rx length errors:             %ld\n",
			stats->rx_length_errors);
}
1043#endif
1044
1045
1046
1047
1048
1049
1050
1051bool db64360_eth_start (struct eth_device *dev)
1052{
1053 return (mv64360_eth_open (dev));
1054}
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1249 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1250
1251#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1252 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1253 (1 << (8 + tx_queue)))
1254
1255#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1256MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1257
1258#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1259MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1260
1261#define CURR_RFD_GET(p_curr_desc, queue) \
1262 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1263
1264#define CURR_RFD_SET(p_curr_desc, queue) \
1265 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1266
1267#define USED_RFD_GET(p_used_desc, queue) \
1268 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1269
1270#define USED_RFD_SET(p_used_desc, queue)\
1271(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1272
1273
1274#define CURR_TFD_GET(p_curr_desc, queue) \
1275 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1276
1277#define CURR_TFD_SET(p_curr_desc, queue) \
1278 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1279
1280#define USED_TFD_GET(p_used_desc, queue) \
1281 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1282
1283#define USED_TFD_SET(p_used_desc, queue) \
1284 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1285
1286#define FIRST_TFD_GET(p_first_desc, queue) \
1287 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1288
1289#define FIRST_TFD_SET(p_first_desc, queue) \
1290 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1291
1292
1293
1294#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1295
1296#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1297
1298#define LINK_UP_TIMEOUT 100000
1299#define PHY_BUSY_TIMEOUT 10000000
1300
1301
1302
1303
1304static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1305static int ethernet_phy_get (ETH_PORT eth_port_num);
1306
1307
1308static void eth_set_access_control (ETH_PORT eth_port_num,
1309 ETH_WIN_PARAM * param);
1310static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1311 ETH_QUEUE queue, int option);
1312#if 0
1313static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1314 unsigned char mc_byte,
1315 ETH_QUEUE queue, int option);
1316static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1317 unsigned char crc8,
1318 ETH_QUEUE queue, int option);
1319#endif
1320
1321static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1322 int byte_count);
1323
1324void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1325
1326
1327typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1328u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1329{
1330 u32 result = 0;
1331 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1332
1333 if (enable & (1 << bank))
1334 return 0;
1335 if (bank == BANK0)
1336 result = MV_REG_READ (MV64360_CS_0_BASE_ADDR);
1337 if (bank == BANK1)
1338 result = MV_REG_READ (MV64360_CS_1_BASE_ADDR);
1339 if (bank == BANK2)
1340 result = MV_REG_READ (MV64360_CS_2_BASE_ADDR);
1341 if (bank == BANK3)
1342 result = MV_REG_READ (MV64360_CS_3_BASE_ADDR);
1343 result &= 0x0000ffff;
1344 result = result << 16;
1345 return result;
1346}
1347
1348u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1349{
1350 u32 result = 0;
1351 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1352
1353 if (enable & (1 << bank))
1354 return 0;
1355 if (bank == BANK0)
1356 result = MV_REG_READ (MV64360_CS_0_SIZE);
1357 if (bank == BANK1)
1358 result = MV_REG_READ (MV64360_CS_1_SIZE);
1359 if (bank == BANK2)
1360 result = MV_REG_READ (MV64360_CS_2_SIZE);
1361 if (bank == BANK3)
1362 result = MV_REG_READ (MV64360_CS_3_SIZE);
1363 result += 1;
1364 result &= 0x0000ffff;
1365 result = result << 16;
1366 return result;
1367}
1368
1369u32 mv_get_internal_sram_base (void)
1370{
1371 u32 result;
1372
1373 result = MV_REG_READ (MV64360_INTEGRATED_SRAM_BASE_ADDR);
1374 result &= 0x0000ffff;
1375 result = result << 16;
1376 return result;
1377}
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1405{
1406 int queue;
1407 ETH_WIN_PARAM win_param;
1408
1409 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1410 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1411 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1412 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1413
1414 p_eth_port_ctrl->port_rx_queue_command = 0;
1415 p_eth_port_ctrl->port_tx_queue_command = 0;
1416
1417
1418 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1419 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1420 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1421 p_eth_port_ctrl->rx_resource_err[queue] = false;
1422 }
1423
1424 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1425 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1426 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1427 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1428 p_eth_port_ctrl->tx_resource_err[queue] = false;
1429 }
1430
1431 eth_port_reset (p_eth_port_ctrl->port_num);
1432
1433
1434 win_param.win = ETH_WIN0;
1435 win_param.target = ETH_TARGET_DRAM;
1436 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1437#ifndef CONFIG_NOT_COHERENT_CACHE
1438 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1439#endif
1440 win_param.high_addr = 0;
1441
1442 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1443 win_param.size = mv_get_dram_bank_size (BANK0);
1444 if (win_param.size == 0)
1445 win_param.enable = 0;
1446 else
1447 win_param.enable = 1;
1448 win_param.access_ctrl = EWIN_ACCESS_FULL;
1449
1450
1451 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1452
1453
1454 win_param.win = ETH_WIN1;
1455 win_param.target = ETH_TARGET_DRAM;
1456 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1457#ifndef CONFIG_NOT_COHERENT_CACHE
1458 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1459#endif
1460 win_param.high_addr = 0;
1461
1462 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1463 win_param.size = mv_get_dram_bank_size (BANK1);
1464 if (win_param.size == 0)
1465 win_param.enable = 0;
1466 else
1467 win_param.enable = 1;
1468 win_param.access_ctrl = EWIN_ACCESS_FULL;
1469
1470
1471 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1472
1473
1474 win_param.win = ETH_WIN2;
1475 win_param.target = ETH_TARGET_DRAM;
1476 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1477#ifndef CONFIG_NOT_COHERENT_CACHE
1478 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1479#endif
1480 win_param.high_addr = 0;
1481
1482 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1483 win_param.size = mv_get_dram_bank_size (BANK2);
1484 if (win_param.size == 0)
1485 win_param.enable = 0;
1486 else
1487 win_param.enable = 1;
1488 win_param.access_ctrl = EWIN_ACCESS_FULL;
1489
1490
1491 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1492
1493
1494 win_param.win = ETH_WIN3;
1495 win_param.target = ETH_TARGET_DRAM;
1496 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1497#ifndef CONFIG_NOT_COHERENT_CACHE
1498 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1499#endif
1500 win_param.high_addr = 0;
1501
1502 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1503 win_param.size = mv_get_dram_bank_size (BANK3);
1504 if (win_param.size == 0)
1505 win_param.enable = 0;
1506 else
1507 win_param.enable = 1;
1508 win_param.access_ctrl = EWIN_ACCESS_FULL;
1509
1510
1511 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1512
1513
1514 win_param.win = ETH_WIN4;
1515 win_param.target = EBAR_TARGET_CBS;
1516 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1517 win_param.high_addr = 0;
1518 win_param.base_addr = mv_get_internal_sram_base ();
1519 win_param.size = MV64360_INTERNAL_SRAM_SIZE;
1520 win_param.enable = 1;
1521 win_param.access_ctrl = EWIN_ACCESS_FULL;
1522
1523
1524 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1525
1526 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1527
1528 ethernet_phy_set (p_eth_port_ctrl->port_num,
1529 p_eth_port_ctrl->port_phy_addr);
1530
1531 return;
1532
1533}
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
/*
 * eth_port_start - start the Ethernet port.
 *
 * Points the hardware current-descriptor registers of every TX and RX
 * queue at the first software descriptor, programs the unicast MAC
 * address for each active RX queue, writes the cached config/serial/
 * SDMA values, enables the serial port and finally enables RX.
 *
 * Returns false when PHY status register bit 0x20 is clear (in
 * standard MII this is the "auto-negotiation complete" bit), true
 * otherwise.
 */
static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
{
	int queue;
	volatile ETH_TX_DESC *p_tx_curr_desc;
	volatile ETH_RX_DESC *p_rx_curr_desc;
	unsigned int phy_reg_data;
	ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;

	/* Tell the hardware where each TX queue's current descriptor is
	 * (one 32-bit pointer register per queue, 4 bytes apart). */
	for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
		CURR_TFD_GET (p_tx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_tx_curr_desc));

	}

	/* Same for the RX queues; additionally program the port MAC
	 * address into the unicast filter for each initialized queue. */
	for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
		CURR_RFD_GET (p_rx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_rx_curr_desc));

		if (p_rx_curr_desc != NULL)
			/* Only queues with a descriptor ring are active. */
			eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
					      p_eth_port_ctrl->port_mac_addr,
					      queue);
	}

	/* Write the cached configuration values to the hardware. */
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_config);

	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      p_eth_port_ctrl->port_config_extend);

	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
		      p_eth_port_ctrl->port_serial_control);

	/* Enable the serial port after its configuration is in place. */
	MV_SET_REG_BITS (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
			 ETH_SERIAL_PORT_ENABLE);

	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_sdma_config);

	/* TX queue 0 token bucket: effectively no rate limiting. */
	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
		      (eth_port_num), 0x3fffffff);
	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
		      (eth_port_num), 0x03fffcff);

	/* NOTE(review): MTU register written as 0 — confirm this means
	 * "no limit" on the MV64360. */
	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);

	/* Enable the RX queues that were set up in the init phase. */
	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
		      p_eth_port_ctrl->port_rx_queue_command);

	/* Check PHY status register (reg 1) bit 0x20 for link readiness. */
	eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);

	if (!(phy_reg_data & 0x20))
		return false;

	return true;
}
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1656 unsigned char *p_addr, ETH_QUEUE queue)
1657{
1658 unsigned int mac_h;
1659 unsigned int mac_l;
1660
1661 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1662 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1663 (p_addr[2] << 8) | (p_addr[3] << 0);
1664
1665 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1666 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1667
1668
1669 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1670
1671 return;
1672}
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697static bool eth_port_uc_addr (ETH_PORT eth_port_num,
1698 unsigned char uc_nibble,
1699 ETH_QUEUE queue, int option)
1700{
1701 unsigned int unicast_reg;
1702 unsigned int tbl_offset;
1703 unsigned int reg_offset;
1704
1705
1706 uc_nibble = (0xf & uc_nibble);
1707 tbl_offset = (uc_nibble / 4) * 4;
1708 reg_offset = uc_nibble % 4;
1709
1710 switch (option) {
1711 case REJECT_MAC_ADDR:
1712
1713 unicast_reg =
1714 MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1715 (eth_port_num)
1716 + tbl_offset));
1717
1718 unicast_reg &= (0x0E << (8 * reg_offset));
1719
1720 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1721 (eth_port_num)
1722 + tbl_offset), unicast_reg);
1723 break;
1724
1725 case ACCEPT_MAC_ADDR:
1726
1727 unicast_reg =
1728 MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1729 (eth_port_num)
1730 + tbl_offset));
1731
1732 unicast_reg |= ((0x01 | queue) << (8 * reg_offset));
1733
1734 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1735 (eth_port_num)
1736 + tbl_offset), unicast_reg);
1737
1738 break;
1739
1740 default:
1741 return false;
1742 }
1743 return true;
1744}
1745
1746#if 0
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
/*
 * eth_port_mc_addr - enable/disable reception of a multicast address.
 *
 * Addresses of the form 01-00-5E-00-00-XX are handled through the
 * special multicast table, keyed directly on the last byte.  Every
 * other multicast address goes through the "other" multicast table,
 * keyed on an 8-bit CRC of the 48 address bits computed bit-by-bit
 * below (one hand-expanded XOR network per CRC output bit).
 *
 * Currently compiled out (#if 0).
 */
static void eth_port_mc_addr (ETH_PORT eth_port_num,
			      unsigned char *p_addr,
			      ETH_QUEUE queue, int option)
{
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int mac_array[48];
	int crc[8];
	int i;

	/* Special multicast range: only the last byte varies. */
	if ((p_addr[0] == 0x01) &&
	    (p_addr[1] == 0x00) &&
	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00))

		eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
	else {
		/* Spread all 48 address bits into mac_array[0..47]. */
		mac_h = (p_addr[0] << 8) | (p_addr[1]);
		mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

		for (i = 0; i < 32; i++)
			mac_array[i] = (mac_l >> i) & 0x1;
		for (i = 32; i < 48; i++)
			mac_array[i] = (mac_h >> (i - 32)) & 0x1;

		/* 8-bit CRC: each output bit is a fixed XOR of inputs. */
		crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
			mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
			mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
			mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
			mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
			mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[0];

		crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
			mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
			mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
			mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
			mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
			mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
			mac_array[0];

		crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
			mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
			mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
			mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
			mac_array[2] ^ mac_array[1] ^ mac_array[0];

		crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
			mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
			mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
			mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
			mac_array[2] ^ mac_array[1];

		crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
			mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
			mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
			mac_array[2];

		crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
			mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
			mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
			mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
			mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
			mac_array[3];

		crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
			mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
			mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
			mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[6] ^ mac_array[5] ^ mac_array[4];

		crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
			mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
			mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
			mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
			mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[5];

		/* Assemble the 8 CRC bits into the table index. */
		for (i = 0; i < 8; i++)
			crc_result = crc_result | (crc[i] << i);

		eth_port_omc_addr (eth_port_num, crc_result, queue, option);
	}
	return;
}
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1913 unsigned char mc_byte,
1914 ETH_QUEUE queue, int option)
1915{
1916 unsigned int smc_table_reg;
1917 unsigned int tbl_offset;
1918 unsigned int reg_offset;
1919
1920
1921 tbl_offset = (mc_byte / 4) * 4;
1922 reg_offset = mc_byte % 4;
1923 queue &= 0x7;
1924
1925 switch (option) {
1926 case REJECT_MAC_ADDR:
1927
1928 smc_table_reg =
1929 MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1930 smc_table_reg &= (0x0E << (8 * reg_offset));
1931
1932 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1933 break;
1934
1935 case ACCEPT_MAC_ADDR:
1936
1937 smc_table_reg =
1938 MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1939 smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
1940
1941 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1942 break;
1943
1944 default:
1945 return false;
1946 }
1947 return true;
1948}
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1977 unsigned char crc8,
1978 ETH_QUEUE queue, int option)
1979{
1980 unsigned int omc_table_reg;
1981 unsigned int tbl_offset;
1982 unsigned int reg_offset;
1983
1984
1985 tbl_offset = (crc8 / 4) * 4;
1986 reg_offset = crc8 % 4;
1987 queue &= 0x7;
1988
1989 switch (option) {
1990 case REJECT_MAC_ADDR:
1991
1992 omc_table_reg =
1993 MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1994 omc_table_reg &= (0x0E << (8 * reg_offset));
1995
1996 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
1997 break;
1998
1999 case ACCEPT_MAC_ADDR:
2000
2001 omc_table_reg =
2002 MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2003 omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
2004
2005 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2006 break;
2007
2008 default:
2009 return false;
2010 }
2011 return true;
2012}
2013#endif
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
2033{
2034 int table_index;
2035
2036
2037 for (table_index = 0; table_index <= 0xC; table_index += 4)
2038 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
2039 (eth_port_num) + table_index), 0);
2040
2041 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2042
2043 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2044
2045 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2046 }
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2067{
2068 int i;
2069 unsigned int dummy;
2070
2071
2072 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2073 i += 4)
2074 dummy = MV_REG_READ ((MV64360_ETH_MIB_COUNTERS_BASE
2075 (eth_port_num) + i));
2076
2077 return;
2078}
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2102 unsigned int mib_offset)
2103{
2104 return (MV_REG_READ (MV64360_ETH_MIB_COUNTERS_BASE (eth_port_num)
2105 + mib_offset));
2106}
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2126{
2127 unsigned int reg_data;
2128
2129 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2130
2131 reg_data &= ~(0x1F << (5 * eth_port_num));
2132 reg_data |= (phy_addr << (5 * eth_port_num));
2133
2134 MV_REG_WRITE (MV64360_ETH_PHY_ADDR_REG, reg_data);
2135
2136 return;
2137}
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155static int ethernet_phy_get (ETH_PORT eth_port_num)
2156{
2157 unsigned int reg_data;
2158
2159 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2160
2161 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2162}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2182{
2183 unsigned int time_out = 50;
2184 unsigned int phy_reg_data;
2185
2186
2187 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2188 phy_reg_data |= 0x8000;
2189 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2190
2191
2192 do {
2193 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2194
2195 if (time_out-- == 0)
2196 return false;
2197 }
2198 while (!(phy_reg_data & 0x20));
2199
2200 return true;
2201}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
/*
 * eth_port_reset - stop all activity on the given Ethernet port.
 *
 * Stops every active TX and RX DMA queue (busy-waiting for the
 * hardware to acknowledge), clears the MIB counters, and finally
 * disables the serial port.  Descriptor rings are left untouched.
 */
static void eth_port_reset (ETH_PORT eth_port_num)
{
	unsigned int reg_data;

	/* TX queues: the low byte of the command register holds the
	 * enable bits; writing them shifted into the high byte requests
	 * a stop of those queues. */
	reg_data =
		MV_REG_READ (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
			     (eth_port_num));

	if (reg_data & 0xFF) {

		MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
			      (eth_port_num), (reg_data << 8));

		/* Busy-wait until the hardware clears the enable bits. */
		do {

			reg_data =
				MV_REG_READ
				(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
				 (eth_port_num));
		}
		while (reg_data & 0xFF);
	}

	/* Same stop-and-wait procedure for the RX queues. */
	reg_data =
		MV_REG_READ (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
			     (eth_port_num));

	if (reg_data & 0xFF) {

		MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
			      (eth_port_num), (reg_data << 8));

		/* Busy-wait until the hardware clears the enable bits. */
		do {

			reg_data =
				MV_REG_READ
				(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
				 (eth_port_num));
		}
		while (reg_data & 0xFF);
	}

	/* Reset the statistics counters. */
	eth_clear_mib_counters (eth_port_num);

	/* Disable the serial port last, once DMA has stopped. */
	reg_data =
		MV_REG_READ (MV64360_ETH_PORT_SERIAL_CONTROL_REG
			     (eth_port_num));
	reg_data &= ~ETH_SERIAL_PORT_ENABLE;
	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
		      reg_data);

	return;
}
2281
2282#if 0
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2303 unsigned int value)
2304{
2305 unsigned int eth_config_reg;
2306
2307 eth_config_reg =
2308 MV_REG_READ (MV64360_ETH_PORT_CONFIG_REG (eth_port_num));
2309 eth_config_reg |= value;
2310 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
2311 eth_config_reg);
2312
2313 return;
2314}
2315#endif
2316
2317#if 0
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
/*
 * ethernet_reset_config_reg - clear the given bits in a port config
 * register.  Currently compiled out (#if 0).
 *
 * NOTE(review): despite the name, this reads and writes the port
 * config EXTEND register, while ethernet_set_config_reg() above uses
 * the base config register — confirm which register was intended.
 */
static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
				       unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	eth_config_reg &= ~value;
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      eth_config_reg);

	return;
}
2350#endif
2351
2352#if 0
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
/*
 * ethernet_get_config_reg - return the current value of a port config
 * register.  Currently compiled out (#if 0).
 *
 * NOTE(review): despite the name, this reads the port config EXTEND
 * register — confirm which register was intended.
 */
static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	return eth_config_reg;
}
2378
2379#endif
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2402 unsigned int phy_reg, unsigned int *value)
2403{
2404 unsigned int reg_value;
2405 unsigned int time_out = PHY_BUSY_TIMEOUT;
2406 int phy_addr;
2407
2408 phy_addr = ethernet_phy_get (eth_port_num);
2409
2410
2411
2412 do {
2413 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2414 if (time_out-- == 0) {
2415 return false;
2416 }
2417 }
2418 while (reg_value & ETH_SMI_BUSY);
2419
2420
2421
2422 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2423 (phy_addr << 16) | (phy_reg << 21) |
2424 ETH_SMI_OPCODE_READ);
2425
2426 time_out = PHY_BUSY_TIMEOUT;
2427
2428 do {
2429 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2430 if (time_out-- == 0) {
2431 return false;
2432 }
2433 }
2434 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2435
2436
2437#define PHY_UPDATE_TIMEOUT 10000
2438 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2439
2440 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2441
2442 *value = reg_value & 0xffff;
2443
2444 return true;
2445}
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
2468 unsigned int phy_reg, unsigned int value)
2469{
2470 unsigned int reg_value;
2471 unsigned int time_out = PHY_BUSY_TIMEOUT;
2472 int phy_addr;
2473
2474 phy_addr = ethernet_phy_get (eth_port_num);
2475
2476
2477 do {
2478 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2479 if (time_out-- == 0) {
2480 return false;
2481 }
2482 }
2483 while (reg_value & ETH_SMI_BUSY);
2484
2485
2486 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2487 (phy_addr << 16) | (phy_reg << 21) |
2488 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2489 return true;
2490}
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
/*
 * eth_set_access_control - program one Ethernet address decode window.
 *
 * @param: window number, target, attributes, base/high address, size,
 *         enable flag and access protection for the window.
 *
 * The protection, size, base and remap registers are programmed
 * first; the window is enabled or disabled last via the base address
 * enable register, where a SET bit disables the window.
 */
static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param)
{
	unsigned int access_prot_reg;

	/* Two access-protection bits per window. */
	access_prot_reg = MV_REG_READ (MV64360_ETH_ACCESS_PROTECTION_REG
				       (eth_port_num));
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MV_REG_WRITE (MV64360_ETH_ACCESS_PROTECTION_REG (eth_port_num),
		      access_prot_reg);

	/* Size register holds (size / 64K) - 1 in bits [31:16]. */
	MV_REG_WRITE ((MV64360_ETH_SIZE_REG_0 +
		       (ETH_SIZE_REG_GAP * param->win)),
		      (((param->size / 0x10000) - 1) << 16));

	/* Base register combines target, attributes and base address. */
	MV_REG_WRITE ((MV64360_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
		      (param->target | param->attributes | param->base_addr));

	/* Only windows 0-3 have a high-address remap register. */
	if (param->win < 4)
		MV_REG_WRITE ((MV64360_ETH_HIGH_ADDR_REMAP_REG_0 +
			       (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
			      param->high_addr);

	/* Enable register: a cleared bit enables the window. */
	if (param->enable == 1)
		MV_RESET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				   (1 << param->win));
	else
		MV_SET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				 (1 << param->win));
}
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
/*
 * ether_init_rx_desc_ring - build the RX descriptor ring of a queue.
 *
 * Lays out rx_desc_num descriptors starting at rx_desc_base_addr,
 * each pointing at a rx_buff_size slice of the buffer area starting
 * at rx_buff_base_addr.  All descriptors are handed to the DMA, and
 * the last one links back to the first to close the ring.
 *
 * Returns false when the buffer area is misaligned (start must be
 * 16-byte aligned, end 8-byte aligned) or the buffer size is out of
 * the [8, RX_BUFFER_MAX_SIZE] range; true on success.
 */
static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE rx_queue,
				     int rx_desc_num,
				     int rx_buff_size,
				     unsigned int rx_desc_base_addr,
				     unsigned int rx_buff_base_addr)
{
	ETH_RX_DESC *p_rx_desc;
	/* Tracks the last descriptor written, for closing the ring. */
	ETH_RX_DESC *p_rx_prev_desc;
	unsigned int buffer_addr;
	int ix;

	p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
	p_rx_prev_desc = p_rx_desc;
	buffer_addr = rx_buff_base_addr;

	/* Buffer area must start on a 16-byte boundary. */
	if (rx_buff_base_addr & 0xF)
		return false;

	/* Buffer size must be within the hardware-supported range. */
	if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
		return false;

	/* End of the buffer area must be 8-byte aligned. */
	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
		return false;

	/* Initialize each descriptor: give it to the DMA, link it to
	 * the next one, and flush it out of the data cache. */
	for (ix = 0; ix < rx_desc_num; ix++) {
		p_rx_desc->buf_size = rx_buff_size;
		p_rx_desc->byte_cnt = 0x0000;
		p_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
		p_rx_desc->next_desc_ptr =
			((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
		p_rx_desc->buf_ptr = buffer_addr;
		p_rx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_rx_desc, 0);
		buffer_addr += rx_buff_size;
		p_rx_prev_desc = p_rx_desc;
		p_rx_desc = (ETH_RX_DESC *)
			((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
	}

	/* Close the ring: last descriptor points back to the first. */
	p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
	D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);

	/* Both software pointers start at the head of the ring. */
	CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
	USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);

	p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
		(ETH_RX_DESC *) rx_desc_base_addr;
	p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
		rx_desc_num * RX_DESC_ALIGNED_SIZE;

	/* Mark this queue for enabling in eth_port_start(). */
	p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);

	return true;
}
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
/*
 * ether_init_tx_desc_ring - build the TX descriptor ring of a queue.
 *
 * Lays out tx_desc_num descriptors starting at tx_desc_base_addr,
 * each pre-pointed at a tx_buff_size slice of the buffer area at
 * tx_buff_base_addr, all owned by the CPU.  The last descriptor
 * links back to the first to close the ring.
 *
 * Returns false when the buffer area is not 16-byte aligned or the
 * buffer size is outside [TX_BUFFER_MIN_SIZE, TX_BUFFER_MAX_SIZE];
 * true on success.
 */
static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE tx_queue,
				     int tx_desc_num,
				     int tx_buff_size,
				     unsigned int tx_desc_base_addr,
				     unsigned int tx_buff_base_addr)
{

	ETH_TX_DESC *p_tx_desc;
	/* Tracks the last descriptor written, for closing the ring. */
	ETH_TX_DESC *p_tx_prev_desc;
	unsigned int buffer_addr;
	int ix;

	p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
	p_tx_prev_desc = p_tx_desc;
	buffer_addr = tx_buff_base_addr;

	/* Buffer area must start on a 16-byte boundary. */
	if (tx_buff_base_addr & 0xF)
		return false;

	/* Buffer size must be within the hardware-supported range. */
	if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
	    || (tx_buff_size < TX_BUFFER_MIN_SIZE))
		return false;

	/* Initialize each descriptor (CPU-owned), link it to the next
	 * one, and flush it out of the data cache. */
	for (ix = 0; ix < tx_desc_num; ix++) {
		p_tx_desc->byte_cnt = 0x0000;
		p_tx_desc->l4i_chk = 0x0000;
		p_tx_desc->cmd_sts = 0x00000000;
		p_tx_desc->next_desc_ptr =
			((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;

		p_tx_desc->buf_ptr = buffer_addr;
		p_tx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_tx_desc, 0);
		buffer_addr += tx_buff_size;
		p_tx_prev_desc = p_tx_desc;
		p_tx_desc = (ETH_TX_DESC *)
			((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);

	}

	/* Close the ring: last descriptor points back to the first. */
	p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
	D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);

	/* Both software pointers start at the head of the ring. */
	CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
	USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);

	p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
		(ETH_TX_DESC *) tx_desc_base_addr;
	p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
		(tx_desc_num * TX_DESC_ALIGNED_SIZE);

	/* Remember that this queue has a ring. */
	p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);

	return true;
}
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
2763 ETH_QUEUE tx_queue,
2764 PKT_INFO * p_pkt_info)
2765{
2766 volatile ETH_TX_DESC *p_tx_desc_first;
2767 volatile ETH_TX_DESC *p_tx_desc_curr;
2768 volatile ETH_TX_DESC *p_tx_next_desc_curr;
2769 volatile ETH_TX_DESC *p_tx_desc_used;
2770 unsigned int command_status;
2771
2772
2773 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
2774 return ETH_QUEUE_FULL;
2775
2776
2777 CURR_TFD_GET (p_tx_desc_curr, tx_queue);
2778 USED_TFD_GET (p_tx_desc_used, tx_queue);
2779
2780 if (p_tx_desc_curr == NULL)
2781 return ETH_ERROR;
2782
2783
2784 p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
2785 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2786
2787 if (command_status & (ETH_TX_FIRST_DESC)) {
2788
2789 FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
2790 p_tx_desc_first = p_tx_desc_curr;
2791 } else {
2792 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
2793 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2794 }
2795
2796
2797
2798
2799 if (p_pkt_info->byte_cnt <= 8) {
2800 printf ("You have failed in the < 8 bytes errata - fixme\n");
2801 return ETH_ERROR;
2802
2803 p_tx_desc_curr->buf_ptr =
2804 (unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
2805 eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
2806 p_pkt_info->byte_cnt);
2807 } else
2808 p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;
2809
2810 p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
2811 p_tx_desc_curr->return_info = p_pkt_info->return_info;
2812
2813 if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
2814
2815 p_tx_desc_curr->cmd_sts = command_status |
2816 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2817
2818 if (p_tx_desc_curr != p_tx_desc_first)
2819 p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2820
2821
2822
2823 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2824 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
2825 CPU_PIPE_FLUSH;
2826
2827
2828 ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);
2829
2830
2831 p_tx_desc_first = p_tx_next_desc_curr;
2832 FIRST_TFD_SET (p_tx_desc_first, tx_queue);
2833
2834 } else {
2835 p_tx_desc_curr->cmd_sts = command_status;
2836 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2837 }
2838
2839
2840 if (p_tx_next_desc_curr == p_tx_desc_used) {
2841
2842 CURR_TFD_SET (p_tx_desc_first, tx_queue);
2843
2844 p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
2845 return ETH_QUEUE_LAST_RESOURCE;
2846 } else {
2847
2848 CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
2849 return ETH_OK;
2850 }
2851}
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
/*
 * eth_tx_return_desc - reclaim the oldest transmitted descriptor.
 *
 * Fills p_pkt_info with the command/status word and return_info of
 * the reclaimed descriptor and advances the "used" pointer.
 *
 * Returns:
 *   ETH_ERROR      - no used-descriptor pointer for this queue
 *   ETH_RETRY      - oldest descriptor still owned by the DMA
 *   ETH_END_OF_JOB - nothing left to reclaim
 *   ETH_OK         - one descriptor reclaimed
 */
static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE tx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_used = NULL;
	volatile ETH_TX_DESC *p_tx_desc_first = NULL;
	unsigned int command_status;

	USED_TFD_GET (p_tx_desc_used, tx_queue);
	FIRST_TFD_GET (p_tx_desc_first, tx_queue);

	/* Ring must have been initialized. */
	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still owned by the DMA: transmission not finished yet. */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_RETRY;
	}

	/* Used pointer caught up with the first pointer while the ring
	 * is not in the "full" state: nothing left to reclaim. */
	if ((p_tx_desc_used == p_tx_desc_first) &&
	    (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_END_OF_JOB;
	}

	/* Hand the descriptor's info back to the caller. */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = p_tx_desc_used->return_info;
	p_tx_desc_used->return_info = 0;

	/* Advance the used pointer. */
	USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);

	/* A descriptor was freed, so the ring is no longer full. */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		p_eth_port_ctrl->tx_resource_err[tx_queue] = false;

	D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);

	return ETH_OK;

}
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
/*
 * eth_port_receive - fetch the next received packet from a queue.
 *
 * Fills p_pkt_info from the current RX descriptor (byte count and
 * buffer pointer adjusted by RX_BUF_OFFSET) and advances the
 * current-descriptor pointer.  The descriptor itself is recycled
 * later via eth_rx_return_buff().
 *
 * Returns:
 *   ETH_QUEUE_FULL - all descriptors are held by the CPU
 *   ETH_ERROR      - queue not initialized
 *   ETH_END_OF_JOB - no packet available (descriptor owned by DMA)
 *   ETH_OK         - one packet delivered
 */
static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
					     ETH_QUEUE rx_queue,
					     PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_rx_curr_desc;
	volatile ETH_RX_DESC *p_rx_next_curr_desc;
	volatile ETH_RX_DESC *p_rx_used_desc;
	unsigned int command_status;

	/* All descriptors held by the CPU: nothing the DMA can fill. */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
		printf ("\nRx Queue is full ...\n");
		return ETH_QUEUE_FULL;
	}

	CURR_RFD_GET (p_rx_curr_desc, rx_queue);
	USED_RFD_GET (p_rx_used_desc, rx_queue);

	/* Ring must have been initialized. */
	if (p_rx_curr_desc == NULL)
		return ETH_ERROR;

	p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
	command_status = p_rx_curr_desc->cmd_sts;

	/* Still owned by the DMA: no complete packet here yet. */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {

		D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);

		return ETH_END_OF_JOB;
	}

	/* Copy descriptor info out, skipping the RX buffer offset. */
	p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = p_rx_curr_desc->return_info;
	p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;

	/* The buffer now belongs to the caller. */
	p_rx_curr_desc->return_info = 0;

	/* Advance to the next descriptor. */
	CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);

	/* Current caught up with used: every descriptor is out. */
	if (p_rx_next_curr_desc == p_rx_used_desc)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = true;

	D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
	CPU_PIPE_FLUSH;
	return ETH_OK;
}
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
/*
 * eth_rx_return_buff - Return a receive buffer to the RX descriptor ring,
 * handing ownership of the descriptor back to the SDMA engine.
 *
 * The buffer described by @p_pkt_info is attached to the ring's "used"
 * descriptor, the ownership bit is set, and the "used" pointer advances.
 * The statement order matters: the descriptor fields must be written and
 * the CPU pipe flushed BEFORE the ownership bit is handed to DMA, and the
 * cache line flushed afterwards so the engine sees the update in RAM.
 *
 * @p_eth_port_ctrl: per-port control structure.
 * @rx_queue:        RX queue whose ring receives the buffer.
 * @p_pkt_info:      buffer pointer / cookie / byte count to install.
 *
 * Returns ETH_OK on success, ETH_ERROR if the ring is not initialized.
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE rx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_used_rx_desc;

	/* Fetch the next descriptor to be returned to DMA. */
	USED_RFD_GET (p_used_rx_desc, rx_queue);

	/* Sanity check: ring was never initialized. */
	if (p_used_rx_desc == NULL)
		return ETH_ERROR;

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->return_info = p_pkt_info->return_info;
	p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
	p_used_rx_desc->buf_size = MV64360_RX_BUFFER_SIZE;

	/* Ensure the field writes above complete before ownership flips. */
	CPU_PIPE_FLUSH;

	/* Hand the descriptor back to the SDMA engine. */
	p_used_rx_desc->cmd_sts =
		ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;

	/* Push the updated descriptor out of the data cache to RAM. */
	D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
	CPU_PIPE_FLUSH;

	/* Advance the ring's "used" pointer. */
	USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);

	/* A descriptor was returned, so the ring is no longer exhausted. */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = false;

	return ETH_OK;
}
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
/*
 * eth_port_set_rx_coal - Program the RX interrupt coalescing delay.
 *
 * Converts @delay (abstract delay units scaled by the core clock @t_clk)
 * into the 14-bit coalescing field of the port's SDMA configuration
 * register (bits 8..21), preserving all other bits, and returns the
 * value written.
 *
 * Compiled out (#if 0): interrupt coalescing is unused in this U-Boot
 * polled-mode driver; kept for reference.
 */
#if 0
static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	/* Read-modify-write: keep everything outside bits 8..21. */
	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
		      ((coal & 0x3fff) << 8) |
		      (MV_REG_READ
		       (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num))
		       & 0xffc000ff));
	return coal;
}

#endif
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
/*
 * eth_port_set_tx_coal - Program the TX interrupt coalescing delay.
 *
 * Converts @delay (abstract delay units scaled by the core clock @t_clk)
 * into the coalescing field of the port's TX FIFO urgent threshold
 * register (shifted to bit 4) and returns the value written.  Unlike the
 * RX variant this is a plain write, not read-modify-write.
 *
 * Compiled out (#if 0): interrupt coalescing is unused in this U-Boot
 * polled-mode driver; kept for reference.
 */
#if 0
static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64360_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
		      coal << 4);
	return coal;
}
#endif
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
/*
 * eth_b_copy - Copy @byte_count bytes from @src_addr to @dst_addr,
 * one byte at a time.
 *
 * Addresses are carried as unsigned int values, as this driver does
 * throughout.  A 32-bit zero is first written to the destination --
 * kept byte-for-byte from the original implementation; presumably a
 * dummy write to settle the destination line before the byte copy.
 * NOTE(review): confirm the dummy write is still required; it clobbers
 * 4 bytes at dst even when byte_count < 4.
 */
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	char *src = (char *) src_addr;
	char *dst = (char *) dst_addr;

	/* Dummy word write, preserved from the original. */
	*(unsigned int *) dst_addr = 0x0;

	while (byte_count != 0) {
		*dst++ = *src++;
		byte_count--;
	}
}
3186