1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <common.h>
15#include <net.h>
16#include <malloc.h>
17
18#include "mv_eth.h"
19
20
21
22#undef DEBUG_MV_ETH
23
24#ifdef DEBUG_MV_ETH
25#define DEBUG
26#define DP(x) x
27#else
28#define DP(x)
29#endif
30
31#undef MV64360_CHECKSUM_OFFLOAD
32
33
34
35
36
37
38
39
40
41
42#undef MV64360_RX_QUEUE_FILL_ON_TASK
43
44
45
46#define MAGIC_ETH_RUNNING 8031971
47#define MV64360_INTERNAL_SRAM_SIZE _256K
48#define EXTRA_BYTES 32
#define WRAP (ETH_HLEN + 2 + 4 + 16)
#define BUFFER_MTU (dev->mtu + WRAP)
51#define INT_CAUSE_UNMASK_ALL 0x0007ffff
52#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
#ifdef MV64360_RX_QUEUE_FILL_ON_TASK
54#define INT_CAUSE_MASK_ALL 0x00000000
55#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
56#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
57#endif
58
59
60#define MV_REG_READ(offset) my_le32_to_cpu(* (volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset))
61#define MV_REG_WRITE(offset,data) *(volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32 (data)
62#define MV_SET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
63#define MV_RESET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))
64
65
66static int mv64360_eth_real_open (struct eth_device *eth);
67static int mv64360_eth_real_stop (struct eth_device *eth);
68static struct net_device_stats *mv64360_eth_get_stats (struct eth_device
69 *dev);
70static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
71static void mv64360_eth_update_stat (struct eth_device *dev);
72bool db64360_eth_start (struct eth_device *eth);
73unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
74 unsigned int mib_offset);
75int mv64360_eth_receive (struct eth_device *dev);
76
77int mv64360_eth_xmit (struct eth_device *, volatile void *packet, int length);
78
79#ifndef UPDATE_STATS_BY_SOFTWARE
80static void mv64360_eth_print_stat (struct eth_device *dev);
81#endif
82
83extern unsigned int INTERNAL_REG_BASE_ADDR;
84
85
86
87
88#ifdef DEBUG_MV_ETH
89void print_globals (struct eth_device *dev)
90{
91 printf ("Ethernet PRINT_Globals-Debug function\n");
92 printf ("Base Address for ETH_PORT_INFO: %08x\n",
93 (unsigned int) dev->priv);
94 printf ("Base Address for mv64360_eth_priv: %08x\n",
95 (unsigned int) &(((ETH_PORT_INFO *) dev->priv)->
96 port_private));
97
98 printf ("GT Internal Base Address: %08x\n",
99 INTERNAL_REG_BASE_ADDR);
100 printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0], MV64360_TX_QUEUE_SIZE);
101 printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n", (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0], MV64360_RX_QUEUE_SIZE);
102 printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
103 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
104 p_rx_buffer_base[0],
105 (MV64360_RX_QUEUE_SIZE * MV64360_RX_BUFFER_SIZE) + 32);
106 printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
107 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
108 p_tx_buffer_base[0],
109 (MV64360_TX_QUEUE_SIZE * MV64360_TX_BUFFER_SIZE) + 32);
110}
111#endif
112
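/*
 * Unconditional 32-bit byte swap used by the MV_REG_* accessors for the
 * controller's little-endian registers.  The swap is its own inverse, so
 * my_cpu_to_le32() simply maps onto my_le32_to_cpu().
 */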
113#define my_cpu_to_le32(x) my_le32_to_cpu((x))
114
115unsigned long my_le32_to_cpu (unsigned long x)
116{
117 return (((x & 0x000000ffU) << 24) |
118 ((x & 0x0000ff00U) << 8) |
119 ((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24));
120}
121
122
123
124
125
126
127
128
129
130
131
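/*
 * Read PHY register 1 and the port status register and print the current
 * link state, duplex mode and speed of the given port.
 */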
132static void mv64360_eth_print_phy_status (struct eth_device *dev)
133{
134 struct mv64360_eth_priv *port_private;
135 unsigned int port_num;
136 ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
137 unsigned int port_status, phy_reg_data;
138
139 port_private =
140 (struct mv64360_eth_priv *) ethernet_private->port_private;
141 port_num = port_private->port_num;
142
143
144 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
145 if (!(phy_reg_data & 0x20)) {
146 printf ("Ethernet port changed link status to DOWN\n");
147 } else {
148 port_status =
149 MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));
150 printf ("Ethernet status port %d: Link up", port_num);
151 printf (", %s",
152 (port_status & BIT2) ? "Full Duplex" : "Half Duplex");
153 if (port_status & BIT4)
154 printf (", Speed 1 Gbps");
155 else
156 printf (", %s",
157 (port_status & BIT5) ? "Speed 100 Mbps" :
158 "Speed 10 Mbps");
159 printf ("\n");
160 }
161}
162
163
164
165
166
167int db64360_eth_probe (struct eth_device *dev)
168{
169 return ((int) db64360_eth_start (dev));
170}
171
172int db64360_eth_poll (struct eth_device *dev)
173{
174 return mv64360_eth_receive (dev);
175}
176
177int db64360_eth_transmit(struct eth_device *dev, void *packet, int length)
178{
179 mv64360_eth_xmit (dev, packet, length);
180 return 0;
181}
182
183void db64360_eth_disable (struct eth_device *dev)
184{
185 mv64360_eth_stop (dev);
186}
187
188
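/*
 * Probe all MV_ETH_DEVS ports: allocate the eth_device, port-private and
 * statistics structures, read the MAC address from the environment
 * (ethaddr/eth1addr/eth2addr), allocate the descriptor and buffer rings
 * and register each port with the U-Boot network core.
 */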
189void mv6436x_eth_initialize (bd_t * bis)
190{
191 struct eth_device *dev;
192 ETH_PORT_INFO *ethernet_private;
193 struct mv64360_eth_priv *port_private;
194 int devnum, x, temp;
195 char *s, *e, buf[64];
196
197 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
198 dev = calloc (sizeof (*dev), 1);
199 if (!dev) {
200 printf ("%s: mv_enet%d allocation failure, %s\n",
201 __FUNCTION__, devnum, "eth_device structure");
202 return;
203 }
204
205
206 sprintf (dev->name, "mv_enet%d", devnum);
207
208#ifdef DEBUG
209 printf ("Initializing %s\n", dev->name);
210#endif
211
212
213 switch (devnum) {
214 case 0:
215 s = "ethaddr";
216 break;
217
218 case 1:
219 s = "eth1addr";
220 break;
221
222 case 2:
223 s = "eth2addr";
224 break;
225
226 default:
227 printf ("%s: Invalid device number %d\n",
228 __FUNCTION__, devnum);
229 return;
230 }
231
232 temp = getenv_f(s, buf, sizeof (buf));
233 s = (temp > 0) ? buf : NULL;
234
235#ifdef DEBUG
236 printf ("Setting MAC %d to %s\n", devnum, s);
237#endif
238 for (x = 0; x < 6; ++x) {
239 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
240 if (s)
241 s = (*e) ? e + 1 : e;
242 }
243
244 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
245
246 dev->init = (void *) db64360_eth_probe;
247 dev->halt = (void *) ethernet_phy_reset;
248 dev->send = (void *) db64360_eth_transmit;
249 dev->recv = (void *) db64360_eth_poll;
250
251 ethernet_private = calloc (sizeof (*ethernet_private), 1);
252 dev->priv = (void *) ethernet_private;
253
254 if (!ethernet_private) {
255 printf ("%s: %s allocation failure, %s\n",
256 __FUNCTION__, dev->name,
257 "Private Device Structure");
258 free (dev);
259 return;
260 }
261
262 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
263 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
264
265
		port_private = calloc (sizeof (*port_private), 1);
267 ethernet_private->port_private = (void *)port_private;
268 if (!port_private) {
269 printf ("%s: %s allocation failure, %s\n",
270 __FUNCTION__, dev->name,
271 "Port Private Device Structure");
272
273 free (ethernet_private);
274 free (dev);
275 return;
276 }
277
278 port_private->stats =
279 calloc (sizeof (struct net_device_stats), 1);
280 if (!port_private->stats) {
281 printf ("%s: %s allocation failure, %s\n",
282 __FUNCTION__, dev->name,
283 "Net stat Structure");
284
285 free (port_private);
286 free (ethernet_private);
287 free (dev);
288 return;
289 }
290 memset (ethernet_private->port_private, 0,
291 sizeof (struct mv64360_eth_priv));
292 switch (devnum) {
293 case 0:
294 ethernet_private->port_num = ETH_0;
295 break;
296 case 1:
297 ethernet_private->port_num = ETH_1;
298 break;
299 case 2:
300 ethernet_private->port_num = ETH_2;
301 break;
302 default:
303 printf ("Invalid device number %d\n", devnum);
304 break;
305 };
306
307 port_private->port_num = devnum;
308
309
310
311
312 mv64360_eth_update_stat (dev);
313 memset (port_private->stats, 0,
314 sizeof (struct net_device_stats));
315
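		/*
		 * The MAC address is read from the environment a second
		 * time here; this repeats the parse done above and leaves
		 * dev->enetaddr unchanged.
		 */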
316 switch (devnum) {
317 case 0:
318 s = "ethaddr";
319 break;
320
321 case 1:
322 s = "eth1addr";
323 break;
324
325 case 2:
326 s = "eth2addr";
327 break;
328
329 default:
330 printf ("%s: Invalid device number %d\n",
331 __FUNCTION__, devnum);
332 return;
333 }
334
335 temp = getenv_f(s, buf, sizeof (buf));
336 s = (temp > 0) ? buf : NULL;
337
338#ifdef DEBUG
339 printf ("Setting MAC %d to %s\n", devnum, s);
340#endif
341 for (x = 0; x < 6; ++x) {
342 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
343 if (s)
344 s = (*e) ? e + 1 : e;
345 }
346
347 DP (printf ("Allocating descriptor and buffer rings\n"));
348
349 ethernet_private->p_rx_desc_area_base[0] =
350 (ETH_RX_DESC *) memalign (16,
351 RX_DESC_ALIGNED_SIZE *
352 MV64360_RX_QUEUE_SIZE + 1);
353 ethernet_private->p_tx_desc_area_base[0] =
354 (ETH_TX_DESC *) memalign (16,
355 TX_DESC_ALIGNED_SIZE *
356 MV64360_TX_QUEUE_SIZE + 1);
357
		ethernet_private->p_rx_buffer_base[0] =
			(char *) memalign (16,
					   MV64360_RX_QUEUE_SIZE *
					   MV64360_RX_BUFFER_SIZE + 1);
		ethernet_private->p_tx_buffer_base[0] =
			(char *) memalign (16,
					   MV64360_TX_QUEUE_SIZE *
					   MV64360_TX_BUFFER_SIZE + 1);
366
367#ifdef DEBUG_MV_ETH
368
369 print_globals (dev);
370#endif
371 eth_register (dev);
372
373 }
374 DP (printf ("%s: exit\n", __FUNCTION__));
375
376}
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392int mv64360_eth_open (struct eth_device *dev)
393{
394 return (mv64360_eth_real_open (dev));
395}
396
397
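/*
 * Bring the port up: disable the RX queues, clear and unmask the port
 * interrupt cause registers, set the PHY address, initialize the port,
 * build the TX and RX descriptor rings, start the port and verify the
 * PHY link (resetting the PHY if the link is down).  Returns 1 on
 * success, 0 if no link could be established.
 */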
398static int mv64360_eth_real_open (struct eth_device *dev)
399{
400
401 unsigned int queue;
402 ETH_PORT_INFO *ethernet_private;
403 struct mv64360_eth_priv *port_private;
404 unsigned int port_num;
405 u32 phy_reg_data;
406
407 ethernet_private = (ETH_PORT_INFO *) dev->priv;
408
409
410 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
411
412 port_private =
413 (struct mv64360_eth_priv *) ethernet_private->port_private;
414 port_num = port_private->port_num;
415
416
417 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
418 0x0000ff00);
419
420
421 MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
422 MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
423
424
425 MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num),
426 INT_CAUSE_UNMASK_ALL);
427
428
429 MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
430 INT_CAUSE_UNMASK_ALL_EXT);
431
432
433 ethernet_private->port_phy_addr = 0x8 + port_num;
434
435
436 eth_port_init (ethernet_private);
437
438
439
440
441 for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
442 unsigned int size;
443
444 port_private->tx_ring_size[queue] = MV64360_TX_QUEUE_SIZE;
445 size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
446 ethernet_private->tx_desc_area_size[queue] = size;
447
448
449 memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
450 0, ethernet_private->tx_desc_area_size[queue]);
451
452
453 if (ether_init_tx_desc_ring
454 (ethernet_private, ETH_Q0,
455 port_private->tx_ring_size[queue],
456 MV64360_TX_BUFFER_SIZE ,
457 (unsigned int) ethernet_private->
458 p_tx_desc_area_base[queue],
459 (unsigned int) ethernet_private->
460 p_tx_buffer_base[queue]) == false)
461 printf ("### Error initializing TX Ring\n");
462 }
463
464
465 for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
466 unsigned int size;
467
468
469 port_private->rx_ring_size[queue] = MV64360_RX_QUEUE_SIZE;
470 size = (port_private->rx_ring_size[queue] *
471 RX_DESC_ALIGNED_SIZE);
472 ethernet_private->rx_desc_area_size[queue] = size;
473
474
475 memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
476 0, ethernet_private->rx_desc_area_size[queue]);
477 if ((ether_init_rx_desc_ring
478 (ethernet_private, ETH_Q0,
479 port_private->rx_ring_size[queue],
480 MV64360_RX_BUFFER_SIZE ,
481 (unsigned int) ethernet_private->
482 p_rx_desc_area_base[queue],
483 (unsigned int) ethernet_private->
484 p_rx_buffer_base[queue])) == false)
485 printf ("### Error initializing RX Ring\n");
486 }
487
488 eth_port_start (ethernet_private);
489
490
491 MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num),
492 (0x5 << 17) |
493 (MV_REG_READ
494 (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num))
495 & 0xfff1ffff));
496
497
498
499
500
501
502 MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
503 MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));
504
505
506 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
507 if (!(phy_reg_data & 0x20)) {
508
509 if ((ethernet_phy_reset (port_num)) != true) {
			printf ("$$ Warning: No link on port %d\n",
511 port_num);
512 return 0;
513 } else {
514 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
515 if (!(phy_reg_data & 0x20)) {
516 printf ("### Error: Phy is not active\n");
517 return 0;
518 }
519 }
520 } else {
521 mv64360_eth_print_phy_status (dev);
522 }
523 port_private->eth_running = MAGIC_ETH_RUNNING;
524 return 1;
525}
526
527
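/*
 * Stop all TX queues of the port and walk the TX descriptor rings,
 * clearing the return_info field of any descriptor that still holds a
 * buffer reference.
 */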
528static int mv64360_eth_free_tx_rings (struct eth_device *dev)
529{
530 unsigned int queue;
531 ETH_PORT_INFO *ethernet_private;
532 struct mv64360_eth_priv *port_private;
533 unsigned int port_num;
534 volatile ETH_TX_DESC *p_tx_curr_desc;
535
536 ethernet_private = (ETH_PORT_INFO *) dev->priv;
537 port_private =
538 (struct mv64360_eth_priv *) ethernet_private->port_private;
539 port_num = port_private->port_num;
540
541
542 MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
543 0x0000ff00);
544
545
546 DP (printf ("Clearing previously allocated TX queues... "));
547 for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
548
549 for (p_tx_curr_desc =
550 ethernet_private->p_tx_desc_area_base[queue];
551 ((unsigned int) p_tx_curr_desc <= (unsigned int)
552 ethernet_private->p_tx_desc_area_base[queue] +
553 ethernet_private->tx_desc_area_size[queue]);
554 p_tx_curr_desc =
555 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
556 TX_DESC_ALIGNED_SIZE)) {
557
558 if (p_tx_curr_desc->return_info != 0) {
559 p_tx_curr_desc->return_info = 0;
560 DP (printf ("freed\n"));
561 }
562 }
563 DP (printf ("Done\n"));
564 }
565 return 0;
566}
567
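/*
 * Stop all RX queues of the port and walk the RX descriptor rings,
 * clearing the return_info field of any descriptor that still holds a
 * buffer reference.
 */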
568static int mv64360_eth_free_rx_rings (struct eth_device *dev)
569{
570 unsigned int queue;
571 ETH_PORT_INFO *ethernet_private;
572 struct mv64360_eth_priv *port_private;
573 unsigned int port_num;
574 volatile ETH_RX_DESC *p_rx_curr_desc;
575
576 ethernet_private = (ETH_PORT_INFO *) dev->priv;
577 port_private =
578 (struct mv64360_eth_priv *) ethernet_private->port_private;
579 port_num = port_private->port_num;
580
581
582
583 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
584 0x0000ff00);
585
586
587 DP (printf ("Clearing previously allocated RX queues... "));
588 for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
589
590 for (p_rx_curr_desc =
591 ethernet_private->p_rx_desc_area_base[queue];
592 (((unsigned int) p_rx_curr_desc <
593 ((unsigned int) ethernet_private->
594 p_rx_desc_area_base[queue] +
595 ethernet_private->rx_desc_area_size[queue])));
596 p_rx_curr_desc =
597 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
598 RX_DESC_ALIGNED_SIZE)) {
599 if (p_rx_curr_desc->return_info != 0) {
600 p_rx_curr_desc->return_info = 0;
601 DP (printf ("freed\n"));
602 }
603 }
604 DP (printf ("Done\n"));
605 }
606 return 0;
607}
608
609
610
611
612
613
614
615
616
617
618
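/*
 * Shut the port down: disable the ethernet address decode windows, free
 * the TX/RX rings, reset the port, mask its interrupts and (unless
 * UPDATE_STATS_BY_SOFTWARE is set) print and clear the collected
 * statistics.
 */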
619int mv64360_eth_stop (struct eth_device *dev)
620{
621
622 MV_REG_WRITE (MV64360_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
623 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
624 mv64360_eth_real_stop (dev);
625
626 return 0;
}
628
629
630
631static int mv64360_eth_real_stop (struct eth_device *dev)
632{
633 ETH_PORT_INFO *ethernet_private;
634 struct mv64360_eth_priv *port_private;
635 unsigned int port_num;
636
637 ethernet_private = (ETH_PORT_INFO *) dev->priv;
638 port_private =
639 (struct mv64360_eth_priv *) ethernet_private->port_private;
640 port_num = port_private->port_num;
641
642
643 mv64360_eth_free_tx_rings (dev);
644 mv64360_eth_free_rx_rings (dev);
645
646 eth_port_reset (ethernet_private->port_num);
647
648 MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
649 MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
650
651 MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num), 0);
652
653 MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
654 MV_RESET_REG_BITS (MV64360_CPU_INTERRUPT0_MASK_HIGH,
655 BIT0 << port_num);
656
657#ifndef UPDATE_STATS_BY_SOFTWARE
658
659
660
661
662 if (port_private->eth_running == MAGIC_ETH_RUNNING) {
663 port_private->eth_running = 0;
664 mv64360_eth_print_stat (dev);
665 }
666 memset (port_private->stats, 0, sizeof (struct net_device_stats));
667#endif
668 DP (printf ("\nEthernet stopped ... \n"));
669 return 0;
670}
671
672
673
674
675
676
677
678
679
680
681
682
683
684
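/*
 * Queue a single packet for transmission on queue 0 and reclaim any
 * descriptors the controller has finished with.  Returns 0 on success,
 * 1 if the packet could not be queued.
 */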
685int mv64360_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
686 int dataSize)
687{
688 ETH_PORT_INFO *ethernet_private;
689 struct mv64360_eth_priv *port_private;
690 PKT_INFO pkt_info;
691 ETH_FUNC_RET_STATUS status;
692 struct net_device_stats *stats;
693 ETH_FUNC_RET_STATUS release_result;
694
695 ethernet_private = (ETH_PORT_INFO *) dev->priv;
696 port_private =
697 (struct mv64360_eth_priv *) ethernet_private->port_private;
698
699 stats = port_private->stats;
700
701
702 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
703 pkt_info.byte_cnt = dataSize;
704 pkt_info.buf_ptr = (unsigned int) dataPtr;
705 pkt_info.return_info = 0;
706
707 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
	if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
		printf ("Error on transmitting packet ..");
		if (status == ETH_QUEUE_FULL)
			printf ("ETH Queue is full.\n");
		goto error;
	}
	if (status == ETH_QUEUE_LAST_RESOURCE)
		printf ("ETH Queue: using last available resource.\n");
716
717
718 stats->tx_bytes += dataSize;
719 stats->tx_packets++;
720
721
722 do {
723 release_result =
724 eth_tx_return_desc (ethernet_private, ETH_Q0,
725 &pkt_info);
726 switch (release_result) {
727 case ETH_OK:
728 DP (printf ("descriptor released\n"));
729 if (pkt_info.cmd_sts & BIT0) {
730 printf ("Error in TX\n");
731 stats->tx_errors++;
732
733 }
734 break;
735 case ETH_RETRY:
736 DP (printf ("transmission still in process\n"));
737 break;
738
739 case ETH_ERROR:
740 printf ("routine can not access Tx desc ring\n");
741 break;
742
743 case ETH_END_OF_JOB:
744 DP (printf ("the routine has nothing to release\n"));
745 break;
746 default:
747 break;
748 }
749 } while (release_result == ETH_OK);
750
751
752 return 0;
753 error:
754 return 1;
755}
756
757
758
759
760
761
762
763
764
765
766
767
768
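/*
 * Drain receive queue 0: hand each complete, error-free frame to the
 * network stack via NetReceive(), drop fragmented or errored frames, and
 * return every descriptor to the ring.
 */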
769int mv64360_eth_receive (struct eth_device *dev)
770{
771 ETH_PORT_INFO *ethernet_private;
772 struct mv64360_eth_priv *port_private;
773 PKT_INFO pkt_info;
774 struct net_device_stats *stats;
775
776 ethernet_private = (ETH_PORT_INFO *) dev->priv;
777 port_private =
778 (struct mv64360_eth_priv *) ethernet_private->port_private;
779 stats = port_private->stats;
780
781 while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) ==
782 ETH_OK)) {
783
784#ifdef DEBUG_MV_ETH
785 if (pkt_info.byte_cnt != 0) {
786 printf ("%s: Received %d byte Packet @ 0x%x\n",
787 __FUNCTION__, pkt_info.byte_cnt,
788 pkt_info.buf_ptr);
789 }
790#endif
791
792 stats->rx_packets++;
793 stats->rx_bytes += pkt_info.byte_cnt;
794
795
796
797
798
		if (((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
		     (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
		    || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
			    (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
				printf ("Received packet spread on multiple descriptors\n");

			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
				stats->rx_errors++;
			}
811
812
813 pkt_info.buf_ptr &= ~0x7;
814 pkt_info.byte_cnt = 0x0000;
815
816 if (eth_rx_return_buff
817 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
818 printf ("Error while returning the RX Desc to Ring\n");
819 } else {
820 DP (printf ("RX Desc returned to Ring\n"));
821 }
822
823 } else {
824
825
826#ifdef DEBUG_MV_ETH
827 printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
828#endif
829
830 NetReceive ((uchar *) pkt_info.buf_ptr,
831 (int) pkt_info.byte_cnt);
832
833
834
835 pkt_info.buf_ptr &= ~0x7;
836 pkt_info.byte_cnt = 0x0000;
837 DP (printf
838 ("RX: pkt_info.buf_ptr = %x\n",
839 pkt_info.buf_ptr));
840 if (eth_rx_return_buff
841 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
842 printf ("Error while returning the RX Desc to Ring\n");
843 } else {
844 DP (printf ("RX Desc returned to Ring\n"));
845 }
846
847
848
849 }
850 }
851 mv64360_eth_get_stats (dev);
852 return 1;
853}
854
855
856
857
858
859
860
861
862
863
864
865static struct net_device_stats *mv64360_eth_get_stats (struct eth_device *dev)
866{
867 ETH_PORT_INFO *ethernet_private;
868 struct mv64360_eth_priv *port_private;
869
870 ethernet_private = (ETH_PORT_INFO *) dev->priv;
871 port_private =
872 (struct mv64360_eth_priv *) ethernet_private->port_private;
873
874 mv64360_eth_update_stat (dev);
875
876 return port_private->stats;
877}
878
879
880
881
882
883
884
885
886
887
888
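/*
 * Accumulate the port's hardware MIB counters into the software
 * net_device_stats structure.  The counters are cleared by reading
 * (see eth_clear_mib_counters()), so each value is added to the
 * running totals.
 */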
889static void mv64360_eth_update_stat (struct eth_device *dev)
890{
891 ETH_PORT_INFO *ethernet_private;
892 struct mv64360_eth_priv *port_private;
893 struct net_device_stats *stats;
894
895 ethernet_private = (ETH_PORT_INFO *) dev->priv;
896 port_private =
897 (struct mv64360_eth_priv *) ethernet_private->port_private;
898 stats = port_private->stats;
899
900
901 stats->rx_packets += (unsigned long)
902 eth_read_mib_counter (ethernet_private->port_num,
903 ETH_MIB_GOOD_FRAMES_RECEIVED);
904 stats->tx_packets += (unsigned long)
905 eth_read_mib_counter (ethernet_private->port_num,
906 ETH_MIB_GOOD_FRAMES_SENT);
907 stats->rx_bytes += (unsigned long)
908 eth_read_mib_counter (ethernet_private->port_num,
909 ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
910
911
912
913
914
915
916
917
918
919
920 eth_read_mib_counter (ethernet_private->port_num,
921 ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
922 stats->tx_bytes += (unsigned long)
923 eth_read_mib_counter (ethernet_private->port_num,
924 ETH_MIB_GOOD_OCTETS_SENT_LOW);
925 eth_read_mib_counter (ethernet_private->port_num,
926 ETH_MIB_GOOD_OCTETS_SENT_HIGH);
927 stats->rx_errors += (unsigned long)
928 eth_read_mib_counter (ethernet_private->port_num,
929 ETH_MIB_MAC_RECEIVE_ERROR);
930
931
932 stats->rx_dropped +=
933 (unsigned long) eth_read_mib_counter (ethernet_private->
934 port_num,
935 ETH_MIB_BAD_CRC_EVENT);
936 stats->multicast += (unsigned long)
937 eth_read_mib_counter (ethernet_private->port_num,
938 ETH_MIB_MULTICAST_FRAMES_RECEIVED);
939 stats->collisions +=
940 (unsigned long) eth_read_mib_counter (ethernet_private->
941 port_num,
942 ETH_MIB_COLLISION) +
943 (unsigned long) eth_read_mib_counter (ethernet_private->
944 port_num,
945 ETH_MIB_LATE_COLLISION);
946
947 stats->rx_length_errors +=
948 (unsigned long) eth_read_mib_counter (ethernet_private->
949 port_num,
950 ETH_MIB_UNDERSIZE_RECEIVED)
951 +
952 (unsigned long) eth_read_mib_counter (ethernet_private->
953 port_num,
954 ETH_MIB_OVERSIZE_RECEIVED);
955
956}
957
958#ifndef UPDATE_STATS_BY_SOFTWARE
959
960
961
962
963
964
965
966
967
968static void mv64360_eth_print_stat (struct eth_device *dev)
969{
970 ETH_PORT_INFO *ethernet_private;
971 struct mv64360_eth_priv *port_private;
972 struct net_device_stats *stats;
973
974 ethernet_private = (ETH_PORT_INFO *) dev->priv;
975 port_private =
976 (struct mv64360_eth_priv *) ethernet_private->port_private;
977 stats = port_private->stats;
978
979
980 printf ("\n### Network statistics: ###\n");
981 printf ("--------------------------\n");
982 printf (" Packets received: %ld\n", stats->rx_packets);
	printf (" Packets sent: %ld\n", stats->tx_packets);
984 printf (" Received bytes: %ld\n", stats->rx_bytes);
	printf (" Sent bytes: %ld\n", stats->tx_bytes);
986 if (stats->rx_errors != 0)
987 printf (" Rx Errors: %ld\n",
988 stats->rx_errors);
989 if (stats->rx_dropped != 0)
990 printf (" Rx dropped (CRC Errors): %ld\n",
991 stats->rx_dropped);
992 if (stats->multicast != 0)
		printf (" Rx multicast frames: %ld\n",
994 stats->multicast);
995 if (stats->collisions != 0)
996 printf (" No. of collisions: %ld\n",
997 stats->collisions);
998 if (stats->rx_length_errors != 0)
999 printf (" Rx length errors: %ld\n",
1000 stats->rx_length_errors);
1001}
1002#endif
1003
1004
1005
1006
1007
1008
1009
1010bool db64360_eth_start (struct eth_device *dev)
1011{
1012 return (mv64360_eth_open (dev));
1013}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1194 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1195
1196#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1197 MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1198 (1 << (8 + tx_queue)))
1199
1200#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1201MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1202
1203#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1204MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1205
1206#define CURR_RFD_GET(p_curr_desc, queue) \
1207 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1208
1209#define CURR_RFD_SET(p_curr_desc, queue) \
1210 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1211
1212#define USED_RFD_GET(p_used_desc, queue) \
1213 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1214
1215#define USED_RFD_SET(p_used_desc, queue)\
1216(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1217
1218
1219#define CURR_TFD_GET(p_curr_desc, queue) \
1220 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1221
1222#define CURR_TFD_SET(p_curr_desc, queue) \
1223 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1224
1225#define USED_TFD_GET(p_used_desc, queue) \
1226 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1227
1228#define USED_TFD_SET(p_used_desc, queue) \
1229 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1230
1231#define FIRST_TFD_GET(p_first_desc, queue) \
1232 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1233
1234#define FIRST_TFD_SET(p_first_desc, queue) \
1235 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1236
1237
1238
1239#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1240
1241#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1242
1243#define LINK_UP_TIMEOUT 100000
1244#define PHY_BUSY_TIMEOUT 10000000
1245
1246
1247
1248
1249static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1250static int ethernet_phy_get (ETH_PORT eth_port_num);
1251
1252
1253static void eth_set_access_control (ETH_PORT eth_port_num,
1254 ETH_WIN_PARAM * param);
1255static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1256 ETH_QUEUE queue, int option);
1257#if 0
1258static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1259 unsigned char mc_byte,
1260 ETH_QUEUE queue, int option);
1261static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1262 unsigned char crc8,
1263 ETH_QUEUE queue, int option);
1264#endif
1265
1266static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1267 int byte_count);
1268
1269void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1270
1271
1272typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1273u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1274{
1275 u32 result = 0;
1276 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1277
1278 if (enable & (1 << bank))
1279 return 0;
1280 if (bank == BANK0)
1281 result = MV_REG_READ (MV64360_CS_0_BASE_ADDR);
1282 if (bank == BANK1)
1283 result = MV_REG_READ (MV64360_CS_1_BASE_ADDR);
1284 if (bank == BANK2)
1285 result = MV_REG_READ (MV64360_CS_2_BASE_ADDR);
1286 if (bank == BANK3)
1287 result = MV_REG_READ (MV64360_CS_3_BASE_ADDR);
1288 result &= 0x0000ffff;
1289 result = result << 16;
1290 return result;
1291}
1292
1293u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1294{
1295 u32 result = 0;
1296 u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);
1297
1298 if (enable & (1 << bank))
1299 return 0;
1300 if (bank == BANK0)
1301 result = MV_REG_READ (MV64360_CS_0_SIZE);
1302 if (bank == BANK1)
1303 result = MV_REG_READ (MV64360_CS_1_SIZE);
1304 if (bank == BANK2)
1305 result = MV_REG_READ (MV64360_CS_2_SIZE);
1306 if (bank == BANK3)
1307 result = MV_REG_READ (MV64360_CS_3_SIZE);
1308 result += 1;
1309 result &= 0x0000ffff;
1310 result = result << 16;
1311 return result;
1312}
1313
1314u32 mv_get_internal_sram_base (void)
1315{
1316 u32 result;
1317
1318 result = MV_REG_READ (MV64360_INTEGRATED_SRAM_BASE_ADDR);
1319 result &= 0x0000ffff;
1320 result = result << 16;
1321 return result;
1322}
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
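/*
 * Per-port one-time initialization: set the default configuration
 * values, clear the software descriptor ring pointers, reset the port,
 * program the address decode windows (DRAM banks 0-3 and the internal
 * SRAM), clear the MAC filter tables and set the PHY address.
 */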
1349static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1350{
1351 int queue;
1352 ETH_WIN_PARAM win_param;
1353
1354 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1355 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1356 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1357 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1358
1359 p_eth_port_ctrl->port_rx_queue_command = 0;
1360 p_eth_port_ctrl->port_tx_queue_command = 0;
1361
1362
1363 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1364 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1365 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1366 p_eth_port_ctrl->rx_resource_err[queue] = false;
1367 }
1368
1369 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1370 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1371 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1372 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1373 p_eth_port_ctrl->tx_resource_err[queue] = false;
1374 }
1375
1376 eth_port_reset (p_eth_port_ctrl->port_num);
1377
1378
1379 win_param.win = ETH_WIN0;
1380 win_param.target = ETH_TARGET_DRAM;
1381 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1382#ifndef CONFIG_NOT_COHERENT_CACHE
1383 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1384#endif
1385 win_param.high_addr = 0;
1386
1387 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1388 win_param.size = mv_get_dram_bank_size (BANK0);
1389 if (win_param.size == 0)
1390 win_param.enable = 0;
1391 else
1392 win_param.enable = 1;
1393 win_param.access_ctrl = EWIN_ACCESS_FULL;
1394
1395
1396 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1397
1398
1399 win_param.win = ETH_WIN1;
1400 win_param.target = ETH_TARGET_DRAM;
1401 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1402#ifndef CONFIG_NOT_COHERENT_CACHE
1403 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1404#endif
1405 win_param.high_addr = 0;
1406
1407 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1408 win_param.size = mv_get_dram_bank_size (BANK1);
1409 if (win_param.size == 0)
1410 win_param.enable = 0;
1411 else
1412 win_param.enable = 1;
1413 win_param.access_ctrl = EWIN_ACCESS_FULL;
1414
1415
1416 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1417
1418
1419 win_param.win = ETH_WIN2;
1420 win_param.target = ETH_TARGET_DRAM;
1421 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1422#ifndef CONFIG_NOT_COHERENT_CACHE
1423 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1424#endif
1425 win_param.high_addr = 0;
1426
1427 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1428 win_param.size = mv_get_dram_bank_size (BANK2);
1429 if (win_param.size == 0)
1430 win_param.enable = 0;
1431 else
1432 win_param.enable = 1;
1433 win_param.access_ctrl = EWIN_ACCESS_FULL;
1434
1435
1436 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1437
1438
1439 win_param.win = ETH_WIN3;
1440 win_param.target = ETH_TARGET_DRAM;
1441 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1442#ifndef CONFIG_NOT_COHERENT_CACHE
1443 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1444#endif
1445 win_param.high_addr = 0;
1446
1447 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1448 win_param.size = mv_get_dram_bank_size (BANK3);
1449 if (win_param.size == 0)
1450 win_param.enable = 0;
1451 else
1452 win_param.enable = 1;
1453 win_param.access_ctrl = EWIN_ACCESS_FULL;
1454
1455
1456 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1457
1458
1459 win_param.win = ETH_WIN4;
1460 win_param.target = EBAR_TARGET_CBS;
1461 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1462 win_param.high_addr = 0;
1463 win_param.base_addr = mv_get_internal_sram_base ();
1464 win_param.size = MV64360_INTERNAL_SRAM_SIZE;
1465 win_param.enable = 1;
1466 win_param.access_ctrl = EWIN_ACCESS_FULL;
1467
1468
1469 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1470
1471 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1472
1473 ethernet_phy_set (p_eth_port_ctrl->port_num,
1474 p_eth_port_ctrl->port_phy_addr);
1475
1476 return;
1477
1478}
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
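/*
 * Start the port: program the current TX/RX descriptor pointers, set the
 * unicast MAC address for each active RX queue, write the port
 * configuration, serial control and SDMA registers, enable the serial
 * port and the configured RX queues, and report whether the PHY
 * indicates an established link.
 */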
1510static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
1511{
1512 int queue;
1513 volatile ETH_TX_DESC *p_tx_curr_desc;
1514 volatile ETH_RX_DESC *p_rx_curr_desc;
1515 unsigned int phy_reg_data;
1516 ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;
1517
1518
1519
1520 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1521 CURR_TFD_GET (p_tx_curr_desc, queue);
1522 MV_REG_WRITE ((MV64360_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
1523 (eth_port_num)
1524 + (4 * queue)),
1525 ((unsigned int) p_tx_curr_desc));
1526
1527 }
1528
1529
1530 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1531 CURR_RFD_GET (p_rx_curr_desc, queue);
1532 MV_REG_WRITE ((MV64360_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
1533 (eth_port_num)
1534 + (4 * queue)),
1535 ((unsigned int) p_rx_curr_desc));
1536
1537 if (p_rx_curr_desc != NULL)
1538
1539 eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
1540 p_eth_port_ctrl->port_mac_addr,
1541 queue);
1542 }
1543
1544
1545 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
1546 p_eth_port_ctrl->port_config);
1547
1548 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
1549 p_eth_port_ctrl->port_config_extend);
1550
1551 MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1552 p_eth_port_ctrl->port_serial_control);
1553
1554 MV_SET_REG_BITS (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1555 ETH_SERIAL_PORT_ENABLE);
1556
1557
1558 MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
1559 p_eth_port_ctrl->port_sdma_config);
1560
1561 MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
1562 (eth_port_num), 0x3fffffff);
1563 MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
1564 (eth_port_num), 0x03fffcff);
1565
1566 MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);
1567
1568
1569 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
1570 p_eth_port_ctrl->port_rx_queue_command);
1571
1572
1573 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
1574
1575 if (!(phy_reg_data & 0x20))
1576 return false;
1577
1578 return true;
1579}
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
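/*
 * Program the port's unicast MAC address registers and add the address
 * to the unicast filter table for the given RX queue.
 */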
1600static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1601 unsigned char *p_addr, ETH_QUEUE queue)
1602{
1603 unsigned int mac_h;
1604 unsigned int mac_l;
1605
1606 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1607 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1608 (p_addr[2] << 8) | (p_addr[3] << 0);
1609
1610 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1611 MV_REG_WRITE (MV64360_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1612
1613
1614 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1615
1616 return;
1617}
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
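/*
 * Add (ACCEPT_MAC_ADDR) or remove (REJECT_MAC_ADDR) the entry for the
 * low nibble of the MAC address in the DA filter unicast table.
 */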
1642static bool eth_port_uc_addr (ETH_PORT eth_port_num,
1643 unsigned char uc_nibble,
1644 ETH_QUEUE queue, int option)
1645{
1646 unsigned int unicast_reg;
1647 unsigned int tbl_offset;
1648 unsigned int reg_offset;
1649
1650
1651 uc_nibble = (0xf & uc_nibble);
1652 tbl_offset = (uc_nibble / 4) * 4;
1653 reg_offset = uc_nibble % 4;
1654
1655 switch (option) {
1656 case REJECT_MAC_ADDR:
1657
1658 unicast_reg =
1659 MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1660 (eth_port_num)
1661 + tbl_offset));
1662
1663 unicast_reg &= (0x0E << (8 * reg_offset));
1664
1665 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1666 (eth_port_num)
1667 + tbl_offset), unicast_reg);
1668 break;
1669
1670 case ACCEPT_MAC_ADDR:
1671
1672 unicast_reg =
1673 MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1674 (eth_port_num)
1675 + tbl_offset));
1676
1677 unicast_reg |= ((0x01 | queue) << (8 * reg_offset));
1678
1679 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1680 (eth_port_num)
1681 + tbl_offset), unicast_reg);
1682
1683 break;
1684
1685 default:
1686 return false;
1687 }
1688 return true;
1689}
1690
1691#if 0
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723static void eth_port_mc_addr (ETH_PORT eth_port_num,
1724 unsigned char *p_addr,
1725 ETH_QUEUE queue, int option)
1726{
1727 unsigned int mac_h;
1728 unsigned int mac_l;
1729 unsigned char crc_result = 0;
1730 int mac_array[48];
1731 int crc[8];
1732 int i;
1733
1734
1735 if ((p_addr[0] == 0x01) &&
1736 (p_addr[1] == 0x00) &&
1737 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00))
1738
1739 eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
1740 else {
1741
1742 mac_h = (p_addr[0] << 8) | (p_addr[1]);
1743 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
1744 (p_addr[4] << 8) | (p_addr[5] << 0);
1745
1746 for (i = 0; i < 32; i++)
1747 mac_array[i] = (mac_l >> i) & 0x1;
1748 for (i = 32; i < 48; i++)
1749 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
1750
1751
1752 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
1753 mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
1754 mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
1755 mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
1756 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
1757 mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
1758 mac_array[6] ^ mac_array[0];
1759
1760 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1761 mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
1762 mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
1763 mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
1764 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
1765 mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
1766 mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
1767 mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
1768 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
1769 mac_array[0];
1770
1771 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
1772 mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
1773 mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
1774 mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
1775 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
1776 mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
1777 mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
1778 mac_array[2] ^ mac_array[1] ^ mac_array[0];
1779
1780 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
1781 mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
1782 mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
1783 mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
1784 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
1785 mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
1786 mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
1787 mac_array[2] ^ mac_array[1];
1788
1789 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1790 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
1791 mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
1792 mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
1793 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
1794 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1795 mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
1796 mac_array[2];
1797
1798 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
1799 mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
1800 mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
1801 mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
1802 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
1803 mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
1804 mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
1805 mac_array[3];
1806
1807 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
1808 mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
1809 mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
1810 mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
1811 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
1812 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1813 mac_array[6] ^ mac_array[5] ^ mac_array[4];
1814
1815 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
1816 mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
1817 mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
1818 mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
1819 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
1820 mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
1821 mac_array[6] ^ mac_array[5];
1822
1823 for (i = 0; i < 8; i++)
1824 crc_result = crc_result | (crc[i] << i);
1825
1826 eth_port_omc_addr (eth_port_num, crc_result, queue, option);
1827 }
1828 return;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1858 unsigned char mc_byte,
1859 ETH_QUEUE queue, int option)
1860{
1861 unsigned int smc_table_reg;
1862 unsigned int tbl_offset;
1863 unsigned int reg_offset;
1864
1865
1866 tbl_offset = (mc_byte / 4) * 4;
1867 reg_offset = mc_byte % 4;
1868 queue &= 0x7;
1869
1870 switch (option) {
1871 case REJECT_MAC_ADDR:
1872
1873 smc_table_reg =
1874 MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1875 smc_table_reg &= (0x0E << (8 * reg_offset));
1876
1877 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1878 break;
1879
1880 case ACCEPT_MAC_ADDR:
1881
1882 smc_table_reg =
1883 MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1884 smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
1885
1886 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1887 break;
1888
1889 default:
1890 return false;
1891 }
1892 return true;
1893}
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1922 unsigned char crc8,
1923 ETH_QUEUE queue, int option)
1924{
1925 unsigned int omc_table_reg;
1926 unsigned int tbl_offset;
1927 unsigned int reg_offset;
1928
1929
1930 tbl_offset = (crc8 / 4) * 4;
1931 reg_offset = crc8 % 4;
1932 queue &= 0x7;
1933
1934 switch (option) {
1935 case REJECT_MAC_ADDR:
1936
1937 omc_table_reg =
1938 MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1939 omc_table_reg &= (0x0E << (8 * reg_offset));
1940
1941 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
1942 break;
1943
1944 case ACCEPT_MAC_ADDR:
1945
1946 omc_table_reg =
1947 MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1948 omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
1949
1950 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
1951 break;
1952
1953 default:
1954 return false;
1955 }
1956 return true;
1957}
1958#endif
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
1978{
1979 int table_index;
1980
1981
1982 for (table_index = 0; table_index <= 0xC; table_index += 4)
1983 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
1984 (eth_port_num) + table_index), 0);
1985
1986 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
1987
1988 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
1989
1990 MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
1991 }
1992}
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2012{
2013 int i;
2014
2015
2016 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2017 i += 4)
2018 MV_REG_READ((MV64360_ETH_MIB_COUNTERS_BASE(eth_port_num) + i));
2019
2020 return;
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2045 unsigned int mib_offset)
2046{
2047 return (MV_REG_READ (MV64360_ETH_MIB_COUNTERS_BASE (eth_port_num)
2048 + mib_offset));
2049}
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2069{
2070 unsigned int reg_data;
2071
2072 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2073
2074 reg_data &= ~(0x1F << (5 * eth_port_num));
2075 reg_data |= (phy_addr << (5 * eth_port_num));
2076
2077 MV_REG_WRITE (MV64360_ETH_PHY_ADDR_REG, reg_data);
2078
2079 return;
2080}
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098static int ethernet_phy_get (ETH_PORT eth_port_num)
2099{
2100 unsigned int reg_data;
2101
2102 reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);
2103
2104 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2125{
2126 unsigned int time_out = 50;
2127 unsigned int phy_reg_data;
2128
2129
2130 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2131 phy_reg_data |= 0x8000;
2132 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2133
2134
2135 do {
2136 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2137
2138 if (time_out-- == 0)
2139 return false;
2140 }
2141 while (!(phy_reg_data & 0x20));
2142
2143 return true;
2144}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
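/*
 * Stop all TX and RX queue activity, wait for the queues to drain,
 * clear the MIB counters and disable the serial port.
 */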
2164static void eth_port_reset (ETH_PORT eth_port_num)
2165{
2166 unsigned int reg_data;
2167
2168
2169 reg_data =
2170 MV_REG_READ (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2171 (eth_port_num));
2172
2173 if (reg_data & 0xFF) {
2174
2175 MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2176 (eth_port_num), (reg_data << 8));
2177
2178
2179 do {
2180
2181 reg_data =
2182 MV_REG_READ
2183 (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
2184 (eth_port_num));
2185 }
2186 while (reg_data & 0xFF);
2187 }
2188
2189
2190 reg_data =
2191 MV_REG_READ (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2192 (eth_port_num));
2193
2194 if (reg_data & 0xFF) {
2195
2196 MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2197 (eth_port_num), (reg_data << 8));
2198
2199
2200 do {
2201
2202 reg_data =
2203 MV_REG_READ
2204 (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
2205 (eth_port_num));
2206 }
2207 while (reg_data & 0xFF);
2208 }
2209
2210
2211
2212 eth_clear_mib_counters (eth_port_num);
2213
2214
2215 reg_data =
2216 MV_REG_READ (MV64360_ETH_PORT_SERIAL_CONTROL_REG
2217 (eth_port_num));
2218 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2219 MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
2220 reg_data);
2221
2222 return;
2223}
2224
2225#if 0
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2246 unsigned int value)
2247{
2248 unsigned int eth_config_reg;
2249
2250 eth_config_reg =
2251 MV_REG_READ (MV64360_ETH_PORT_CONFIG_REG (eth_port_num));
2252 eth_config_reg |= value;
2253 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
2254 eth_config_reg);
2255
2256 return;
2257}
2258#endif
2259
2260#if 0
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
2281 unsigned int value)
2282{
2283 unsigned int eth_config_reg;
2284
2285 eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
2286 (eth_port_num));
2287 eth_config_reg &= ~value;
2288 MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
2289 eth_config_reg);
2290
2291 return;
2292}
2293#endif
2294
2295#if 0
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
2314{
2315 unsigned int eth_config_reg;
2316
2317 eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
2318 (eth_port_num));
2319 return eth_config_reg;
2320}
2321
2322#endif
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
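/*
 * Read a PHY register over the SMI interface: wait for the SMI unit to
 * become idle, issue the read, poll for the read-valid bit and return
 * the 16-bit result in *value.  Returns false on timeout.
 */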
2344static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2345 unsigned int phy_reg, unsigned int *value)
2346{
2347 unsigned int reg_value;
2348 unsigned int time_out = PHY_BUSY_TIMEOUT;
2349 int phy_addr;
2350
2351 phy_addr = ethernet_phy_get (eth_port_num);
2352
2353
2354
2355 do {
2356 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2357 if (time_out-- == 0) {
2358 return false;
2359 }
2360 }
2361 while (reg_value & ETH_SMI_BUSY);
2362
2363
2364
2365 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2366 (phy_addr << 16) | (phy_reg << 21) |
2367 ETH_SMI_OPCODE_READ);
2368
2369 time_out = PHY_BUSY_TIMEOUT;
2370
2371 do {
2372 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2373 if (time_out-- == 0) {
2374 return false;
2375 }
2376 }
2377 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2378
2379
2380#define PHY_UPDATE_TIMEOUT 10000
2381 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2382
2383 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2384
2385 *value = reg_value & 0xffff;
2386
2387 return true;
2388}
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
2411 unsigned int phy_reg, unsigned int value)
2412{
2413 unsigned int reg_value;
2414 unsigned int time_out = PHY_BUSY_TIMEOUT;
2415 int phy_addr;
2416
2417 phy_addr = ethernet_phy_get (eth_port_num);
2418
2419
2420 do {
2421 reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
2422 if (time_out-- == 0) {
2423 return false;
2424 }
2425 }
2426 while (reg_value & ETH_SMI_BUSY);
2427
2428
2429 MV_REG_WRITE (MV64360_ETH_SMI_REG,
2430 (phy_addr << 16) | (phy_reg << 21) |
2431 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2432 return true;
2433}
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
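/*
 * Program one ethernet address decode window: access protection, window
 * size, base address/target/attributes, high address remap (windows 0-3
 * only) and the window enable bit.
 */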
2453static void eth_set_access_control (ETH_PORT eth_port_num,
2454 ETH_WIN_PARAM * param)
2455{
2456 unsigned int access_prot_reg;
2457
2458
2459 access_prot_reg = MV_REG_READ (MV64360_ETH_ACCESS_PROTECTION_REG
2460 (eth_port_num));
2461 access_prot_reg &= (~(3 << (param->win * 2)));
2462 access_prot_reg |= (param->access_ctrl << (param->win * 2));
2463 MV_REG_WRITE (MV64360_ETH_ACCESS_PROTECTION_REG (eth_port_num),
2464 access_prot_reg);
2465
2466
2467 MV_REG_WRITE ((MV64360_ETH_SIZE_REG_0 +
2468 (ETH_SIZE_REG_GAP * param->win)),
2469 (((param->size / 0x10000) - 1) << 16));
2470
2471
2472 MV_REG_WRITE ((MV64360_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
2473 (param->target | param->attributes | param->base_addr));
2474
2475 if (param->win < 4)
2476 MV_REG_WRITE ((MV64360_ETH_HIGH_ADDR_REMAP_REG_0 +
2477 (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
2478 param->high_addr);
2479
2480
2481 if (param->enable == 1)
2482 MV_RESET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
2483 (1 << param->win));
2484 else
2485 MV_SET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
2486 (1 << param->win));
2487}
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
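/*
 * Build the RX descriptor ring in the pre-allocated memory area:
 * validate buffer alignment and size, chain the descriptors into a ring,
 * hand ownership of every buffer to the DMA engine and record the ring
 * in the port control structure.
 */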
2519static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2520 ETH_QUEUE rx_queue,
2521 int rx_desc_num,
2522 int rx_buff_size,
2523 unsigned int rx_desc_base_addr,
2524 unsigned int rx_buff_base_addr)
2525{
2526 ETH_RX_DESC *p_rx_desc;
2527 ETH_RX_DESC *p_rx_prev_desc;
2528 unsigned int buffer_addr;
2529 int ix;
2530
2531
2532 p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
2533 p_rx_prev_desc = p_rx_desc;
2534 buffer_addr = rx_buff_base_addr;
2535
2536
2537 if (rx_buff_base_addr & 0xF)
2538 return false;
2539
2540
2541 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
2542 return false;
2543
2544
2545 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
2546 return false;
2547
2548
2549 for (ix = 0; ix < rx_desc_num; ix++) {
2550 p_rx_desc->buf_size = rx_buff_size;
2551 p_rx_desc->byte_cnt = 0x0000;
2552 p_rx_desc->cmd_sts =
2553 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2554 p_rx_desc->next_desc_ptr =
2555 ((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
2556 p_rx_desc->buf_ptr = buffer_addr;
2557 p_rx_desc->return_info = 0x00000000;
2558 D_CACHE_FLUSH_LINE (p_rx_desc, 0);
2559 buffer_addr += rx_buff_size;
2560 p_rx_prev_desc = p_rx_desc;
2561 p_rx_desc = (ETH_RX_DESC *)
2562 ((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
2563 }
2564
2565
2566 p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
2567 D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);
2568
2569
2570 CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2571 USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2572
2573 p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
2574 (ETH_RX_DESC *) rx_desc_base_addr;
2575 p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
2576 rx_desc_num * RX_DESC_ALIGNED_SIZE;
2577
2578 p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);
2579
2580 return true;
2581}
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2614 ETH_QUEUE tx_queue,
2615 int tx_desc_num,
2616 int tx_buff_size,
2617 unsigned int tx_desc_base_addr,
2618 unsigned int tx_buff_base_addr)
2619{
2620
2621 ETH_TX_DESC *p_tx_desc;
2622 ETH_TX_DESC *p_tx_prev_desc;
2623 unsigned int buffer_addr;
2624 int ix;
2625
2626
2627
2628 p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
2629 p_tx_prev_desc = p_tx_desc;
2630 buffer_addr = tx_buff_base_addr;
2631
2632
2633 if (tx_buff_base_addr & 0xF)
2634 return false;
2635
2636
2637 if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
2638 || (tx_buff_size < TX_BUFFER_MIN_SIZE))
2639 return false;
2640
2641
2642 for (ix = 0; ix < tx_desc_num; ix++) {
2643 p_tx_desc->byte_cnt = 0x0000;
2644 p_tx_desc->l4i_chk = 0x0000;
2645 p_tx_desc->cmd_sts = 0x00000000;
2646 p_tx_desc->next_desc_ptr =
2647 ((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
2648
2649 p_tx_desc->buf_ptr = buffer_addr;
2650 p_tx_desc->return_info = 0x00000000;
2651 D_CACHE_FLUSH_LINE (p_tx_desc, 0);
2652 buffer_addr += tx_buff_size;
2653 p_tx_prev_desc = p_tx_desc;
2654 p_tx_desc = (ETH_TX_DESC *)
2655 ((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
2656
2657 }
2658
2659 p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
2660 D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);
2661
2662 CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2663 USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2664
2665
2666 p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
2667 (ETH_TX_DESC *) tx_desc_base_addr;
2668 p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
2669 (tx_desc_num * TX_DESC_ALIGNED_SIZE);
2670
2671
2672 p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);
2673
2674 return true;
2675}
2676
2704
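/*
 * eth_port_send - queue a packet (or packet fragment) for transmission.
 *
 * Fills the next free Tx descriptor of tx_queue from the given PKT_INFO.
 * The first descriptor of a frame is remembered and only handed to the
 * DMA engine once the last descriptor of that frame has been filled, so
 * a multi-descriptor frame is never started half-built.  Returns
 * ETH_QUEUE_FULL if the queue has run out of descriptors,
 * ETH_QUEUE_LAST_RESOURCE when the descriptor just used was the last
 * free one, ETH_ERROR on invalid state, and ETH_OK otherwise.
 */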
2705static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
2706 ETH_QUEUE tx_queue,
2707 PKT_INFO * p_pkt_info)
2708{
2709 volatile ETH_TX_DESC *p_tx_desc_first;
2710 volatile ETH_TX_DESC *p_tx_desc_curr;
2711 volatile ETH_TX_DESC *p_tx_next_desc_curr;
2712 volatile ETH_TX_DESC *p_tx_desc_used;
2713 unsigned int command_status;
2714
2715
2716 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
2717 return ETH_QUEUE_FULL;
2718
2719
2720 CURR_TFD_GET (p_tx_desc_curr, tx_queue);
2721 USED_TFD_GET (p_tx_desc_used, tx_queue);
2722
2723 if (p_tx_desc_curr == NULL)
2724 return ETH_ERROR;
2725
2726
2727 p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
2728 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2729
2730 if (command_status & (ETH_TX_FIRST_DESC)) {
2731
2732 FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
2733 p_tx_desc_first = p_tx_desc_curr;
2734 } else {
2735 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
2736 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2737 }
2738
2739
2740
2741
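	/* Descriptive comment: frames of 8 bytes or less would presumably
	 * have to be copied into the scratch area inside the descriptor
	 * (TX_BUF_OFFSET_IN_DESC) to satisfy the controller's small-frame
	 * errata.  The driver currently bails out instead, so the copy
	 * path below the return statement is unreachable until that errata
	 * handling is implemented. */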
2742 if (p_pkt_info->byte_cnt <= 8) {
2743 printf ("You have failed in the < 8 bytes errata - fixme\n");
2744 return ETH_ERROR;
2745
2746 p_tx_desc_curr->buf_ptr =
2747 (unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
2748 eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
2749 p_pkt_info->byte_cnt);
2750 } else
2751 p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;
2752
2753 p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
2754 p_tx_desc_curr->return_info = p_pkt_info->return_info;
2755
2756 if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
2757
2758 p_tx_desc_curr->cmd_sts = command_status |
2759 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2760
2761 if (p_tx_desc_curr != p_tx_desc_first)
2762 p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2763
2764
2765
2766 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2767 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
2768 CPU_PIPE_FLUSH;
2769
2770
2771 ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);
2772
2773
2774 p_tx_desc_first = p_tx_next_desc_curr;
2775 FIRST_TFD_SET (p_tx_desc_first, tx_queue);
2776
2777 } else {
2778 p_tx_desc_curr->cmd_sts = command_status;
2779 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2780 }
2781
2782
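	/* If the descriptor we just advanced to is the one the "used"
	 * pointer still references, the ring has no free descriptors left:
	 * flag the resource error and report that the last resource was
	 * consumed. */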
2783 if (p_tx_next_desc_curr == p_tx_desc_used) {
2784
2785 CURR_TFD_SET (p_tx_desc_first, tx_queue);
2786
2787 p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
2788 return ETH_QUEUE_LAST_RESOURCE;
2789 } else {
2790
2791 CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
2792 return ETH_OK;
2793 }
2794}
2795
2820
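/*
 * eth_tx_return_desc - reclaim a transmitted descriptor.
 *
 * Examines the descriptor at the "used" position of tx_queue.  If the
 * DMA engine still owns it, ETH_RETRY is returned; if the queue is
 * empty, ETH_END_OF_JOB.  Otherwise the command/status word and
 * return_info of the finished descriptor are copied into p_pkt_info,
 * the "used" pointer is advanced, and any pending resource-error
 * condition on the queue is cleared.
 */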
2821static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
2822 p_eth_port_ctrl,
2823 ETH_QUEUE tx_queue,
2824 PKT_INFO * p_pkt_info)
2825{
2826 volatile ETH_TX_DESC *p_tx_desc_used = NULL;
2827 volatile ETH_TX_DESC *p_tx_desc_first = NULL;
2828 unsigned int command_status;
2829
2830
2831
2832 USED_TFD_GET (p_tx_desc_used, tx_queue);
2833 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
2834
2835
2836
2837 if (p_tx_desc_used == NULL)
2838 return ETH_ERROR;
2839
2840 command_status = p_tx_desc_used->cmd_sts;
2841
2842
2843 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2844 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
2845 return ETH_RETRY;
2846 }
2847
2848
2849 if ((p_tx_desc_used == p_tx_desc_first) &&
2850 (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
2851 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
2852 return ETH_END_OF_JOB;
2853 }
2854
2855
2856 p_pkt_info->cmd_sts = command_status;
2857 p_pkt_info->return_info = p_tx_desc_used->return_info;
2858 p_tx_desc_used->return_info = 0;
2859
2860
2861 USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);
2862
2863
2864 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
2865 p_eth_port_ctrl->tx_resource_err[tx_queue] = false;
2866
2867 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
2868
2869 return ETH_OK;
2870
2871}
2872
2897
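/*
 * eth_port_receive - fetch the next received frame from rx_queue.
 *
 * If the current Rx descriptor has been handed back by the DMA engine,
 * its byte count, status, buffer pointer and return_info are copied into
 * p_pkt_info (buffer pointer and length are adjusted by RX_BUF_OFFSET)
 * and the current pointer is advanced.  Returns ETH_END_OF_JOB when no
 * new frame is available, ETH_QUEUE_FULL when all descriptors are
 * outstanding, ETH_ERROR on invalid state, and ETH_OK on success.
 */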
2898static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
2899 ETH_QUEUE rx_queue,
2900 PKT_INFO * p_pkt_info)
2901{
2902 volatile ETH_RX_DESC *p_rx_curr_desc;
2903 volatile ETH_RX_DESC *p_rx_next_curr_desc;
2904 volatile ETH_RX_DESC *p_rx_used_desc;
2905 unsigned int command_status;
2906
2907
2908 if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
2909 printf ("\nRx Queue is full ...\n");
2910 return ETH_QUEUE_FULL;
2911 }
2912
2913
2914 CURR_RFD_GET (p_rx_curr_desc, rx_queue);
2915 USED_RFD_GET (p_rx_used_desc, rx_queue);
2916
2917
2918 if (p_rx_curr_desc == NULL)
2919 return ETH_ERROR;
2920
2921
2922 p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
2923 command_status = p_rx_curr_desc->cmd_sts;
2924
2925
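	/* Descriptor still owned by the DMA engine: no new frame has
	 * arrived on this queue yet. */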
2926 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2927
2928 D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
2929
2930 return ETH_END_OF_JOB;
2931 }
2932
2933 p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
2934 p_pkt_info->cmd_sts = command_status;
2935 p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
2936 p_pkt_info->return_info = p_rx_curr_desc->return_info;
2937 p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;
2938
2939
2940
2941 p_rx_curr_desc->return_info = 0;
2942
2943
2944 CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);
2945
2946
2947 if (p_rx_next_curr_desc == p_rx_used_desc)
2948 p_eth_port_ctrl->rx_resource_err[rx_queue] = true;
2949
2950 D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
2951 CPU_PIPE_FLUSH;
2952 return ETH_OK;
2953}
2954
2976
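/*
 * eth_rx_return_buff - return a receive buffer to the DMA engine.
 *
 * Re-arms the descriptor at the "used" position of rx_queue with the
 * buffer described by p_pkt_info, hands ownership back to the DMA
 * engine, advances the "used" pointer and clears any pending
 * resource-error condition on the queue.
 */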
2977static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
2978 p_eth_port_ctrl,
2979 ETH_QUEUE rx_queue,
2980 PKT_INFO * p_pkt_info)
2981{
2982 volatile ETH_RX_DESC *p_used_rx_desc;
2983
2984
2985 USED_RFD_GET (p_used_rx_desc, rx_queue);
2986
2987
2988 if (p_used_rx_desc == NULL)
2989 return ETH_ERROR;
2990
2991 p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
2992 p_used_rx_desc->return_info = p_pkt_info->return_info;
2993 p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
2994 p_used_rx_desc->buf_size = MV64360_RX_BUFFER_SIZE;
2995
2996
2997 CPU_PIPE_FLUSH;
2998
2999
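	/* Hand the descriptor (and its buffer) back to the DMA engine and
	 * re-enable the Rx interrupt for it. */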
3000 p_used_rx_desc->cmd_sts =
3001 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
3002
3003
3004 D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
3005 CPU_PIPE_FLUSH;
3006
3007
3008 USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);
3009
3010
3011 if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
3012 p_eth_port_ctrl->rx_resource_err[rx_queue] = false;
3013
3014 return ETH_OK;
3015}
3016
3039
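/*
 * eth_port_set_rx_coal - set the Rx interrupt coalescing delay
 * (currently compiled out).
 *
 * Converts a delay, apparently given in microseconds, into 64-t_clk
 * units (coal = (t_clk / 1000000) * delay / 64) and writes the result
 * into bits [21:8] of the port's SDMA configuration register.
 */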
3040#if 0
3041static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
3042 unsigned int t_clk,
3043 unsigned int delay)
3044{
3045 unsigned int coal;
3046
3047 coal = ((t_clk / 1000000) * delay) / 64;
3048
3049 MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
3050 ((coal & 0x3fff) << 8) |
3051 (MV_REG_READ
3052 (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num))
3053 & 0xffc000ff));
3054 return coal;
3055}
3056
3057#endif
3058
3080
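/*
 * eth_port_set_tx_coal - set the Tx interrupt coalescing delay
 * (currently compiled out).
 *
 * Uses the same conversion as the Rx variant and writes the result,
 * shifted left by 4, into the port's Tx FIFO urgent threshold register.
 */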
3081#if 0
3082static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
3083 unsigned int t_clk,
3084 unsigned int delay)
3085{
3086 unsigned int coal;
3087
3088 coal = ((t_clk / 1000000) * delay) / 64;
3089
3090 MV_REG_WRITE (MV64360_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
3091 coal << 4);
3092 return coal;
3093}
3094#endif
3095
3115
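/*
 * eth_b_copy - simple byte-wise copy between two physical addresses.
 *
 * Clears the first word of the destination and then copies byte_count
 * bytes one byte at a time; no alignment or overlap handling is done.
 */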
3116static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
3117 int byte_count)
3118{
3119
3120 *(unsigned int *) dst_addr = 0x0;
3121
3122 while (byte_count != 0) {
3123 *(char *) dst_addr = *(char *) src_addr;
3124 dst_addr++;
3125 src_addr++;
3126 byte_count--;
3127 }
3128}
3129