#include <common.h>
#include <net.h>
#include <malloc.h>

#include "mv_eth.h"

#undef DEBUG_MV_ETH

#ifdef DEBUG_MV_ETH
#define DEBUG
#define DP(x) x
#else
#define DP(x)
#endif

#undef MV64360_CHECKSUM_OFFLOAD
#undef MV64360_RX_QUEUE_FILL_ON_TASK

#define MAGIC_ETH_RUNNING 8031971
#define MV64360_INTERNAL_SRAM_SIZE _256K
#define EXTRA_BYTES 32
#define WRAP (ETH_HLEN + 2 + 4 + 16)
#define BUFFER_MTU (dev->mtu + WRAP)
#define INT_CAUSE_UNMASK_ALL 0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
#ifdef MV64360_RX_QUEUE_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL 0x00000000
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
#endif

#define MV_REG_READ(offset) my_le32_to_cpu(*(volatile unsigned int *)(INTERNAL_REG_BASE_ADDR + offset))
#define MV_REG_WRITE(offset, data) *(volatile unsigned int *)(INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32(data)
#define MV_SET_REG_BITS(regOffset, bits) ((*((volatile unsigned int *)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
#define MV_RESET_REG_BITS(regOffset, bits) ((*((volatile unsigned int *)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))

static int mv64360_eth_real_open (struct eth_device *eth);
static int mv64360_eth_real_stop (struct eth_device *eth);
static struct net_device_stats *mv64360_eth_get_stats (struct eth_device *dev);
static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
static void mv64360_eth_update_stat (struct eth_device *dev);
bool db64360_eth_start (struct eth_device *eth);
unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
				   unsigned int mib_offset);
int mv64360_eth_receive (struct eth_device *dev);

int mv64360_eth_xmit (struct eth_device *, volatile void *packet, int length);

#ifndef UPDATE_STATS_BY_SOFTWARE
static void mv64360_eth_print_stat (struct eth_device *dev);
#endif

extern unsigned int INTERNAL_REG_BASE_ADDR;

#ifdef DEBUG_MV_ETH
void print_globals (struct eth_device *dev)
{
	printf ("Ethernet PRINT_Globals-Debug function\n");
	printf ("Base Address for ETH_PORT_INFO: %08x\n",
		(unsigned int) dev->priv);
	printf ("Base Address for mv64360_eth_priv: %08x\n",
		(unsigned int) &(((ETH_PORT_INFO *) dev->priv)->port_private));

	printf ("GT Internal Base Address: %08x\n", INTERNAL_REG_BASE_ADDR);
	printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0],
		MV64360_TX_QUEUE_SIZE);
	printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0],
		MV64360_RX_QUEUE_SIZE);
	printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_buffer_base[0],
		(MV64360_RX_QUEUE_SIZE * MV64360_RX_BUFFER_SIZE) + 32);
	printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_buffer_base[0],
		(MV64360_TX_QUEUE_SIZE * MV64360_TX_BUFFER_SIZE) + 32);
}
#endif

#define my_cpu_to_le32(x) my_le32_to_cpu((x))

unsigned long my_le32_to_cpu (unsigned long x)
{
	return (((x & 0x000000ffU) << 24) |
		((x & 0x0000ff00U) << 8) |
		((x & 0x00ff0000U) >> 8) |
		((x & 0xff000000U) >> 24));
}

static void mv64360_eth_print_phy_status (struct eth_device *dev)
{
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
	unsigned int port_status, phy_reg_data;

	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		printf ("Ethernet port changed link status to DOWN\n");
	} else {
		port_status =
			MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));
		printf ("Ethernet status port %d: Link up", port_num);
		printf (", %s",
			(port_status & BIT2) ? "Full Duplex" : "Half Duplex");
		if (port_status & BIT4)
			printf (", Speed 1 Gbps");
		else
			printf (", %s",
				(port_status & BIT5) ? "Speed 100 Mbps" :
				"Speed 10 Mbps");
		printf ("\n");
	}
}

int db64360_eth_probe (struct eth_device *dev)
{
	return ((int) db64360_eth_start (dev));
}

int db64360_eth_poll (struct eth_device *dev)
{
	return mv64360_eth_receive (dev);
}

int db64360_eth_transmit (struct eth_device *dev, void *packet, int length)
{
	mv64360_eth_xmit (dev, packet, length);
	return 0;
}

void db64360_eth_disable (struct eth_device *dev)
{
	mv64360_eth_stop (dev);
}
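
/*
 * mv6436x_eth_initialize - probe all MV_ETH_DEVS ports: read the MAC
 * address from the ethaddr/eth1addr/eth2addr environment variable,
 * allocate the per-port private structures and the descriptor/buffer
 * rings, and register each port with the U-Boot network layer.
 */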
void mv6436x_eth_initialize (bd_t * bis)
{
	struct eth_device *dev;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	int devnum, x, temp;
	char *s, *e, buf[64];

	for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
		dev = calloc (sizeof (*dev), 1);
		if (!dev) {
			printf ("%s: mv_enet%d allocation failure, %s\n",
				__FUNCTION__, devnum, "eth_device structure");
			return;
		}

		sprintf (dev->name, "mv_enet%d", devnum);

#ifdef DEBUG
		printf ("Initializing %s\n", dev->name);
#endif

		switch (devnum) {
		case 0:
			s = "ethaddr";
			break;

		case 1:
			s = "eth1addr";
			break;

		case 2:
			s = "eth2addr";
			break;

		default:
			printf ("%s: Invalid device number %d\n",
				__FUNCTION__, devnum);
			return;
		}

		temp = getenv_f(s, buf, sizeof (buf));
		s = (temp > 0) ? buf : NULL;

#ifdef DEBUG
		printf ("Setting MAC %d to %s\n", devnum, s);
#endif
		for (x = 0; x < 6; ++x) {
			dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
			if (s)
				s = (*e) ? e + 1 : e;
		}

		eth_port_uc_addr_set (devnum, dev->enetaddr, 0);

		dev->init = (void *) db64360_eth_probe;
		dev->halt = (void *) ethernet_phy_reset;
		dev->send = (void *) db64360_eth_transmit;
		dev->recv = (void *) db64360_eth_poll;

		ethernet_private = calloc (sizeof (*ethernet_private), 1);
		dev->priv = (void *) ethernet_private;
		if (!ethernet_private) {
			printf ("%s: %s allocation failure, %s\n",
				__FUNCTION__, dev->name,
				"Private Device Structure");
			free (dev);
			return;
		}

		memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
		memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);

		port_private = calloc (sizeof (*port_private), 1);
		ethernet_private->port_private = (void *) port_private;
		if (!port_private) {
			printf ("%s: %s allocation failure, %s\n",
				__FUNCTION__, dev->name,
				"Port Private Device Structure");

			free (ethernet_private);
			free (dev);
			return;
		}

		port_private->stats =
			calloc (sizeof (struct net_device_stats), 1);
		if (!port_private->stats) {
			printf ("%s: %s allocation failure, %s\n",
				__FUNCTION__, dev->name,
				"Net stat Structure");

			free (port_private);
			free (ethernet_private);
			free (dev);
			return;
		}
		memset (ethernet_private->port_private, 0,
			sizeof (struct mv64360_eth_priv));
		switch (devnum) {
		case 0:
			ethernet_private->port_num = ETH_0;
			break;
		case 1:
			ethernet_private->port_num = ETH_1;
			break;
		case 2:
			ethernet_private->port_num = ETH_2;
			break;
		default:
			printf ("Invalid device number %d\n", devnum);
			break;
		}

		port_private->port_num = devnum;

		mv64360_eth_update_stat (dev);
		memset (port_private->stats, 0,
			sizeof (struct net_device_stats));

		switch (devnum) {
		case 0:
			s = "ethaddr";
			break;

		case 1:
			s = "eth1addr";
			break;

		case 2:
			s = "eth2addr";
			break;

		default:
			printf ("%s: Invalid device number %d\n",
				__FUNCTION__, devnum);
			return;
		}

		temp = getenv_f(s, buf, sizeof (buf));
		s = (temp > 0) ? buf : NULL;

#ifdef DEBUG
		printf ("Setting MAC %d to %s\n", devnum, s);
#endif
		for (x = 0; x < 6; ++x) {
			dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
			if (s)
				s = (*e) ? e + 1 : e;
		}

		DP (printf ("Allocating descriptor and buffer rings\n"));

		ethernet_private->p_rx_desc_area_base[0] =
			(ETH_RX_DESC *) memalign (16,
						  RX_DESC_ALIGNED_SIZE *
						  MV64360_RX_QUEUE_SIZE + 1);
		ethernet_private->p_tx_desc_area_base[0] =
			(ETH_TX_DESC *) memalign (16,
						  TX_DESC_ALIGNED_SIZE *
						  MV64360_TX_QUEUE_SIZE + 1);

		ethernet_private->p_rx_buffer_base[0] =
			(char *) memalign (16,
					   MV64360_RX_QUEUE_SIZE *
					   MV64360_RX_BUFFER_SIZE + 1);
		ethernet_private->p_tx_buffer_base[0] =
			(char *) memalign (16,
					   MV64360_TX_QUEUE_SIZE *
					   MV64360_TX_BUFFER_SIZE + 1);

#ifdef DEBUG_MV_ETH
		print_globals (dev);
#endif
		eth_register (dev);
	}
	DP (printf ("%s: exit\n", __FUNCTION__));
}

int mv64360_eth_open (struct eth_device *dev)
{
	return (mv64360_eth_real_open (dev));
}
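
/*
 * mv64360_eth_real_open - bring the port up: stop the RX queues, clear
 * and unmask the port interrupts, program the PHY address, initialize
 * the port and its TX/RX descriptor rings, start the port and check
 * that the PHY reports link.
 */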
static int mv64360_eth_real_open (struct eth_device *dev)
{
	unsigned int queue;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	u32 phy_reg_data;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;

	memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);

	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
		      0x0000ff00);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
		      INT_CAUSE_UNMASK_ALL_EXT);

	ethernet_private->port_phy_addr = 0x8 + port_num;

	eth_port_init (ethernet_private);

	for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
		unsigned int size;

		port_private->tx_ring_size[queue] = MV64360_TX_QUEUE_SIZE;
		size = (port_private->tx_ring_size[queue] *
			TX_DESC_ALIGNED_SIZE);
		ethernet_private->tx_desc_area_size[queue] = size;

		memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
			0, ethernet_private->tx_desc_area_size[queue]);

		if (ether_init_tx_desc_ring
		    (ethernet_private, ETH_Q0,
		     port_private->tx_ring_size[queue],
		     MV64360_TX_BUFFER_SIZE,
		     (unsigned int) ethernet_private->p_tx_desc_area_base[queue],
		     (unsigned int) ethernet_private->p_tx_buffer_base[queue]) == false)
			printf ("### Error initializing TX Ring\n");
	}

	for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
		unsigned int size;

		port_private->rx_ring_size[queue] = MV64360_RX_QUEUE_SIZE;
		size = (port_private->rx_ring_size[queue] *
			RX_DESC_ALIGNED_SIZE);
		ethernet_private->rx_desc_area_size[queue] = size;

		memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
			0, ethernet_private->rx_desc_area_size[queue]);
		if ((ether_init_rx_desc_ring
		     (ethernet_private, ETH_Q0,
		      port_private->rx_ring_size[queue],
		      MV64360_RX_BUFFER_SIZE,
		      (unsigned int) ethernet_private->p_rx_desc_area_base[queue],
		      (unsigned int) ethernet_private->p_rx_buffer_base[queue])) == false)
			printf ("### Error initializing RX Ring\n");
	}

	eth_port_start (ethernet_private);

	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num),
		      (0x5 << 17) |
		      (MV_REG_READ
		       (MV64360_ETH_PORT_SERIAL_CONTROL_REG (port_num))
		       & 0xfff1ffff));

	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
	MV_REG_READ (MV64360_ETH_PORT_STATUS_REG (port_num));

	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		if ((ethernet_phy_reset (port_num)) != true) {
			printf ("$$ Warning: No link on port %d\n",
				port_num);
			return 0;
		} else {
			eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
			if (!(phy_reg_data & 0x20)) {
				printf ("### Error: PHY is not active\n");
				return 0;
			}
		}
	} else {
		mv64360_eth_print_phy_status (dev);
	}
	port_private->eth_running = MAGIC_ETH_RUNNING;
	return 1;
}

static int mv64360_eth_free_tx_rings (struct eth_device *dev)
{
	unsigned int queue;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	volatile ETH_TX_DESC *p_tx_curr_desc;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
		      0x0000ff00);

	DP (printf ("Clearing previously allocated TX queues... "));
	for (queue = 0; queue < MV64360_TX_QUEUE_NUM; queue++) {
		for (p_tx_curr_desc =
		     ethernet_private->p_tx_desc_area_base[queue];
		     ((unsigned int) p_tx_curr_desc <= (unsigned int)
		      ethernet_private->p_tx_desc_area_base[queue] +
		      ethernet_private->tx_desc_area_size[queue]);
		     p_tx_curr_desc =
		     (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
				      TX_DESC_ALIGNED_SIZE)) {
			if (p_tx_curr_desc->return_info != 0) {
				p_tx_curr_desc->return_info = 0;
				DP (printf ("freed\n"));
			}
		}
		DP (printf ("Done\n"));
	}
	return 0;
}

static int mv64360_eth_free_rx_rings (struct eth_device *dev)
{
	unsigned int queue;
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;
	volatile ETH_RX_DESC *p_rx_curr_desc;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
		      0x0000ff00);

	DP (printf ("Clearing previously allocated RX queues... "));
	for (queue = 0; queue < MV64360_RX_QUEUE_NUM; queue++) {
		for (p_rx_curr_desc =
		     ethernet_private->p_rx_desc_area_base[queue];
		     (((unsigned int) p_rx_curr_desc <
		       ((unsigned int) ethernet_private->
			p_rx_desc_area_base[queue] +
			ethernet_private->rx_desc_area_size[queue])));
		     p_rx_curr_desc =
		     (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
				      RX_DESC_ALIGNED_SIZE)) {
			if (p_rx_curr_desc->return_info != 0) {
				p_rx_curr_desc->return_info = 0;
				DP (printf ("freed\n"));
			}
		}
		DP (printf ("Done\n"));
	}
	return 0;
}
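
/*
 * mv64360_eth_stop - shut the port down: disable the address decode
 * windows, free the TX/RX rings, reset the port and mask all of its
 * interrupt sources.
 */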
int mv64360_eth_stop (struct eth_device *dev)
{
	MV_REG_WRITE (MV64360_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
	DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
	mv64360_eth_real_stop (dev);

	return 0;
}

static int mv64360_eth_real_stop (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	unsigned int port_num;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	mv64360_eth_free_tx_rings (dev);
	mv64360_eth_free_rx_rings (dev);

	eth_port_reset (ethernet_private->port_num);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
	MV_REG_WRITE (MV64360_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_MASK_REG (port_num), 0);

	MV_REG_WRITE (MV64360_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
	MV_RESET_REG_BITS (MV64360_CPU_INTERRUPT0_MASK_HIGH,
			   BIT0 << port_num);

#ifndef UPDATE_STATS_BY_SOFTWARE
	if (port_private->eth_running == MAGIC_ETH_RUNNING) {
		port_private->eth_running = 0;
		mv64360_eth_print_stat (dev);
	}
	memset (port_private->stats, 0, sizeof (struct net_device_stats));
#endif
	DP (printf ("\nEthernet stopped ... \n"));
	return 0;
}
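
/*
 * mv64360_eth_xmit - queue one packet on TX queue 0 as a single
 * first+last descriptor, update the software statistics and then poll
 * the ring until the transmitted descriptor has been returned.
 */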
int mv64360_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
		      int dataSize)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	PKT_INFO pkt_info;
	ETH_FUNC_RET_STATUS status;
	struct net_device_stats *stats;
	ETH_FUNC_RET_STATUS release_result;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;

	stats = port_private->stats;

	pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
	pkt_info.byte_cnt = dataSize;
	pkt_info.buf_ptr = (unsigned int) dataPtr;
	pkt_info.return_info = 0;

	status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
	if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
		printf ("Error on transmitting packet ..");
		if (status == ETH_QUEUE_FULL)
			printf ("ETH Queue is full.\n");
		if (status == ETH_QUEUE_LAST_RESOURCE)
			printf ("ETH Queue: using last available resource.\n");
		goto error;
	}

	stats->tx_bytes += dataSize;
	stats->tx_packets++;

	do {
		release_result =
			eth_tx_return_desc (ethernet_private, ETH_Q0,
					    &pkt_info);
		switch (release_result) {
		case ETH_OK:
			DP (printf ("descriptor released\n"));
			if (pkt_info.cmd_sts & BIT0) {
				printf ("Error in TX\n");
				stats->tx_errors++;
			}
			break;
		case ETH_RETRY:
			DP (printf ("transmission still in process\n"));
			break;

		case ETH_ERROR:
			printf ("routine can not access Tx desc ring\n");
			break;

		case ETH_END_OF_JOB:
			DP (printf ("the routine has nothing to release\n"));
			break;
		default:
			break;
		}
	} while (release_result == ETH_OK);

	return 0;
error:
	return 1;
}
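
/*
 * mv64360_eth_receive - drain RX queue 0: hand every good single-buffer
 * frame to NetReceive(), count and drop fragmented or errored frames,
 * and return each buffer to the ring afterwards.
 */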
int mv64360_eth_receive (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	PKT_INFO pkt_info;
	struct net_device_stats *stats;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	stats = port_private->stats;

	while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) ==
		ETH_OK)) {

#ifdef DEBUG_MV_ETH
		if (pkt_info.byte_cnt != 0) {
			printf ("%s: Received %d byte Packet @ 0x%x\n",
				__FUNCTION__, pkt_info.byte_cnt,
				pkt_info.buf_ptr);
		}
#endif

		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;

		if (((pkt_info.
		      cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
		     (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
		    || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;

			printf ("Received packet spread on multiple descriptors\n");

			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
				stats->rx_errors++;
			}

			pkt_info.buf_ptr &= ~0x7;
			pkt_info.byte_cnt = 0x0000;

			if (eth_rx_return_buff
			    (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
				printf ("Error while returning the RX Desc to Ring\n");
			} else {
				DP (printf ("RX Desc returned to Ring\n"));
			}
		} else {
#ifdef DEBUG_MV_ETH
			printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
#endif

			NetReceive ((uchar *) pkt_info.buf_ptr,
				    (int) pkt_info.byte_cnt);

			pkt_info.buf_ptr &= ~0x7;
			pkt_info.byte_cnt = 0x0000;
			DP (printf ("RX: pkt_info.buf_ptr = %x\n",
				    pkt_info.buf_ptr));
			if (eth_rx_return_buff
			    (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
				printf ("Error while returning the RX Desc to Ring\n");
			} else {
				DP (printf ("RX Desc returned to Ring\n"));
			}
		}
	}
	mv64360_eth_get_stats (dev);
	return 1;
}

static struct net_device_stats *mv64360_eth_get_stats (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;

	mv64360_eth_update_stat (dev);

	return port_private->stats;
}
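
/*
 * mv64360_eth_update_stat - fold the hardware MIB counters of the port
 * into the software net_device_stats structure.  The MIB registers
 * clear on read, so the values are accumulated; the high halves of the
 * 64 bit octet counters are read only to clear them.
 */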
static void mv64360_eth_update_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	struct net_device_stats *stats;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	stats = port_private->stats;

	stats->rx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_RECEIVED);
	stats->tx_packets += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_FRAMES_SENT);
	stats->rx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);

	(void) eth_read_mib_counter (ethernet_private->port_num,
				     ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
	stats->tx_bytes += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_GOOD_OCTETS_SENT_LOW);
	(void) eth_read_mib_counter (ethernet_private->port_num,
				     ETH_MIB_GOOD_OCTETS_SENT_HIGH);
	stats->rx_errors += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MAC_RECEIVE_ERROR);

	stats->rx_dropped += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_BAD_CRC_EVENT);
	stats->multicast += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_MULTICAST_FRAMES_RECEIVED);
	stats->collisions += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_COLLISION) +
		(unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_LATE_COLLISION);

	stats->rx_length_errors += (unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_UNDERSIZE_RECEIVED) +
		(unsigned long)
		eth_read_mib_counter (ethernet_private->port_num,
				      ETH_MIB_OVERSIZE_RECEIVED);
}

#ifndef UPDATE_STATS_BY_SOFTWARE
static void mv64360_eth_print_stat (struct eth_device *dev)
{
	ETH_PORT_INFO *ethernet_private;
	struct mv64360_eth_priv *port_private;
	struct net_device_stats *stats;

	ethernet_private = (ETH_PORT_INFO *) dev->priv;
	port_private =
		(struct mv64360_eth_priv *) ethernet_private->port_private;
	stats = port_private->stats;

	printf ("\n### Network statistics: ###\n");
	printf ("--------------------------\n");
	printf (" Packets received:        %ld\n", stats->rx_packets);
	printf (" Packets sent:            %ld\n", stats->tx_packets);
	printf (" Received bytes:          %ld\n", stats->rx_bytes);
	printf (" Sent bytes:              %ld\n", stats->tx_bytes);
	if (stats->rx_errors != 0)
		printf (" Rx Errors:               %ld\n",
			stats->rx_errors);
	if (stats->rx_dropped != 0)
		printf (" Rx dropped (CRC Errors): %ld\n",
			stats->rx_dropped);
	if (stats->multicast != 0)
		printf (" Rx multicast frames:     %ld\n",
			stats->multicast);
	if (stats->collisions != 0)
		printf (" No. of collisions:       %ld\n",
			stats->collisions);
	if (stats->rx_length_errors != 0)
		printf (" Rx length errors:        %ld\n",
			stats->rx_length_errors);
}
#endif

bool db64360_eth_start (struct eth_device *dev)
{
	return (mv64360_eth_open (dev));
}

#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
	MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))

#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
	MV_REG_WRITE(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), \
		     (1 << (8 + tx_queue)))

#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
	MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))

#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
	MV_REG_WRITE(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))

#define CURR_RFD_GET(p_curr_desc, queue) \
	((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])

#define CURR_RFD_SET(p_curr_desc, queue) \
	(p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))

#define USED_RFD_GET(p_used_desc, queue) \
	((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])

#define USED_RFD_SET(p_used_desc, queue) \
	(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))

#define CURR_TFD_GET(p_curr_desc, queue) \
	((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])

#define CURR_TFD_SET(p_curr_desc, queue) \
	(p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))

#define USED_TFD_GET(p_used_desc, queue) \
	((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])

#define USED_TFD_SET(p_used_desc, queue) \
	(p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))

#define FIRST_TFD_GET(p_first_desc, queue) \
	((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])

#define FIRST_TFD_SET(p_first_desc, queue) \
	(p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))

#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])

#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])

#define LINK_UP_TIMEOUT 100000
#define PHY_BUSY_TIMEOUT 10000000

static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
static int ethernet_phy_get (ETH_PORT eth_port_num);

static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param);
static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
			      ETH_QUEUE queue, int option);
#if 0
static bool eth_port_smc_addr (ETH_PORT eth_port_num,
			       unsigned char mc_byte,
			       ETH_QUEUE queue, int option);
static bool eth_port_omc_addr (ETH_PORT eth_port_num,
			       unsigned char crc8,
			       ETH_QUEUE queue, int option);
#endif

static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count);

void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);

typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;

u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
{
	u32 result = 0;
	u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);

	if (enable & (1 << bank))
		return 0;
	if (bank == BANK0)
		result = MV_REG_READ (MV64360_CS_0_BASE_ADDR);
	if (bank == BANK1)
		result = MV_REG_READ (MV64360_CS_1_BASE_ADDR);
	if (bank == BANK2)
		result = MV_REG_READ (MV64360_CS_2_BASE_ADDR);
	if (bank == BANK3)
		result = MV_REG_READ (MV64360_CS_3_BASE_ADDR);
	result &= 0x0000ffff;
	result = result << 16;
	return result;
}

u32 mv_get_dram_bank_size (MEMORY_BANK bank)
{
	u32 result = 0;
	u32 enable = MV_REG_READ (MV64360_BASE_ADDR_ENABLE);

	if (enable & (1 << bank))
		return 0;
	if (bank == BANK0)
		result = MV_REG_READ (MV64360_CS_0_SIZE);
	if (bank == BANK1)
		result = MV_REG_READ (MV64360_CS_1_SIZE);
	if (bank == BANK2)
		result = MV_REG_READ (MV64360_CS_2_SIZE);
	if (bank == BANK3)
		result = MV_REG_READ (MV64360_CS_3_SIZE);
	result &= 0x0000ffff;
	result += 1;
	result = result << 16;
	return result;
}

u32 mv_get_internal_sram_base (void)
{
	u32 result;

	result = MV_REG_READ (MV64360_INTEGRATED_SRAM_BASE_ADDR);
	result &= 0x0000ffff;
	result = result << 16;
	return result;
}
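
/*
 * eth_port_init - set the port context to a known state: clear the
 * per-queue descriptor pointers, reset the port, open address decode
 * windows for the four DRAM banks and the internal SRAM, clear the MAC
 * filter tables and program the PHY address.
 */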
static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
{
	int queue;
	ETH_WIN_PARAM win_param;

	p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
	p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
	p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
	p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;

	p_eth_port_ctrl->port_rx_queue_command = 0;
	p_eth_port_ctrl->port_tx_queue_command = 0;

	for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
		CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
		USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
		p_eth_port_ctrl->rx_resource_err[queue] = false;
	}

	for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
		CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
		USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
		FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
		p_eth_port_ctrl->tx_resource_err[queue] = false;
	}

	eth_port_reset (p_eth_port_ctrl->port_num);

	win_param.win = ETH_WIN0;
	win_param.target = ETH_TARGET_DRAM;
	win_param.attributes = EBAR_ATTR_DRAM_CS0;
#ifndef CONFIG_NOT_COHERENT_CACHE
	win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
#endif
	win_param.high_addr = 0;
	win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
	win_param.size = mv_get_dram_bank_size (BANK0);
	if (win_param.size == 0)
		win_param.enable = 0;
	else
		win_param.enable = 1;
	win_param.access_ctrl = EWIN_ACCESS_FULL;

	eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);

	win_param.win = ETH_WIN1;
	win_param.target = ETH_TARGET_DRAM;
	win_param.attributes = EBAR_ATTR_DRAM_CS1;
#ifndef CONFIG_NOT_COHERENT_CACHE
	win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
#endif
	win_param.high_addr = 0;
	win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
	win_param.size = mv_get_dram_bank_size (BANK1);
	if (win_param.size == 0)
		win_param.enable = 0;
	else
		win_param.enable = 1;
	win_param.access_ctrl = EWIN_ACCESS_FULL;

	eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);

	win_param.win = ETH_WIN2;
	win_param.target = ETH_TARGET_DRAM;
	win_param.attributes = EBAR_ATTR_DRAM_CS2;
#ifndef CONFIG_NOT_COHERENT_CACHE
	win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
#endif
	win_param.high_addr = 0;
	win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
	win_param.size = mv_get_dram_bank_size (BANK2);
	if (win_param.size == 0)
		win_param.enable = 0;
	else
		win_param.enable = 1;
	win_param.access_ctrl = EWIN_ACCESS_FULL;

	eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);

	win_param.win = ETH_WIN3;
	win_param.target = ETH_TARGET_DRAM;
	win_param.attributes = EBAR_ATTR_DRAM_CS3;
#ifndef CONFIG_NOT_COHERENT_CACHE
	win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
#endif
	win_param.high_addr = 0;
	win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
	win_param.size = mv_get_dram_bank_size (BANK3);
	if (win_param.size == 0)
		win_param.enable = 0;
	else
		win_param.enable = 1;
	win_param.access_ctrl = EWIN_ACCESS_FULL;

	eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);

	win_param.win = ETH_WIN4;
	win_param.target = EBAR_TARGET_CBS;
	win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
	win_param.high_addr = 0;
	win_param.base_addr = mv_get_internal_sram_base ();
	win_param.size = MV64360_INTERNAL_SRAM_SIZE;
	win_param.enable = 1;
	win_param.access_ctrl = EWIN_ACCESS_FULL;

	eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);

	eth_port_init_mac_tables (p_eth_port_ctrl->port_num);

	ethernet_phy_set (p_eth_port_ctrl->port_num,
			  p_eth_port_ctrl->port_phy_addr);

	return;
}
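
/*
 * eth_port_start - load the current descriptor pointers into the
 * controller, program the unicast address, write the port
 * configuration, serial control and SDMA registers, enable the port
 * and the configured RX queues, and finally check the PHY link bit.
 */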
static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
{
	int queue;
	volatile ETH_TX_DESC *p_tx_curr_desc;
	volatile ETH_RX_DESC *p_rx_curr_desc;
	unsigned int phy_reg_data;
	ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;

	for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
		CURR_TFD_GET (p_tx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_tx_curr_desc));
	}

	for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
		CURR_RFD_GET (p_rx_curr_desc, queue);
		MV_REG_WRITE ((MV64360_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
			       (eth_port_num)
			       + (4 * queue)),
			      ((unsigned int) p_rx_curr_desc));

		if (p_rx_curr_desc != NULL)
			eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
					      p_eth_port_ctrl->port_mac_addr,
					      queue);
	}

	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_config);

	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      p_eth_port_ctrl->port_config_extend);

	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
		      p_eth_port_ctrl->port_serial_control);

	MV_SET_REG_BITS (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
			 ETH_SERIAL_PORT_ENABLE);

	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
		      p_eth_port_ctrl->port_sdma_config);

	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
		      (eth_port_num), 0x3fffffff);
	MV_REG_WRITE (MV64360_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
		      (eth_port_num), 0x03fffcff);

	MV_REG_WRITE (MV64360_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);

	MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
		      p_eth_port_ctrl->port_rx_queue_command);

	eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);

	if (!(phy_reg_data & 0x20))
		return false;

	return true;
}
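
/*
 * eth_port_uc_addr_set - program the port unicast MAC address registers
 * and make the matching unicast filter table entry point at the given
 * RX queue.
 */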
static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
				  unsigned char *p_addr, ETH_QUEUE queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
		(p_addr[2] << 8) | (p_addr[3] << 0);

	MV_REG_WRITE (MV64360_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
	MV_REG_WRITE (MV64360_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);

	eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);

	return;
}
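
/*
 * eth_port_uc_addr - set or clear the unicast filter table entry that
 * corresponds to the low nibble of the MAC address (uc_nibble), binding
 * it to the given RX queue.
 */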
static bool eth_port_uc_addr (ETH_PORT eth_port_num,
			      unsigned char uc_nibble,
			      ETH_QUEUE queue, int option)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	uc_nibble = (0xf & uc_nibble);
	tbl_offset = (uc_nibble / 4) * 4;
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		unicast_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
				      (eth_port_num)
				      + tbl_offset));
		unicast_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
			       (eth_port_num)
			       + tbl_offset), unicast_reg);
		break;

	case ACCEPT_MAC_ADDR:
		unicast_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
				      (eth_port_num)
				      + tbl_offset));
		unicast_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
			       (eth_port_num)
			       + tbl_offset), unicast_reg);
		break;

	default:
		return false;
	}
	return true;
}

#if 0
static void eth_port_mc_addr (ETH_PORT eth_port_num,
			      unsigned char *p_addr,
			      ETH_QUEUE queue, int option)
{
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int mac_array[48];
	int crc[8];
	int i;

	if ((p_addr[0] == 0x01) &&
	    (p_addr[1] == 0x00) &&
	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00))
		eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
	else {
		mac_h = (p_addr[0] << 8) | (p_addr[1]);
		mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

		for (i = 0; i < 32; i++)
			mac_array[i] = (mac_l >> i) & 0x1;
		for (i = 32; i < 48; i++)
			mac_array[i] = (mac_h >> (i - 32)) & 0x1;

		crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
			mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
			mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
			mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
			mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
			mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[0];

		crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
			mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
			mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
			mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
			mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
			mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
			mac_array[0];

		crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
			mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
			mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
			mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
			mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
			mac_array[2] ^ mac_array[1] ^ mac_array[0];

		crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
			mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
			mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
			mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
			mac_array[2] ^ mac_array[1];

		crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
			mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
			mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
			mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
			mac_array[2];

		crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
			mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
			mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
			mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
			mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
			mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
			mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
			mac_array[3];

		crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
			mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
			mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
			mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
			mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
			mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
			mac_array[6] ^ mac_array[5] ^ mac_array[4];

		crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
			mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
			mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
			mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
			mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
			mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
			mac_array[6] ^ mac_array[5];

		for (i = 0; i < 8; i++)
			crc_result = crc_result | (crc[i] << i);

		eth_port_omc_addr (eth_port_num, crc_result, queue, option);
	}
	return;
}

static bool eth_port_smc_addr (ETH_PORT eth_port_num,
			       unsigned char mc_byte,
			       ETH_QUEUE queue, int option)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (mc_byte / 4) * 4;
	reg_offset = mc_byte % 4;
	queue &= 0x7;

	switch (option) {
	case REJECT_MAC_ADDR:
		smc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		smc_table_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
		break;

	case ACCEPT_MAC_ADDR:
		smc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
		break;

	default:
		return false;
	}
	return true;
}

static bool eth_port_omc_addr (ETH_PORT eth_port_num,
			       unsigned char crc8,
			       ETH_QUEUE queue, int option)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4;
	reg_offset = crc8 % 4;
	queue &= 0x7;

	switch (option) {
	case REJECT_MAC_ADDR:
		omc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		omc_table_reg &= (0x0E << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
		break;

	case ACCEPT_MAC_ADDR:
		omc_table_reg =
			MV_REG_READ ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
		omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
		break;

	default:
		return false;
	}
	return true;
}
#endif

static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
{
	int table_index;

	for (table_index = 0; table_index <= 0xC; table_index += 4)
		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_UNICAST_TABLE_BASE
			       (eth_port_num) + table_index), 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);

		MV_REG_WRITE ((MV64360_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
	}
}
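
/*
 * eth_clear_mib_counters - the MIB counters clear on read, so walking
 * through the counter block once is enough to zero all of them.
 */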
static void eth_clear_mib_counters (ETH_PORT eth_port_num)
{
	int i;

	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
	     i += 4) {
		(void) MV_REG_READ ((MV64360_ETH_MIB_COUNTERS_BASE
				     (eth_port_num) + i));
	}

	return;
}

unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
				   unsigned int mib_offset)
{
	return (MV_REG_READ (MV64360_ETH_MIB_COUNTERS_BASE (eth_port_num)
			     + mib_offset));
}

static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
{
	unsigned int reg_data;

	reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);

	reg_data &= ~(0x1F << (5 * eth_port_num));
	reg_data |= (phy_addr << (5 * eth_port_num));

	MV_REG_WRITE (MV64360_ETH_PHY_ADDR_REG, reg_data);

	return;
}

static int ethernet_phy_get (ETH_PORT eth_port_num)
{
	unsigned int reg_data;

	reg_data = MV_REG_READ (MV64360_ETH_PHY_ADDR_REG);

	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}
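
/*
 * ethernet_phy_reset - set the reset bit in PHY control register 0 and
 * then poll status register 1 until the link bit comes back, giving up
 * after a fixed number of tries.
 */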
static bool ethernet_phy_reset (ETH_PORT eth_port_num)
{
	unsigned int time_out = 50;
	unsigned int phy_reg_data;

	eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;
	eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);

	do {
		eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);

		if (time_out-- == 0)
			return false;
	}
	while (!(phy_reg_data & 0x20));

	return true;
}
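
/*
 * eth_port_reset - stop all TX and RX queues and wait until they report
 * idle, clear the MIB counters and finally clear the serial port enable
 * bit.
 */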
static void eth_port_reset (ETH_PORT eth_port_num)
{
	unsigned int reg_data;

	reg_data =
		MV_REG_READ (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
			     (eth_port_num));

	if (reg_data & 0xFF) {
		MV_REG_WRITE (MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
			      (eth_port_num), (reg_data << 8));

		do {
			reg_data =
				MV_REG_READ
				(MV64360_ETH_TRANSMIT_QUEUE_COMMAND_REG
				 (eth_port_num));
		}
		while (reg_data & 0xFF);
	}

	reg_data =
		MV_REG_READ (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
			     (eth_port_num));

	if (reg_data & 0xFF) {
		MV_REG_WRITE (MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
			      (eth_port_num), (reg_data << 8));

		do {
			reg_data =
				MV_REG_READ
				(MV64360_ETH_RECEIVE_QUEUE_COMMAND_REG
				 (eth_port_num));
		}
		while (reg_data & 0xFF);
	}

	eth_clear_mib_counters (eth_port_num);

	reg_data =
		MV_REG_READ (MV64360_ETH_PORT_SERIAL_CONTROL_REG
			     (eth_port_num));
	reg_data &= ~ETH_SERIAL_PORT_ENABLE;
	MV_REG_WRITE (MV64360_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
		      reg_data);

	return;
}

#if 0
static void ethernet_set_config_reg (ETH_PORT eth_port_num,
				     unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg =
		MV_REG_READ (MV64360_ETH_PORT_CONFIG_REG (eth_port_num));
	eth_config_reg |= value;
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_REG (eth_port_num),
		      eth_config_reg);

	return;
}
#endif

#if 0
static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
				       unsigned int value)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	eth_config_reg &= ~value;
	MV_REG_WRITE (MV64360_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
		      eth_config_reg);

	return;
}
#endif

#if 0
static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
{
	unsigned int eth_config_reg;

	eth_config_reg = MV_REG_READ (MV64360_ETH_PORT_CONFIG_EXTEND_REG
				      (eth_port_num));
	return eth_config_reg;
}
#endif
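
/*
 * eth_port_read_smi_reg - read a PHY register over the SMI interface:
 * wait for the SMI unit to become idle, issue the read opcode, wait for
 * the read-valid bit and return the 16 bit result.
 */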
static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
				   unsigned int phy_reg, unsigned int *value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;
	int phy_addr;

	phy_addr = ethernet_phy_get (eth_port_num);

	do {
		reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
		if (time_out-- == 0) {
			return false;
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	MV_REG_WRITE (MV64360_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_READ);

	time_out = PHY_BUSY_TIMEOUT;

	do {
		reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
		if (time_out-- == 0) {
			return false;
		}
	}
	while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);

#define PHY_UPDATE_TIMEOUT 10000
	for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);

	reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);

	*value = reg_value & 0xffff;

	return true;
}

static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
				    unsigned int phy_reg, unsigned int value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;
	int phy_addr;

	phy_addr = ethernet_phy_get (eth_port_num);

	do {
		reg_value = MV_REG_READ (MV64360_ETH_SMI_REG);
		if (time_out-- == 0) {
			return false;
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	MV_REG_WRITE (MV64360_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_WRITE | (value & 0xffff));
	return true;
}
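
/*
 * eth_set_access_control - program one Ethernet address decode window:
 * access protection, window size, base address/target/attributes, the
 * high address remap register (windows 0-3 only) and the enable bit.
 */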
static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param)
{
	unsigned int access_prot_reg;

	access_prot_reg = MV_REG_READ (MV64360_ETH_ACCESS_PROTECTION_REG
				       (eth_port_num));
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MV_REG_WRITE (MV64360_ETH_ACCESS_PROTECTION_REG (eth_port_num),
		      access_prot_reg);

	MV_REG_WRITE ((MV64360_ETH_SIZE_REG_0 +
		       (ETH_SIZE_REG_GAP * param->win)),
		      (((param->size / 0x10000) - 1) << 16));

	MV_REG_WRITE ((MV64360_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
		      (param->target | param->attributes | param->base_addr));

	if (param->win < 4)
		MV_REG_WRITE ((MV64360_ETH_HIGH_ADDR_REMAP_REG_0 +
			       (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
			      param->high_addr);

	if (param->enable == 1)
		MV_RESET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				   (1 << param->win));
	else
		MV_SET_REG_BITS (MV64360_ETH_BASE_ADDR_ENABLE_REG,
				 (1 << param->win));
}
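
/*
 * ether_init_rx_desc_ring - build the RX descriptor ring in the given
 * memory area: chain the descriptors into a ring, attach a buffer of
 * rx_buff_size bytes to each one, give them to the DMA and record the
 * ring in the port control structure.
 */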
static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE rx_queue,
				     int rx_desc_num,
				     int rx_buff_size,
				     unsigned int rx_desc_base_addr,
				     unsigned int rx_buff_base_addr)
{
	ETH_RX_DESC *p_rx_desc;
	ETH_RX_DESC *p_rx_prev_desc;
	unsigned int buffer_addr;
	int ix;

	p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
	p_rx_prev_desc = p_rx_desc;
	buffer_addr = rx_buff_base_addr;

	if (rx_buff_base_addr & 0xF)
		return false;

	if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
		return false;

	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
		return false;

	for (ix = 0; ix < rx_desc_num; ix++) {
		p_rx_desc->buf_size = rx_buff_size;
		p_rx_desc->byte_cnt = 0x0000;
		p_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
		p_rx_desc->next_desc_ptr =
			((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
		p_rx_desc->buf_ptr = buffer_addr;
		p_rx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_rx_desc, 0);
		buffer_addr += rx_buff_size;
		p_rx_prev_desc = p_rx_desc;
		p_rx_desc = (ETH_RX_DESC *)
			((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
	}

	p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
	D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);

	CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
	USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);

	p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
		(ETH_RX_DESC *) rx_desc_base_addr;
	p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
		rx_desc_num * RX_DESC_ALIGNED_SIZE;

	p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);

	return true;
}
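
/*
 * ether_init_tx_desc_ring - build the TX descriptor ring in the given
 * memory area: chain the descriptors into a ring, attach a static
 * buffer of tx_buff_size bytes to each one and record the ring in the
 * port control structure.
 */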
static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE tx_queue,
				     int tx_desc_num,
				     int tx_buff_size,
				     unsigned int tx_desc_base_addr,
				     unsigned int tx_buff_base_addr)
{
	ETH_TX_DESC *p_tx_desc;
	ETH_TX_DESC *p_tx_prev_desc;
	unsigned int buffer_addr;
	int ix;

	p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
	p_tx_prev_desc = p_tx_desc;
	buffer_addr = tx_buff_base_addr;

	if (tx_buff_base_addr & 0xF)
		return false;

	if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
	    || (tx_buff_size < TX_BUFFER_MIN_SIZE))
		return false;

	for (ix = 0; ix < tx_desc_num; ix++) {
		p_tx_desc->byte_cnt = 0x0000;
		p_tx_desc->l4i_chk = 0x0000;
		p_tx_desc->cmd_sts = 0x00000000;
		p_tx_desc->next_desc_ptr =
			((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
		p_tx_desc->buf_ptr = buffer_addr;
		p_tx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_tx_desc, 0);
		buffer_addr += tx_buff_size;
		p_tx_prev_desc = p_tx_desc;
		p_tx_desc = (ETH_TX_DESC *)
			((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
	}

	p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
	D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);

	CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
	USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);

	p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
		(ETH_TX_DESC *) tx_desc_base_addr;
	p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
		(tx_desc_num * TX_DESC_ALIGNED_SIZE);

	p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);

	return true;
}
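
/*
 * eth_port_send - Queue a packet (or packet fragment) on a Tx queue.
 *
 * Fills the current Tx descriptor from p_pkt_info and chains
 * multi-descriptor frames by remembering the first descriptor of the
 * frame; that first descriptor is only handed to the DMA engine once the
 * last fragment is in place, so the hardware never sees a half-built
 * frame. Returns ETH_QUEUE_FULL if the queue has no free descriptors,
 * ETH_QUEUE_LAST_RESOURCE when the last free descriptor has just been
 * consumed, ETH_ERROR on bad parameters, and ETH_OK otherwise.
 */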
static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
					  ETH_QUEUE tx_queue,
					  PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_first;
	volatile ETH_TX_DESC *p_tx_desc_curr;
	volatile ETH_TX_DESC *p_tx_next_desc_curr;
	volatile ETH_TX_DESC *p_tx_desc_used;
	unsigned int command_status;

	/* Do not touch the ring while it has no free descriptors */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		return ETH_QUEUE_FULL;

	/* Get the current and the last returned descriptor of the queue */
	CURR_TFD_GET (p_tx_desc_curr, tx_queue);
	USED_TFD_GET (p_tx_desc_used, tx_queue);

	if (p_tx_desc_curr == NULL)
		return ETH_ERROR;

	p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;

	if (command_status & (ETH_TX_FIRST_DESC)) {
		/* First fragment of a frame: remember it, but do not hand
		 * it to the DMA engine until the last fragment is queued.
		 */
		FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
		p_tx_desc_first = p_tx_desc_curr;
	} else {
		FIRST_TFD_GET (p_tx_desc_first, tx_queue);
		command_status |= ETH_BUFFER_OWNED_BY_DMA;
	}

	/* Frames of 8 bytes or less hit a controller errata; the intended
	 * workaround (copying the data into the descriptor itself) is kept
	 * below but is unreachable because the function bails out first.
	 */
	if (p_pkt_info->byte_cnt <= 8) {
		printf ("You have failed in the < 8 bytes errata - fixme\n");
		return ETH_ERROR;

		p_tx_desc_curr->buf_ptr =
			(unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
		eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
			    p_pkt_info->byte_cnt);
	} else
		p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;

	p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
	p_tx_desc_curr->return_info = p_pkt_info->return_info;

	if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
		/* Last fragment: close the frame and hand it to the DMA */
		p_tx_desc_curr->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

		if (p_tx_desc_curr != p_tx_desc_first)
			p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;

		/* Flush the descriptors to memory before starting the DMA */
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
		CPU_PIPE_FLUSH;

		ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);

		/* The next frame starts at the next descriptor */
		p_tx_desc_first = p_tx_next_desc_curr;
		FIRST_TFD_SET (p_tx_desc_first, tx_queue);

	} else {
		p_tx_desc_curr->cmd_sts = command_status;
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
	}

	/* Was that the last free descriptor in the ring? */
	if (p_tx_next_desc_curr == p_tx_desc_used) {
		CURR_TFD_SET (p_tx_desc_first, tx_queue);

		p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
		return ETH_QUEUE_LAST_RESOURCE;
	} else {
		/* Advance to the next descriptor */
		CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
		return ETH_OK;
	}
}
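
/*
 * eth_tx_return_desc - Reclaim a transmitted Tx descriptor.
 *
 * Examines the oldest outstanding ("used") descriptor of the queue. If
 * the DMA engine still owns it, ETH_RETRY is returned; if the whole ring
 * has already been reclaimed, ETH_END_OF_JOB is returned. Otherwise the
 * descriptor's status and return_info are copied into p_pkt_info, the
 * used pointer is advanced and any pending resource-error condition on
 * the queue is cleared.
 */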
static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE tx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_used = NULL;
	volatile ETH_TX_DESC *p_tx_desc_first = NULL;
	unsigned int command_status;

	/* Get the oldest outstanding descriptor and the current frame start */
	USED_TFD_GET (p_tx_desc_used, tx_queue);
	FIRST_TFD_GET (p_tx_desc_first, tx_queue);

	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still owned by the DMA engine: transmission not finished yet */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_RETRY;
	}

	/* Nothing left to reclaim */
	if ((p_tx_desc_used == p_tx_desc_first) &&
	    (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_END_OF_JOB;
	}

	/* Hand the descriptor's status back to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = p_tx_desc_used->return_info;
	p_tx_desc_used->return_info = 0;

	/* Advance the used pointer */
	USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);

	/* A descriptor was freed, so the queue is no longer exhausted */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		p_eth_port_ctrl->tx_resource_err[tx_queue] = false;

	D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);

	return ETH_OK;

}
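
/*
 * eth_port_receive - Fetch the next received packet from an Rx queue.
 *
 * Returns ETH_QUEUE_FULL if the queue has run out of descriptors,
 * ETH_END_OF_JOB if the current descriptor is still owned by the DMA
 * engine (i.e. no new packet), and ETH_OK after filling p_pkt_info with
 * the packet's buffer pointer, length, status and return_info. The
 * descriptor is not handed back to the hardware here; that is done
 * later by eth_rx_return_buff().
 */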
static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
					     ETH_QUEUE rx_queue,
					     PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_rx_curr_desc;
	volatile ETH_RX_DESC *p_rx_next_curr_desc;
	volatile ETH_RX_DESC *p_rx_used_desc;
	unsigned int command_status;

	/* All descriptors are in use; nothing can be received right now */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
		printf ("\nRx Queue is full ...\n");
		return ETH_QUEUE_FULL;
	}

	/* Get the current and the last returned descriptor of the queue */
	CURR_RFD_GET (p_rx_curr_desc, rx_queue);
	USED_RFD_GET (p_rx_used_desc, rx_queue);

	if (p_rx_curr_desc == NULL)
		return ETH_ERROR;

	p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
	command_status = p_rx_curr_desc->cmd_sts;

	/* Still owned by the DMA engine: no new packet has arrived */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {

		D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);

		return ETH_END_OF_JOB;
	}

	/* Hand the packet data over to the caller */
	p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = p_rx_curr_desc->return_info;
	p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;

	p_rx_curr_desc->return_info = 0;

	/* Advance to the next descriptor */
	CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);

	/* The ring is exhausted once the current pointer catches up
	 * with the used pointer.
	 */
	if (p_rx_next_curr_desc == p_rx_used_desc)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = true;

	D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
	CPU_PIPE_FLUSH;
	return ETH_OK;
}
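
/*
 * eth_rx_return_buff - Return a processed Rx buffer to the hardware.
 *
 * Refills the oldest "used" descriptor of the queue from p_pkt_info,
 * hands it back to the DMA engine, advances the used pointer and clears
 * any pending resource-error condition on the queue.
 */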
static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE rx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_used_rx_desc;

	/* Get the oldest returned descriptor of the queue */
	USED_RFD_GET (p_used_rx_desc, rx_queue);

	if (p_used_rx_desc == NULL)
		return ETH_ERROR;

	/* Refill the descriptor from the caller's packet info */
	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->return_info = p_pkt_info->return_info;
	p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
	p_used_rx_desc->buf_size = MV64360_RX_BUFFER_SIZE;

	/* Make sure the fields above are written before ownership flips */
	CPU_PIPE_FLUSH;

	/* Hand the descriptor back to the DMA engine */
	p_used_rx_desc->cmd_sts =
		ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;

	D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
	CPU_PIPE_FLUSH;

	/* Advance the used pointer */
	USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);

	/* A descriptor was returned, so the queue is no longer exhausted */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = false;

	return ETH_OK;
}
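
/*
 * eth_port_set_rx_coal - Program the Rx interrupt coalescing delay.
 *
 * Converts a delay in microseconds into coalescing register units
 * (t_clk cycles / 64) and writes it into the port's SDMA configuration
 * register. Currently compiled out.
 */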
#if 0
static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num),
		      ((coal & 0x3fff) << 8) |
		      (MV_REG_READ
		       (MV64360_ETH_SDMA_CONFIG_REG (eth_port_num))
		       & 0xffc000ff));
	return coal;
}

#endif
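
/*
 * eth_port_set_tx_coal - Program the Tx interrupt coalescing delay.
 *
 * Converts a delay in microseconds into coalescing register units
 * (t_clk cycles / 64) and writes it into the port's Tx FIFO urgent
 * threshold register. Currently compiled out.
 */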
#if 0
static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64360_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
		      coal << 4);
	return coal;
}
#endif
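
/*
 * eth_b_copy - Simple byte-wise copy between two flat addresses.
 *
 * Used by the short-frame path in eth_port_send() to copy packet data
 * directly into the Tx descriptor.
 */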
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	/* The first word of the destination is cleared before copying */
	*(unsigned int *) dst_addr = 0x0;

	while (byte_count != 0) {
		*(char *) dst_addr = *(char *) src_addr;
		dst_addr++;
		src_addr++;
		byte_count--;
	}
}