/* Ethernet driver for the Marvell MV64460 system controller */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>

#include "mv_eth.h"

#undef DEBUG_MV_ETH

#ifdef DEBUG_MV_ETH
#define DEBUG
#define DP(x) x
#else
#define DP(x)
#endif

#define ETH_PHY_DFCDL_CONFIG0_REG	0x2100
#define ETH_PHY_DFCDL_CONFIG1_REG	0x2104
#define ETH_PHY_DFCDL_ADDR_REG		0x2110
#define ETH_PHY_DFCDL_DATA0_REG		0x2114

#define PHY_AUTONEGOTIATE_TIMEOUT	4000
#define PHY_UPDATE_TIMEOUT		10000

#undef MV64460_CHECKSUM_OFFLOAD

#undef MV64460_RX_QUEUE_FILL_ON_TASK

#define MAGIC_ETH_RUNNING		8031971
#define MV64460_INTERNAL_SRAM_SIZE	_256K
#define EXTRA_BYTES			32
#define WRAP				(ETH_HLEN + 2 + 4 + 16)
#define BUFFER_MTU			(dev->mtu + WRAP)
#define INT_CAUSE_UNMASK_ALL		0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT	0x0011ffff
#ifdef MV64460_RX_FILL_ON_TASK
#define INT_CAUSE_MASK_ALL		0x00000000
#define INT_CAUSE_CHECK_BITS		INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT	INT_CAUSE_UNMASK_ALL_EXT
#endif

/* Register accessors: the internal registers are accessed little-endian */
#define MV_REG_READ(offset) \
	my_le32_to_cpu(*(volatile unsigned int *)(INTERNAL_REG_BASE_ADDR + offset))
#define MV_REG_WRITE(offset, data) \
	*(volatile unsigned int *)(INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32(data)
#define MV_SET_REG_BITS(regOffset, bits) \
	((*((volatile unsigned int *)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= \
	 ((unsigned int)my_cpu_to_le32(bits)))
#define MV_RESET_REG_BITS(regOffset, bits) \
	((*((volatile unsigned int *)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= \
	 ~((unsigned int)my_cpu_to_le32(bits)))

/* a 32-bit byte swap is symmetric, so both directions use the same helper */
#define my_cpu_to_le32(x) my_le32_to_cpu((x))

static int mv64460_eth_real_open (struct eth_device *eth);
static int mv64460_eth_real_stop (struct eth_device *eth);
static struct net_device_stats *mv64460_eth_get_stats (struct eth_device *dev);
static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
static void mv64460_eth_update_stat (struct eth_device *dev);
bool db64460_eth_start (struct eth_device *eth);
unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
				   unsigned int mib_offset);
int mv64460_eth_receive (struct eth_device *dev);

int mv64460_eth_xmit (struct eth_device *, volatile void *packet, int length);

int mv_miiphy_read (const char *devname, unsigned char phy_addr,
		    unsigned char phy_reg, unsigned short *value);
int mv_miiphy_write (const char *devname, unsigned char phy_addr,
		     unsigned char phy_reg, unsigned short value);

int phy_setup_aneg (char *devname, unsigned char addr);

#ifndef UPDATE_STATS_BY_SOFTWARE
static void mv64460_eth_print_stat (struct eth_device *dev);
#endif

extern void NetReceive (volatile uchar *, int);

extern unsigned int INTERNAL_REG_BASE_ADDR;

/* swap a 32-bit value between CPU byte order and little-endian register order */
unsigned long my_le32_to_cpu (unsigned long x)
{
	return (((x & 0x000000ffU) << 24) |
		((x & 0x0000ff00U) << 8) |
		((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24));
}

#ifdef DEBUG_MV_ETH
void print_globals (struct eth_device *dev)
{
	printf ("Ethernet PRINT_Globals-Debug function\n");
	printf ("Base Address for ETH_PORT_INFO: %08x\n",
		(unsigned int) dev->priv);
	printf ("Base Address for mv64460_eth_priv: %08x\n",
		(unsigned int) &(((ETH_PORT_INFO *) dev->priv)->port_private));

	printf ("GT Internal Base Address: %08x\n",
		INTERNAL_REG_BASE_ADDR);
	printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0],
		MV64460_TX_QUEUE_SIZE);
	printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0],
		MV64460_RX_QUEUE_SIZE);
	printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_buffer_base[0],
		(MV64460_RX_QUEUE_SIZE * MV64460_RX_BUFFER_SIZE) + 32);
	printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
		(unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_buffer_base[0],
		(MV64460_TX_QUEUE_SIZE * MV64460_TX_BUFFER_SIZE) + 32);
}
#endif

/*
 * mv64460_eth_print_phy_status - print link state, duplex and speed of
 * the given device, as reported by the PHY and port status registers.
 */
void mv64460_eth_print_phy_status (struct eth_device *dev)
{
	struct mv64460_eth_priv *port_private;
	unsigned int port_num;
	ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
	unsigned int port_status, phy_reg_data;

	port_private =
		(struct mv64460_eth_priv *) ethernet_private->port_private;
	port_num = port_private->port_num;

	/* Check Link status on PHY */
	eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
	if (!(phy_reg_data & 0x20)) {
		printf ("Ethernet port changed link status to DOWN\n");
	} else {
		port_status =
			MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
		printf ("Ethernet status port %d: Link up", port_num);
		printf (", %s",
			(port_status & BIT2) ? "Full Duplex" : "Half Duplex");
		if (port_status & BIT4)
			printf (", Speed 1 Gbps");
		else
			printf (", %s",
				(port_status & BIT5) ? "Speed 100 Mbps" :
				"Speed 10 Mbps");
		printf ("\n");
	}
}

/*
 * Thin wrappers that adapt the U-Boot eth_device callbacks to the
 * mv64460 driver entry points.
 */
int db64460_eth_probe (struct eth_device *dev)
{
	return ((int) db64460_eth_start (dev));
}

int db64460_eth_poll (struct eth_device *dev)
{
	return mv64460_eth_receive (dev);
}

int db64460_eth_transmit (struct eth_device *dev, volatile void *packet,
			  int length)
{
	mv64460_eth_xmit (dev, packet, length);
	return 0;
}

void db64460_eth_disable (struct eth_device *dev)
{
	mv64460_eth_stop (dev);
}

/*
 * DFCDL table: each entry packs a "write" value in bits [11:6] and a
 * "read" value in bits [5:0] (see the DFCDL() macro); identical
 * write/read values 0..63 are programmed below.
 */
#define DFCDL(write,read)	((write << 6) | read)
unsigned int ethDfcdls[] = {
	DFCDL(0,0), DFCDL(1,1), DFCDL(2,2), DFCDL(3,3),
	DFCDL(4,4), DFCDL(5,5), DFCDL(6,6), DFCDL(7,7),
	DFCDL(8,8), DFCDL(9,9), DFCDL(10,10), DFCDL(11,11),
	DFCDL(12,12), DFCDL(13,13), DFCDL(14,14), DFCDL(15,15),
	DFCDL(16,16), DFCDL(17,17), DFCDL(18,18), DFCDL(19,19),
	DFCDL(20,20), DFCDL(21,21), DFCDL(22,22), DFCDL(23,23),
	DFCDL(24,24), DFCDL(25,25), DFCDL(26,26), DFCDL(27,27),
	DFCDL(28,28), DFCDL(29,29), DFCDL(30,30), DFCDL(31,31),
	DFCDL(32,32), DFCDL(33,33), DFCDL(34,34), DFCDL(35,35),
	DFCDL(36,36), DFCDL(37,37), DFCDL(38,38), DFCDL(39,39),
	DFCDL(40,40), DFCDL(41,41), DFCDL(42,42), DFCDL(43,43),
	DFCDL(44,44), DFCDL(45,45), DFCDL(46,46), DFCDL(47,47),
	DFCDL(48,48), DFCDL(49,49), DFCDL(50,50), DFCDL(51,51),
	DFCDL(52,52), DFCDL(53,53), DFCDL(54,54), DFCDL(55,55),
	DFCDL(56,56), DFCDL(57,57), DFCDL(58,58), DFCDL(59,59),
	DFCDL(60,60), DFCDL(61,61), DFCDL(62,62), DFCDL(63,63),
};

void mv_eth_phy_init (void)
{
	int i;

	/* load the 64 DFCDL entries starting at index 0, then configure */
	MV_REG_WRITE (ETH_PHY_DFCDL_ADDR_REG, 0);

	for (i = 0; i < 64; i++) {
		MV_REG_WRITE (ETH_PHY_DFCDL_DATA0_REG, ethDfcdls[i]);
	}

	MV_REG_WRITE (ETH_PHY_DFCDL_CONFIG0_REG, 0x300000);
}
251
252void mv6446x_eth_initialize (bd_t * bis)
253{
254 struct eth_device *dev;
255 ETH_PORT_INFO *ethernet_private;
256 struct mv64460_eth_priv *port_private;
257 int devnum, x, temp;
258 char *s, *e, buf[64];
259
260
261
262
263 temp = MV_REG_READ(0x20A0);
264 temp |= 0x04000080;
265 MV_REG_WRITE(0x20A0, temp);
266
267 mv_eth_phy_init();
268
269 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
270 dev = calloc (sizeof (*dev), 1);
271 if (!dev) {
272 printf ("%s: mv_enet%d allocation failure, %s\n",
273 __FUNCTION__, devnum, "eth_device structure");
274 return;
275 }
276
277
278 sprintf (dev->name, "mv_enet%d", devnum);
279
280#ifdef DEBUG
281 printf ("Initializing %s\n", dev->name);
282#endif
283
284
285 switch (devnum) {
286 case 0:
287 s = "ethaddr";
288 break;
289 case 1:
290 s = "eth1addr";
291 break;
292 case 2:
293 s = "eth2addr";
294 break;
295 default:
296 printf ("%s: Invalid device number %d\n",
297 __FUNCTION__, devnum);
298 return;
299 }
300
301 temp = getenv_f(s, buf, sizeof (buf));
302 s = (temp > 0) ? buf : NULL;
303
304#ifdef DEBUG
305 printf ("Setting MAC %d to %s\n", devnum, s);
306#endif
307 for (x = 0; x < 6; ++x) {
308 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
309 if (s)
310 s = (*e) ? e + 1 : e;
311 }
312
313 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
314
315 dev->init = (void *) db64460_eth_probe;
316 dev->halt = (void *) ethernet_phy_reset;
317 dev->send = (void *) db64460_eth_transmit;
318 dev->recv = (void *) db64460_eth_poll;
319
320 ethernet_private = calloc (sizeof (*ethernet_private), 1);
321 dev->priv = (void *)ethernet_private;
322 if (!ethernet_private) {
323 printf ("%s: %s allocation failure, %s\n",
324 __FUNCTION__, dev->name,
325 "Private Device Structure");
326 free (dev);
327 return;
328 }
329
330 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
331 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
332
333
		port_private = calloc (sizeof (*port_private), 1);
335 ethernet_private->port_private = (void *)port_private;
336 if (!port_private) {
337 printf ("%s: %s allocation failure, %s\n",
338 __FUNCTION__, dev->name,
339 "Port Private Device Structure");
340
341 free (ethernet_private);
342 free (dev);
343 return;
344 }
345
346 port_private->stats =
347 calloc (sizeof (struct net_device_stats), 1);
348 if (!port_private->stats) {
349 printf ("%s: %s allocation failure, %s\n",
350 __FUNCTION__, dev->name,
351 "Net stat Structure");
352
353 free (port_private);
354 free (ethernet_private);
355 free (dev);
356 return;
357 }
358 memset (ethernet_private->port_private, 0,
359 sizeof (struct mv64460_eth_priv));
360 switch (devnum) {
361 case 0:
362 ethernet_private->port_num = ETH_0;
363 break;
364 case 1:
365 ethernet_private->port_num = ETH_1;
366 break;
367 case 2:
368 ethernet_private->port_num = ETH_2;
369 break;
370 default:
371 printf ("Invalid device number %d\n", devnum);
372 break;
373 };
374
375 port_private->port_num = devnum;
376
377
378
379
380 mv64460_eth_update_stat (dev);
381 memset (port_private->stats, 0,
382 sizeof (struct net_device_stats));
383
384 switch (devnum) {
385 case 0:
386 s = "ethaddr";
387 break;
388 case 1:
389 s = "eth1addr";
390 break;
391 case 2:
392 s = "eth2addr";
393 break;
394 default:
395 printf ("%s: Invalid device number %d\n",
396 __FUNCTION__, devnum);
397 return;
398 }
399
400 temp = getenv_f(s, buf, sizeof (buf));
401 s = (temp > 0) ? buf : NULL;
402
403#ifdef DEBUG
404 printf ("Setting MAC %d to %s\n", devnum, s);
405#endif
406 for (x = 0; x < 6; ++x) {
407 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
408 if (s)
409 s = (*e) ? e + 1 : e;
410 }
411
412 DP (printf ("Allocating descriptor and buffer rings\n"));
413
414 ethernet_private->p_rx_desc_area_base[0] =
415 (ETH_RX_DESC *) memalign (16,
416 RX_DESC_ALIGNED_SIZE *
417 MV64460_RX_QUEUE_SIZE + 1);
418 ethernet_private->p_tx_desc_area_base[0] =
419 (ETH_TX_DESC *) memalign (16,
420 TX_DESC_ALIGNED_SIZE *
421 MV64460_TX_QUEUE_SIZE + 1);
422
423 ethernet_private->p_rx_buffer_base[0] =
424 (char *) memalign (16,
425 MV64460_RX_QUEUE_SIZE *
426 MV64460_TX_BUFFER_SIZE + 1);
427 ethernet_private->p_tx_buffer_base[0] =
428 (char *) memalign (16,
429 MV64460_RX_QUEUE_SIZE *
430 MV64460_TX_BUFFER_SIZE + 1);
431
432#ifdef DEBUG_MV_ETH
433
434 print_globals (dev);
435#endif
436 eth_register (dev);
437
438 miiphy_register(dev->name, mv_miiphy_read, mv_miiphy_write);
439 }
440 DP (printf ("%s: exit\n", __FUNCTION__));
441
442}
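
/*
 * mv64460_eth_open / mv64460_eth_real_open
 *
 * Bring a port up: clear and unmask the port interrupt cause registers,
 * initialise the port and its TX/RX descriptor rings, start the port,
 * optionally reset the PHY and kick auto-negotiation, then wait for
 * auto-negotiation to complete and report the negotiated speed/duplex.
 * Returns 1 on completion.
 */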
458int mv64460_eth_open (struct eth_device *dev)
459{
460 return (mv64460_eth_real_open (dev));
461}
462
463
464static int mv64460_eth_real_open (struct eth_device *dev)
465{
466
467 unsigned int queue;
468 ETH_PORT_INFO *ethernet_private;
469 struct mv64460_eth_priv *port_private;
470 unsigned int port_num;
471 ushort reg_short;
472 int speed;
473 int duplex;
474 int i;
475 int reg;
476
477 ethernet_private = (ETH_PORT_INFO *) dev->priv;
478
479
480 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
481
482 port_private = (struct mv64460_eth_priv *) ethernet_private->port_private;
483 port_num = port_private->port_num;
484
485
486 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num), 0x0000ff00);
487
488
489 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
490 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
491
492
493 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num),
494 INT_CAUSE_UNMASK_ALL);
495
496
497 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
498 INT_CAUSE_UNMASK_ALL_EXT);
499
500
501 ethernet_private->port_phy_addr = 0x1 + (port_num << 1);
502 reg = ethernet_private->port_phy_addr;
503
504
505 eth_port_init (ethernet_private);
506
507
508
509 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
510 unsigned int size;
511
512 port_private->tx_ring_size[queue] = MV64460_TX_QUEUE_SIZE;
513 size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
514 ethernet_private->tx_desc_area_size[queue] = size;
515
516
517 memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
518 0, ethernet_private->tx_desc_area_size[queue]);
519
520
521 if (ether_init_tx_desc_ring
522 (ethernet_private, ETH_Q0,
523 port_private->tx_ring_size[queue],
524 MV64460_TX_BUFFER_SIZE ,
525 (unsigned int) ethernet_private->
526 p_tx_desc_area_base[queue],
527 (unsigned int) ethernet_private->
528 p_tx_buffer_base[queue]) == false)
529 printf ("### Error initializing TX Ring\n");
530 }
531
532
533 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
534 unsigned int size;
535
536
537 port_private->rx_ring_size[queue] = MV64460_RX_QUEUE_SIZE;
538 size = (port_private->rx_ring_size[queue] *
539 RX_DESC_ALIGNED_SIZE);
540 ethernet_private->rx_desc_area_size[queue] = size;
541
542
543 memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
544 0, ethernet_private->rx_desc_area_size[queue]);
545 if ((ether_init_rx_desc_ring
546 (ethernet_private, ETH_Q0,
547 port_private->rx_ring_size[queue],
548 MV64460_RX_BUFFER_SIZE ,
549 (unsigned int) ethernet_private->
550 p_rx_desc_area_base[queue],
551 (unsigned int) ethernet_private->
552 p_rx_buffer_base[queue])) == false)
553 printf ("### Error initializing RX Ring\n");
554 }
555
556 eth_port_start (ethernet_private);
557
558
559 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num),
560 (0x5 << 17) |
561 (MV_REG_READ
562 (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num))
563 & 0xfff1ffff));
564
565
566
567
568
569
570 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
571 MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
572
573#if defined(CONFIG_PHY_RESET)
574
575
576
577
578 if (port_private->first_init == 0) {
579 port_private->first_init = 1;
580 ethernet_phy_reset (port_num);
581
582
583 phy_setup_aneg (dev->name, reg);
584 udelay (1000);
585 }
586#endif
587
	miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
589
590
591
592
593 if ((reg_short & BMSR_ANEGCAPABLE)
594 && !(reg_short & BMSR_ANEGCOMPLETE)) {
595 puts ("Waiting for PHY auto negotiation to complete");
596 i = 0;
597 while (!(reg_short & BMSR_ANEGCOMPLETE)) {
598
599
600
601 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
602 puts (" TIMEOUT !\n");
603 break;
604 }
605
606 if ((i++ % 1000) == 0) {
607 putc ('.');
608 }
609 udelay (1000);
			miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
611
612 }
613 puts (" done\n");
614 udelay (500000);
615 }
616
617 speed = miiphy_speed (dev->name, reg);
618 duplex = miiphy_duplex (dev->name, reg);
619
620 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
621 (int) speed, (duplex == HALF) ? "HALF" : "FULL");
622
623 port_private->eth_running = MAGIC_ETH_RUNNING;
624 return 1;
625}
626
627static int mv64460_eth_free_tx_rings (struct eth_device *dev)
628{
629 unsigned int queue;
630 ETH_PORT_INFO *ethernet_private;
631 struct mv64460_eth_priv *port_private;
632 unsigned int port_num;
633 volatile ETH_TX_DESC *p_tx_curr_desc;
634
635 ethernet_private = (ETH_PORT_INFO *) dev->priv;
636 port_private =
637 (struct mv64460_eth_priv *) ethernet_private->port_private;
638 port_num = port_private->port_num;
639
640
641 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
642 0x0000ff00);
643
644
645 DP (printf ("Clearing previously allocated TX queues... "));
646 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
647
648 for (p_tx_curr_desc =
649 ethernet_private->p_tx_desc_area_base[queue];
650 ((unsigned int) p_tx_curr_desc <= (unsigned int)
651 ethernet_private->p_tx_desc_area_base[queue] +
652 ethernet_private->tx_desc_area_size[queue]);
653 p_tx_curr_desc =
654 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
655 TX_DESC_ALIGNED_SIZE)) {
656
657 if (p_tx_curr_desc->return_info != 0) {
658 p_tx_curr_desc->return_info = 0;
659 DP (printf ("freed\n"));
660 }
661 }
662 DP (printf ("Done\n"));
663 }
664 return 0;
665}
666
667static int mv64460_eth_free_rx_rings (struct eth_device *dev)
668{
669 unsigned int queue;
670 ETH_PORT_INFO *ethernet_private;
671 struct mv64460_eth_priv *port_private;
672 unsigned int port_num;
673 volatile ETH_RX_DESC *p_rx_curr_desc;
674
675 ethernet_private = (ETH_PORT_INFO *) dev->priv;
676 port_private =
677 (struct mv64460_eth_priv *) ethernet_private->port_private;
678 port_num = port_private->port_num;
679
680
681 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
682 0x0000ff00);
683
684
685 DP (printf ("Clearing previously allocated RX queues... "));
686 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
687
688 for (p_rx_curr_desc =
689 ethernet_private->p_rx_desc_area_base[queue];
690 (((unsigned int) p_rx_curr_desc <
691 ((unsigned int) ethernet_private->
692 p_rx_desc_area_base[queue] +
693 ethernet_private->rx_desc_area_size[queue])));
694 p_rx_curr_desc =
695 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
696 RX_DESC_ALIGNED_SIZE)) {
697 if (p_rx_curr_desc->return_info != 0) {
698 p_rx_curr_desc->return_info = 0;
699 DP (printf ("freed\n"));
700 }
701 }
702 DP (printf ("Done\n"));
703 }
704 return 0;
705}
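
/*
 * mv64460_eth_stop - shut the port down.
 *
 * Writes the ethernet base-address-enable register (disabling the decode
 * windows) and then performs the real stop: the TX/RX rings are cleared,
 * the port is reset and all port interrupts are masked (see
 * mv64460_eth_real_stop below).
 */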
717int mv64460_eth_stop (struct eth_device *dev)
718{
719
720 MV_REG_WRITE (MV64460_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
721 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
722 mv64460_eth_real_stop (dev);
723
724 return 0;
}
726
727
728
729static int mv64460_eth_real_stop (struct eth_device *dev)
730{
731 ETH_PORT_INFO *ethernet_private;
732 struct mv64460_eth_priv *port_private;
733 unsigned int port_num;
734
735 ethernet_private = (ETH_PORT_INFO *) dev->priv;
736 port_private =
737 (struct mv64460_eth_priv *) ethernet_private->port_private;
738 port_num = port_private->port_num;
739
740 mv64460_eth_free_tx_rings (dev);
741 mv64460_eth_free_rx_rings (dev);
742
743 eth_port_reset (ethernet_private->port_num);
744
745 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
746 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
747
748 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num), 0);
749
750 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
751 MV_RESET_REG_BITS (MV64460_CPU_INTERRUPT0_MASK_HIGH,
752 BIT0 << port_num);
753
754#ifndef UPDATE_STATS_BY_SOFTWARE
755
756
757
758
759 if (port_private->eth_running == MAGIC_ETH_RUNNING) {
760 port_private->eth_running = 0;
761 mv64460_eth_print_stat (dev);
762 }
763 memset (port_private->stats, 0, sizeof (struct net_device_stats));
764#endif
765 DP (printf ("\nEthernet stopped ... \n"));
766 return 0;
767}
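
/*
 * mv64460_eth_xmit - send one frame on TX queue 0.
 *
 * The frame is described by a single descriptor (first + last), handed to
 * eth_port_send(), and the routine then polls eth_tx_return_desc() until
 * the descriptor comes back, updating the TX statistics on the way.
 * Returns 0 on success, 1 if the queue was full or an error occurred.
 */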
781int mv64460_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
782 int dataSize)
783{
784 ETH_PORT_INFO *ethernet_private;
785 struct mv64460_eth_priv *port_private;
786 PKT_INFO pkt_info;
787 ETH_FUNC_RET_STATUS status;
788 struct net_device_stats *stats;
789 ETH_FUNC_RET_STATUS release_result;
790
791 ethernet_private = (ETH_PORT_INFO *) dev->priv;
792 port_private =
793 (struct mv64460_eth_priv *) ethernet_private->port_private;
794
795 stats = port_private->stats;
796
797
798 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
799 pkt_info.byte_cnt = dataSize;
800 pkt_info.buf_ptr = (unsigned int) dataPtr;
801 pkt_info.return_info = 0;
802
803 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
804 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
805 printf ("Error on transmitting packet ..");
806 if (status == ETH_QUEUE_FULL)
807 printf ("ETH Queue is full. \n");
808 if (status == ETH_QUEUE_LAST_RESOURCE)
809 printf ("ETH Queue: using last available resource. \n");
810 return 1;
811 }
812
813
814 stats->tx_bytes += dataSize;
815 stats->tx_packets++;
816
817
818 do {
819 release_result =
820 eth_tx_return_desc (ethernet_private, ETH_Q0,
821 &pkt_info);
822 switch (release_result) {
823 case ETH_OK:
824 DP (printf ("descriptor released\n"));
825 if (pkt_info.cmd_sts & BIT0) {
826 printf ("Error in TX\n");
827 stats->tx_errors++;
828 }
829 break;
830 case ETH_RETRY:
831 DP (printf ("transmission still in process\n"));
832 break;
833
834 case ETH_ERROR:
835 printf ("routine can not access Tx desc ring\n");
836 break;
837
838 case ETH_END_OF_JOB:
839 DP (printf ("the routine has nothing to release\n"));
840 break;
841 default:
842 break;
843 }
844 } while (release_result == ETH_OK);
845
846 return 0;
847}
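
/*
 * mv64460_eth_receive - poll RX queue 0.
 *
 * Every good frame (single descriptor, no error summary) is handed to
 * NetReceive(); frames spanning several descriptors or flagged with an
 * error are dropped.  In both cases the buffer is returned to the RX
 * ring via eth_rx_return_buff().
 */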
861int mv64460_eth_receive (struct eth_device *dev)
862{
863 ETH_PORT_INFO *ethernet_private;
864 struct mv64460_eth_priv *port_private;
865 PKT_INFO pkt_info;
866 struct net_device_stats *stats;
867
868 ethernet_private = (ETH_PORT_INFO *) dev->priv;
869 port_private = (struct mv64460_eth_priv *) ethernet_private->port_private;
870 stats = port_private->stats;
871
872 while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) == ETH_OK)) {
#ifdef DEBUG_MV_ETH
		if (pkt_info.byte_cnt != 0) {
			int i;

			printf ("%s: Received %d byte Packet @ 0x%x\n",
				__FUNCTION__, pkt_info.byte_cnt,
				pkt_info.buf_ptr);
			if (pkt_info.buf_ptr != 0) {
				for (i = 0; i < pkt_info.byte_cnt; i++) {
					if ((i % 4) == 0)
						printf ("\n0x");
					printf ("%02x", ((char *) pkt_info.buf_ptr)[i]);
				}
				printf ("\n");
			}
		}
#endif
889
890 stats->rx_packets++;
891 stats->rx_bytes += pkt_info.byte_cnt;
892
893
894
895
896
897 if (((pkt_info.
898 cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
899 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
900 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
901 stats->rx_dropped++;
902
903 printf ("Received packet spread on multiple descriptors\n");
904
905
906 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
907 stats->rx_errors++;
908 }
909
910
911 pkt_info.buf_ptr &= ~0x7;
912 pkt_info.byte_cnt = 0x0000;
913
914 if (eth_rx_return_buff
915 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
916 printf ("Error while returning the RX Desc to Ring\n");
917 } else {
918 DP (printf ("RX Desc returned to Ring\n"));
919 }
920
921 } else {
922
923
924#ifdef DEBUG_MV_ETH
925 printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
926#endif
927
928 NetReceive ((uchar *) pkt_info.buf_ptr,
929 (int) pkt_info.byte_cnt);
930
931
932
933 pkt_info.buf_ptr &= ~0x7;
934 pkt_info.byte_cnt = 0x0000;
935 DP (printf ("RX: pkt_info.buf_ptr = %x\n", pkt_info.buf_ptr));
936 if (eth_rx_return_buff
937 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
938 printf ("Error while returning the RX Desc to Ring\n");
939 } else {
940 DP (printf ("RX: Desc returned to Ring\n"));
941 }
942
943
944
945 }
946 }
947 mv64460_eth_get_stats (dev);
948 return 1;
949}
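
/*
 * mv64460_eth_get_stats - return the software statistics structure for
 * this device after folding in the current hardware MIB counters.
 */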
961static struct net_device_stats *mv64460_eth_get_stats (struct eth_device *dev)
962{
963 ETH_PORT_INFO *ethernet_private;
964 struct mv64460_eth_priv *port_private;
965
966 ethernet_private = (ETH_PORT_INFO *) dev->priv;
967 port_private =
968 (struct mv64460_eth_priv *) ethernet_private->port_private;
969
970 mv64460_eth_update_stat (dev);
971
972 return port_private->stats;
973}
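
/*
 * mv64460_eth_update_stat - accumulate the hardware MIB counters into the
 * software net_device_stats.  The MIB counters are cleared when read, so
 * the values are added to (not copied over) the software counters; the
 * "high" octet counters are read only to clear them.
 */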
984static void mv64460_eth_update_stat (struct eth_device *dev)
985{
986 ETH_PORT_INFO *ethernet_private;
987 struct mv64460_eth_priv *port_private;
988 struct net_device_stats *stats;
989
990 ethernet_private = (ETH_PORT_INFO *) dev->priv;
991 port_private =
992 (struct mv64460_eth_priv *) ethernet_private->port_private;
993 stats = port_private->stats;
994
995
996 stats->rx_packets += (unsigned long)
997 eth_read_mib_counter (ethernet_private->port_num,
998 ETH_MIB_GOOD_FRAMES_RECEIVED);
999 stats->tx_packets += (unsigned long)
1000 eth_read_mib_counter (ethernet_private->port_num,
1001 ETH_MIB_GOOD_FRAMES_SENT);
1002 stats->rx_bytes += (unsigned long)
1003 eth_read_mib_counter (ethernet_private->port_num,
1004 ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015 (void)eth_read_mib_counter (ethernet_private->port_num,
1016 ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
1017 stats->tx_bytes += (unsigned long)
1018 eth_read_mib_counter (ethernet_private->port_num,
1019 ETH_MIB_GOOD_OCTETS_SENT_LOW);
1020 (void)eth_read_mib_counter (ethernet_private->port_num,
1021 ETH_MIB_GOOD_OCTETS_SENT_HIGH);
1022 stats->rx_errors += (unsigned long)
1023 eth_read_mib_counter (ethernet_private->port_num,
1024 ETH_MIB_MAC_RECEIVE_ERROR);
1025
1026
1027 stats->rx_dropped +=
1028 (unsigned long) eth_read_mib_counter (ethernet_private->
1029 port_num,
1030 ETH_MIB_BAD_CRC_EVENT);
1031 stats->multicast += (unsigned long)
1032 eth_read_mib_counter (ethernet_private->port_num,
1033 ETH_MIB_MULTICAST_FRAMES_RECEIVED);
1034 stats->collisions +=
1035 (unsigned long) eth_read_mib_counter (ethernet_private->
1036 port_num,
1037 ETH_MIB_COLLISION) +
1038 (unsigned long) eth_read_mib_counter (ethernet_private->
1039 port_num,
1040 ETH_MIB_LATE_COLLISION);
1041
1042 stats->rx_length_errors +=
1043 (unsigned long) eth_read_mib_counter (ethernet_private->
1044 port_num,
1045 ETH_MIB_UNDERSIZE_RECEIVED)
1046 +
1047 (unsigned long) eth_read_mib_counter (ethernet_private->
1048 port_num,
1049 ETH_MIB_OVERSIZE_RECEIVED);
1050
1051}
1052
1053#ifndef UPDATE_STATS_BY_SOFTWARE
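
/*
 * mv64460_eth_print_stat - print the accumulated statistics when the port
 * is stopped (compiled only when UPDATE_STATS_BY_SOFTWARE is not defined).
 */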
1063static void mv64460_eth_print_stat (struct eth_device *dev)
1064{
1065 ETH_PORT_INFO *ethernet_private;
1066 struct mv64460_eth_priv *port_private;
1067 struct net_device_stats *stats;
1068
1069 ethernet_private = (ETH_PORT_INFO *) dev->priv;
1070 port_private =
1071 (struct mv64460_eth_priv *) ethernet_private->port_private;
1072 stats = port_private->stats;
1073
1074
1075 printf ("\n### Network statistics: ###\n");
1076 printf ("--------------------------\n");
1077 printf (" Packets received: %ld\n", stats->rx_packets);
1078 printf (" Packets send: %ld\n", stats->tx_packets);
1079 printf (" Received bytes: %ld\n", stats->rx_bytes);
1080 printf (" Send bytes: %ld\n", stats->tx_bytes);
1081 if (stats->rx_errors != 0)
1082 printf (" Rx Errors: %ld\n",
1083 stats->rx_errors);
1084 if (stats->rx_dropped != 0)
1085 printf (" Rx dropped (CRC Errors): %ld\n",
1086 stats->rx_dropped);
1087 if (stats->multicast != 0)
1088 printf (" Rx mulicast frames: %ld\n",
1089 stats->multicast);
1090 if (stats->collisions != 0)
1091 printf (" No. of collisions: %ld\n",
1092 stats->collisions);
1093 if (stats->rx_length_errors != 0)
1094 printf (" Rx length errors: %ld\n",
1095 stats->rx_length_errors);
1096}
1097#endif
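
/* db64460_eth_start - board-level wrapper around mv64460_eth_open() */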
1105bool db64460_eth_start (struct eth_device *dev)
1106{
1107 return (mv64460_eth_open (dev));
1108}
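
/*
 * Low-level port helpers.
 *
 * The macros below enable/disable individual TX/RX queues and maintain
 * the per-queue "current", "used" and "first" descriptor pointers kept in
 * the ETH_PORT_INFO structure (p_eth_port_ctrl); the *_NEXT_DESC_PTR
 * macros step through the descriptor rings with wrap-around.
 */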
1302#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1303 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1304
1305#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1306 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1307 (1 << (8 + tx_queue)))
1308
1309#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1310MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1311
1312#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1313MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1314
1315#define CURR_RFD_GET(p_curr_desc, queue) \
1316 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1317
1318#define CURR_RFD_SET(p_curr_desc, queue) \
1319 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1320
1321#define USED_RFD_GET(p_used_desc, queue) \
1322 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1323
1324#define USED_RFD_SET(p_used_desc, queue)\
1325(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1326
1327
1328#define CURR_TFD_GET(p_curr_desc, queue) \
1329 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1330
1331#define CURR_TFD_SET(p_curr_desc, queue) \
1332 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1333
1334#define USED_TFD_GET(p_used_desc, queue) \
1335 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1336
1337#define USED_TFD_SET(p_used_desc, queue) \
1338 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1339
1340#define FIRST_TFD_GET(p_first_desc, queue) \
1341 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1342
1343#define FIRST_TFD_SET(p_first_desc, queue) \
1344 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1345
1346
1347
1348#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1349
1350#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1351
1352#define LINK_UP_TIMEOUT 100000
1353#define PHY_BUSY_TIMEOUT 10000000
1354
1355
1356
1357
1358static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1359static int ethernet_phy_get (ETH_PORT eth_port_num);
1360
1361
1362static void eth_set_access_control (ETH_PORT eth_port_num,
1363 ETH_WIN_PARAM * param);
1364static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1365 ETH_QUEUE queue, int option);
1366#if 0
1367static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1368 unsigned char mc_byte,
1369 ETH_QUEUE queue, int option);
1370static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1371 unsigned char crc8,
1372 ETH_QUEUE queue, int option);
1373#endif
1374
1375static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1376 int byte_count);
1377
1378void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1379
1380
1381typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1382u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1383{
1384 u32 result = 0;
1385 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1386
1387 if (enable & (1 << bank))
1388 return 0;
1389 if (bank == BANK0)
1390 result = MV_REG_READ (MV64460_CS_0_BASE_ADDR);
1391 if (bank == BANK1)
1392 result = MV_REG_READ (MV64460_CS_1_BASE_ADDR);
1393 if (bank == BANK2)
1394 result = MV_REG_READ (MV64460_CS_2_BASE_ADDR);
1395 if (bank == BANK3)
1396 result = MV_REG_READ (MV64460_CS_3_BASE_ADDR);
1397 result &= 0x0000ffff;
1398 result = result << 16;
1399 return result;
1400}
1401
1402u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1403{
1404 u32 result = 0;
1405 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1406
1407 if (enable & (1 << bank))
1408 return 0;
1409 if (bank == BANK0)
1410 result = MV_REG_READ (MV64460_CS_0_SIZE);
1411 if (bank == BANK1)
1412 result = MV_REG_READ (MV64460_CS_1_SIZE);
1413 if (bank == BANK2)
1414 result = MV_REG_READ (MV64460_CS_2_SIZE);
1415 if (bank == BANK3)
1416 result = MV_REG_READ (MV64460_CS_3_SIZE);
1417 result += 1;
1418 result &= 0x0000ffff;
1419 result = result << 16;
1420 return result;
1421}
1422
1423u32 mv_get_internal_sram_base (void)
1424{
1425 u32 result;
1426
1427 result = MV_REG_READ (MV64460_INTEGRATED_SRAM_BASE_ADDR);
1428 result &= 0x0000ffff;
1429 result = result << 16;
1430 return result;
1431}
1432
1458static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1459{
1460 int queue;
1461 ETH_WIN_PARAM win_param;
1462
1463 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1464 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1465 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1466 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1467
1468 p_eth_port_ctrl->port_rx_queue_command = 0;
1469 p_eth_port_ctrl->port_tx_queue_command = 0;
1470
1471
1472 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1473 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1474 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1475 p_eth_port_ctrl->rx_resource_err[queue] = false;
1476 }
1477
1478 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1479 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1480 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1481 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1482 p_eth_port_ctrl->tx_resource_err[queue] = false;
1483 }
1484
1485 eth_port_reset (p_eth_port_ctrl->port_num);
1486
1487
1488 win_param.win = ETH_WIN0;
1489 win_param.target = ETH_TARGET_DRAM;
1490 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1491#ifndef CONFIG_NOT_COHERENT_CACHE
1492 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1493#endif
1494 win_param.high_addr = 0;
1495
1496 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1497 win_param.size = mv_get_dram_bank_size (BANK0);
1498 if (win_param.size == 0)
1499 win_param.enable = 0;
1500 else
1501 win_param.enable = 1;
1502 win_param.access_ctrl = EWIN_ACCESS_FULL;
1503
1504
1505 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1506
1507
1508 win_param.win = ETH_WIN1;
1509 win_param.target = ETH_TARGET_DRAM;
1510 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1511#ifndef CONFIG_NOT_COHERENT_CACHE
1512 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1513#endif
1514 win_param.high_addr = 0;
1515
1516 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1517 win_param.size = mv_get_dram_bank_size (BANK1);
1518 if (win_param.size == 0)
1519 win_param.enable = 0;
1520 else
1521 win_param.enable = 1;
1522 win_param.access_ctrl = EWIN_ACCESS_FULL;
1523
1524
1525 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1526
1527
1528 win_param.win = ETH_WIN2;
1529 win_param.target = ETH_TARGET_DRAM;
1530 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1531#ifndef CONFIG_NOT_COHERENT_CACHE
1532 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1533#endif
1534 win_param.high_addr = 0;
1535
1536 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1537 win_param.size = mv_get_dram_bank_size (BANK2);
1538 if (win_param.size == 0)
1539 win_param.enable = 0;
1540 else
1541 win_param.enable = 1;
1542 win_param.access_ctrl = EWIN_ACCESS_FULL;
1543
1544
1545 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1546
1547
1548 win_param.win = ETH_WIN3;
1549 win_param.target = ETH_TARGET_DRAM;
1550 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1551#ifndef CONFIG_NOT_COHERENT_CACHE
1552 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1553#endif
1554 win_param.high_addr = 0;
1555
1556 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1557 win_param.size = mv_get_dram_bank_size (BANK3);
1558 if (win_param.size == 0)
1559 win_param.enable = 0;
1560 else
1561 win_param.enable = 1;
1562 win_param.access_ctrl = EWIN_ACCESS_FULL;
1563
1564
1565 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1566
1567
1568 win_param.win = ETH_WIN4;
1569 win_param.target = EBAR_TARGET_CBS;
1570 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1571 win_param.high_addr = 0;
1572 win_param.base_addr = mv_get_internal_sram_base ();
1573 win_param.size = MV64460_INTERNAL_SRAM_SIZE;
1574 win_param.enable = 1;
1575 win_param.access_ctrl = EWIN_ACCESS_FULL;
1576
1577
1578 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1579
1580 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1581
1582 ethernet_phy_set (p_eth_port_ctrl->port_num,
1583 p_eth_port_ctrl->port_phy_addr);
1584
1585 return;
1586
1587}
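
/*
 * eth_port_start - start a port after its rings have been set up.
 *
 * Writes the current TX/RX descriptor pointers to the hardware, programs
 * the unicast MAC address, port configuration, serial control and SDMA
 * configuration registers, enables the serial port and the RX queues, and
 * finally checks the PHY for link.  Returns false if no link is up.
 */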
1619static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
1620{
1621 int queue;
1622 volatile ETH_TX_DESC *p_tx_curr_desc;
1623 volatile ETH_RX_DESC *p_rx_curr_desc;
1624 unsigned int phy_reg_data;
1625 ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;
1626
1627
1628 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1629 CURR_TFD_GET (p_tx_curr_desc, queue);
1630 MV_REG_WRITE ((MV64460_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
1631 (eth_port_num)
1632 + (4 * queue)),
1633 ((unsigned int) p_tx_curr_desc));
1634
1635 }
1636
1637
1638 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1639 CURR_RFD_GET (p_rx_curr_desc, queue);
1640 MV_REG_WRITE ((MV64460_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
1641 (eth_port_num)
1642 + (4 * queue)),
1643 ((unsigned int) p_rx_curr_desc));
1644
1645 if (p_rx_curr_desc != NULL)
1646
1647 eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
1648 p_eth_port_ctrl->port_mac_addr,
1649 queue);
1650 }
1651
1652
1653 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
1654 p_eth_port_ctrl->port_config);
1655
1656 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
1657 p_eth_port_ctrl->port_config_extend);
1658
1659 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1660 p_eth_port_ctrl->port_serial_control);
1661
1662 MV_SET_REG_BITS (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1663 ETH_SERIAL_PORT_ENABLE);
1664
1665
1666 MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
1667 p_eth_port_ctrl->port_sdma_config);
1668
1669 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
1670 (eth_port_num), 0x3fffffff);
1671 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
1672 (eth_port_num), 0x03fffcff);
1673
1674 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);
1675
1676
1677 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
1678 p_eth_port_ctrl->port_rx_queue_command);
1679
1680
1681 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
1682
1683 if (!(phy_reg_data & 0x20))
1684 return false;
1685
1686 return true;
1687}
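
/*
 * eth_port_uc_addr_set - program the port's unicast MAC address.
 *
 * Writes the MAC address high/low registers and adds the address to the
 * unicast filter table so frames for it are accepted on the given queue.
 */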
1708static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1709 unsigned char *p_addr, ETH_QUEUE queue)
1710{
1711 unsigned int mac_h;
1712 unsigned int mac_l;
1713
1714 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1715 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1716 (p_addr[2] << 8) | (p_addr[3] << 0);
1717
1718 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1719 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1720
1721
1722 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1723
1724 return;
1725}
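
/*
 * eth_port_uc_addr - set or clear a unicast filter table entry.
 *
 * The table is indexed by the low nibble of the last MAC address byte;
 * each entry holds a "pass" bit plus the RX queue number for matching
 * frames.
 */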
1750static bool eth_port_uc_addr (ETH_PORT eth_port_num,
1751 unsigned char uc_nibble,
1752 ETH_QUEUE queue, int option)
1753{
1754 unsigned int unicast_reg;
1755 unsigned int tbl_offset;
1756 unsigned int reg_offset;
1757
1758
1759 uc_nibble = (0xf & uc_nibble);
1760 tbl_offset = (uc_nibble / 4) * 4;
1761 reg_offset = uc_nibble % 4;
1762
1763 switch (option) {
1764 case REJECT_MAC_ADDR:
1765
1766 unicast_reg =
1767 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1768 (eth_port_num)
1769 + tbl_offset));
1770
1771 unicast_reg &= (0x0E << (8 * reg_offset));
1772
1773 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1774 (eth_port_num)
1775 + tbl_offset), unicast_reg);
1776 break;
1777
1778 case ACCEPT_MAC_ADDR:
1779
1780 unicast_reg =
1781 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1782 (eth_port_num)
1783 + tbl_offset));
1784
1785 unicast_reg |= ((0x01 | queue) << (8 * reg_offset));
1786
1787 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1788 (eth_port_num)
1789 + tbl_offset), unicast_reg);
1790
1791 break;
1792
1793 default:
1794 return false;
1795 }
1796 return true;
1797}
1798
1799#if 0
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831static void eth_port_mc_addr (ETH_PORT eth_port_num,
1832 unsigned char *p_addr,
1833 ETH_QUEUE queue, int option)
1834{
1835 unsigned int mac_h;
1836 unsigned int mac_l;
1837 unsigned char crc_result = 0;
1838 int mac_array[48];
1839 int crc[8];
1840 int i;
1841
1842 if ((p_addr[0] == 0x01) &&
1843 (p_addr[1] == 0x00) &&
1844 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
1845
1846 eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
1847 } else {
1848
1849 mac_h = (p_addr[0] << 8) | (p_addr[1]);
1850 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
1851 (p_addr[4] << 8) | (p_addr[5] << 0);
1852
1853 for (i = 0; i < 32; i++)
1854 mac_array[i] = (mac_l >> i) & 0x1;
1855 for (i = 32; i < 48; i++)
1856 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
1857
1858 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
1859 mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
1860 mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
1861 mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
1862 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
1863 mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
1864 mac_array[6] ^ mac_array[0];
1865
1866 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1867 mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
1868 mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
1869 mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
1870 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
1871 mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
1872 mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
1873 mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
1874 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
1875 mac_array[0];
1876
1877 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
1878 mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
1879 mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
1880 mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
1881 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
1882 mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
1883 mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
1884 mac_array[2] ^ mac_array[1] ^ mac_array[0];
1885
1886 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
1887 mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
1888 mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
1889 mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
1890 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
1891 mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
1892 mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
1893 mac_array[2] ^ mac_array[1];
1894
1895 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1896 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
1897 mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
1898 mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
1899 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
1900 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1901 mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
1902 mac_array[2];
1903
1904 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
1905 mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
1906 mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
1907 mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
1908 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
1909 mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
1910 mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
1911 mac_array[3];
1912
1913 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
1914 mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
1915 mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
1916 mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
1917 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
1918 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1919 mac_array[6] ^ mac_array[5] ^ mac_array[4];
1920
1921 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
1922 mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
1923 mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
1924 mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
1925 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
1926 mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
1927 mac_array[6] ^ mac_array[5];
1928
1929 for (i = 0; i < 8; i++)
1930 crc_result = crc_result | (crc[i] << i);
1931
1932 eth_port_omc_addr (eth_port_num, crc_result, queue, option);
1933 }
1934 return;
1935}
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1964 unsigned char mc_byte,
1965 ETH_QUEUE queue, int option)
1966{
1967 unsigned int smc_table_reg;
1968 unsigned int tbl_offset;
1969 unsigned int reg_offset;
1970
1971
1972 tbl_offset = (mc_byte / 4) * 4;
1973 reg_offset = mc_byte % 4;
1974 queue &= 0x7;
1975
1976 switch (option) {
1977 case REJECT_MAC_ADDR:
1978
1979 smc_table_reg =
1980 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1981 smc_table_reg &= (0x0E << (8 * reg_offset));
1982
1983 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1984 break;
1985
1986 case ACCEPT_MAC_ADDR:
1987
1988 smc_table_reg =
1989 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
1990 smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
1991
1992 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
1993 break;
1994
1995 default:
1996 return false;
1997 }
1998 return true;
1999}
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027static bool eth_port_omc_addr (ETH_PORT eth_port_num,
2028 unsigned char crc8,
2029 ETH_QUEUE queue, int option)
2030{
2031 unsigned int omc_table_reg;
2032 unsigned int tbl_offset;
2033 unsigned int reg_offset;
2034
2035
2036 tbl_offset = (crc8 / 4) * 4;
2037 reg_offset = crc8 % 4;
2038 queue &= 0x7;
2039
2040 switch (option) {
2041 case REJECT_MAC_ADDR:
2042
2043 omc_table_reg =
2044 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2045 omc_table_reg &= (0x0E << (8 * reg_offset));
2046
2047 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2048 break;
2049
2050 case ACCEPT_MAC_ADDR:
2051
2052 omc_table_reg =
2053 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2054 omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
2055
2056 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2057 break;
2058
2059 default:
2060 return false;
2061 }
2062 return true;
2063}
2064#endif
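
/*
 * eth_port_init_mac_tables - clear the unicast, special multicast and
 * other multicast DA filter tables so that no addresses are accepted
 * until they are explicitly added.
 */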
2083static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
2084{
2085 int table_index;
2086
2087
2088 for (table_index = 0; table_index <= 0xC; table_index += 4)
2089 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
2090 (eth_port_num) + table_index), 0);
2091
2092 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2093
2094 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2095
2096 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2097 }
2098}
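
/*
 * eth_clear_mib_counters - clear the port's MIB counters.
 *
 * The counters are clear-on-read, so simply reading each one zeroes it.
 */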
2117static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2118{
2119 int i;
2120
2121
2122 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2123 i += 4) {
2124 (void)MV_REG_READ ((MV64460_ETH_MIB_COUNTERS_BASE
2125 (eth_port_num) + i));
2126 }
2127
2128 return;
2129}
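
/* eth_read_mib_counter - return the value of one port MIB counter */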
2152unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2153 unsigned int mib_offset)
2154{
2155 return (MV_REG_READ (MV64460_ETH_MIB_COUNTERS_BASE (eth_port_num)
2156 + mib_offset));
2157}
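
/*
 * ethernet_phy_set - store the PHY address for a port in that port's
 * 5-bit field of the PHY address register.
 */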
2176static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2177{
2178 unsigned int reg_data;
2179
2180 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2181
2182 reg_data &= ~(0x1F << (5 * eth_port_num));
2183 reg_data |= (phy_addr << (5 * eth_port_num));
2184
2185 MV_REG_WRITE (MV64460_ETH_PHY_ADDR_REG, reg_data);
2186
2187 return;
2188}
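
/* ethernet_phy_get - return the PHY address configured for a port */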
2206static int ethernet_phy_get (ETH_PORT eth_port_num)
2207{
2208 unsigned int reg_data;
2209
2210 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2211
2212 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2213}
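
/*
 * phy_setup_aneg - advertise all 10/100 modes plus 1000BASE-T half/full
 * duplex, then enable and restart auto-negotiation on the PHY.
 */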
2218int phy_setup_aneg (char *devname, unsigned char addr)
2219{
2220 unsigned short ctl, adv;
2221
2222
2223 miiphy_read (devname, addr, MII_ADVERTISE, &adv);
2224 adv |= (LPA_LPACK | LPA_RFAULT | LPA_100BASE4 |
2225 LPA_100FULL | LPA_100HALF | LPA_10FULL |
2226 LPA_10HALF);
2227 miiphy_write (devname, addr, MII_ADVERTISE, adv);
2228
2229 miiphy_read (devname, addr, MII_CTRL1000, &adv);
2230 adv |= (0x0300);
2231 miiphy_write (devname, addr, MII_CTRL1000, adv);
2232
2233
2234 miiphy_read (devname, addr, MII_BMCR, &ctl);
2235 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
2236 miiphy_write (devname, addr, MII_BMCR, ctl);
2237
2238 return 0;
2239}
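
/*
 * ethernet_phy_reset - reset the PHY attached to a port and poll its
 * status register (a bounded number of times) until it reports ready
 * again.  Returns false on timeout.
 */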
2258static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2259{
2260 unsigned int time_out = 50;
2261 unsigned int phy_reg_data;
2262
2263 eth_port_read_smi_reg (eth_port_num, 20, &phy_reg_data);
2264 phy_reg_data |= 0x0083;
2265 eth_port_write_smi_reg (eth_port_num, 20, phy_reg_data);
2266
2267
2268 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2269 phy_reg_data |= 0x8000;
2270 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2271
2272
2273 do {
2274 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2275
2276 if (time_out-- == 0)
2277 return false;
2278 }
2279 while (!(phy_reg_data & 0x20));
2280
2281 return true;
2282}
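
/*
 * eth_port_reset - stop all TX and RX queue activity, wait for the stop
 * to take effect, clear the MIB counters and disable the serial port.
 */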
2302static void eth_port_reset (ETH_PORT eth_port_num)
2303{
2304 unsigned int reg_data;
2305
2306
2307 reg_data =
2308 MV_REG_READ (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2309 (eth_port_num));
2310
2311 if (reg_data & 0xFF) {
2312
2313 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2314 (eth_port_num), (reg_data << 8));
2315
2316
2317 do {
2318
2319 reg_data =
2320 MV_REG_READ
2321 (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2322 (eth_port_num));
2323 }
2324 while (reg_data & 0xFF);
2325 }
2326
2327
2328 reg_data =
2329 MV_REG_READ (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2330 (eth_port_num));
2331
2332 if (reg_data & 0xFF) {
2333
2334 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2335 (eth_port_num), (reg_data << 8));
2336
2337
2338 do {
2339
2340 reg_data =
2341 MV_REG_READ
2342 (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2343 (eth_port_num));
2344 }
2345 while (reg_data & 0xFF);
2346 }
2347
2348
2349 eth_clear_mib_counters (eth_port_num);
2350
2351
2352 reg_data =
2353 MV_REG_READ (MV64460_ETH_PORT_SERIAL_CONTROL_REG
2354 (eth_port_num));
2355 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2356 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
2357 reg_data);
2358
2359 return;
2360}
2361
2362#if 0
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2383 unsigned int value)
2384{
2385 unsigned int eth_config_reg;
2386
2387 eth_config_reg =
2388 MV_REG_READ (MV64460_ETH_PORT_CONFIG_REG (eth_port_num));
2389 eth_config_reg |= value;
2390 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
2391 eth_config_reg);
2392
2393 return;
2394}
2395#endif
2396
2397#if 0
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
2418 unsigned int value)
2419{
2420 unsigned int eth_config_reg;
2421
2422 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2423 (eth_port_num));
2424 eth_config_reg &= ~value;
2425 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
2426 eth_config_reg);
2427
2428 return;
2429}
2430#endif
2431
2432#if 0
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
2451{
2452 unsigned int eth_config_reg;
2453
2454 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2455 (eth_port_num));
2456 return eth_config_reg;
2457}
2458
2459#endif
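
/*
 * eth_port_read_smi_reg - read a PHY register over the SMI interface.
 *
 * Waits for the SMI unit to become idle, issues a read opcode with the
 * PHY and register addresses, waits for the read-valid bit and returns
 * the 16-bit result.  Returns false on a busy/valid timeout.
 */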
2481static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2482 unsigned int phy_reg, unsigned int *value)
2483{
2484 unsigned int reg_value;
2485 unsigned int time_out = PHY_BUSY_TIMEOUT;
2486 int phy_addr;
2487
2488 phy_addr = ethernet_phy_get (eth_port_num);
2489
2490
2491 do {
2492 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2493 if (time_out-- == 0) {
2494 return false;
2495 }
2496 }
2497 while (reg_value & ETH_SMI_BUSY);
2498
2499
2500
2501 MV_REG_WRITE (MV64460_ETH_SMI_REG,
2502 (phy_addr << 16) | (phy_reg << 21) |
2503 ETH_SMI_OPCODE_READ);
2504
2505 time_out = PHY_BUSY_TIMEOUT;
2506
2507 do {
2508 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2509 if (time_out-- == 0) {
2510 return false;
2511 }
2512 }
2513 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2514

	/* Short busy-wait so the read data has settled in the SMI register
	 * (PHY_UPDATE_TIMEOUT is defined once at the top of this file).
	 */
	for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++)
		;
2518
2519 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2520
2521 *value = reg_value & 0xffff;
2522
2523 return true;
2524}
2525
2526int mv_miiphy_read(const char *devname, unsigned char phy_addr,
2527 unsigned char phy_reg, unsigned short *value)
2528{
2529 unsigned int reg_value;
2530 unsigned int time_out = PHY_BUSY_TIMEOUT;
2531
2532
2533 do {
2534 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2535 if (time_out-- == 0) {
2536 return false;
2537 }
2538 }
2539 while (reg_value & ETH_SMI_BUSY);
2540
2541
2542 MV_REG_WRITE (MV64460_ETH_SMI_REG,
2543 (phy_addr << 16) | (phy_reg << 21) |
2544 ETH_SMI_OPCODE_READ);
2545
2546 time_out = PHY_BUSY_TIMEOUT;
2547
2548 do {
2549 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2550 if (time_out-- == 0) {
2551 return false;
2552 }
2553 }
2554 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2555
2556
2557 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2558
2559 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2560
2561 *value = reg_value & 0xffff;
2562
2563 return 0;
2564}
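
/*
 * eth_port_write_smi_reg - write a PHY register over the SMI interface:
 * wait for the SMI unit to go idle, then issue the write opcode with the
 * PHY address, register number and data.
 */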
static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
				    unsigned int phy_reg, unsigned int value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;
	int phy_addr;

	phy_addr = ethernet_phy_get (eth_port_num);

	/* Wait until the SMI controller is free */
	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return false;
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	/* Issue the write command with the new register value */
	MV_REG_WRITE (MV64460_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_WRITE | (value & 0xffff));
	return true;
}
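/*
 * mv_miiphy_write - SMI write helper matching the U-Boot miiphy callback
 * signature. Returns 0 on success and a negative value on timeout.
 */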
int mv_miiphy_write(const char *devname, unsigned char phy_addr,
		    unsigned char phy_reg, unsigned short value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;

	/* Wait until the SMI controller is free */
	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return -1;	/* timeout: non-zero signals an error */
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	/* Issue the write command with the new register value */
	MV_REG_WRITE (MV64460_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_WRITE | (value & 0xffff));
	return 0;
}
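/*
 * eth_set_access_control - Configure one of the port's address decode
 * windows: access protection, window size, base address (target and
 * attributes), optional high address remap, and the window enable bit.
 */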
static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param)
{
	unsigned int access_prot_reg;

	/* Access protection register: two protection bits per window */
	access_prot_reg = MV_REG_READ (MV64460_ETH_ACCESS_PROTECTION_REG
				       (eth_port_num));
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MV_REG_WRITE (MV64460_ETH_ACCESS_PROTECTION_REG (eth_port_num),
		      access_prot_reg);

	/* Window size register (size is programmed in 64KB units) */
	MV_REG_WRITE ((MV64460_ETH_SIZE_REG_0 +
		       (ETH_SIZE_REG_GAP * param->win)),
		      (((param->size / 0x10000) - 1) << 16));

	/* Window base address register: target, attributes and base */
	MV_REG_WRITE ((MV64460_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
		      (param->target | param->attributes | param->base_addr));

	/* Only windows 0-3 have a high address remap register */
	if (param->win < 4)
		MV_REG_WRITE ((MV64460_ETH_HIGH_ADDR_REMAP_REG_0 +
			       (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
			      param->high_addr);

	/* The enable register is active low: clearing the bit enables the window */
	if (param->enable == 1)
		MV_RESET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
				   (1 << param->win));
	else
		MV_SET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
				 (1 << param->win));
}
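/*
 * ether_init_rx_desc_ring - Build the Rx descriptor ring for a queue.
 *
 * Lays out rx_desc_num descriptors starting at rx_desc_base_addr, assigns
 * each one a buffer of rx_buff_size bytes carved from rx_buff_base_addr,
 * chains them into a ring and hands ownership to the DMA engine. Returns
 * false if the buffer area does not meet the alignment or size constraints.
 */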
static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE rx_queue,
				     int rx_desc_num,
				     int rx_buff_size,
				     unsigned int rx_desc_base_addr,
				     unsigned int rx_buff_base_addr)
{
	ETH_RX_DESC *p_rx_desc;
	ETH_RX_DESC *p_rx_prev_desc;	/* last descriptor written, used to close the ring */
	unsigned int buffer_addr;
	int ix;

	p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
	p_rx_prev_desc = p_rx_desc;
	buffer_addr = rx_buff_base_addr;

	/* Rx buffer area must be 16-byte aligned */
	if (rx_buff_base_addr & 0xF)
		return false;

	/* Rx buffer size must be between 8 bytes and the maximum allowed */
	if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
		return false;

	/* The end of the buffer area must be 8-byte aligned */
	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
		return false;

	/* Initialize each descriptor, give it to DMA and chain it to the next one */
	for (ix = 0; ix < rx_desc_num; ix++) {
		p_rx_desc->buf_size = rx_buff_size;
		p_rx_desc->byte_cnt = 0x0000;
		p_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
		p_rx_desc->next_desc_ptr =
			((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
		p_rx_desc->buf_ptr = buffer_addr;
		p_rx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_rx_desc, 0);
		buffer_addr += rx_buff_size;
		p_rx_prev_desc = p_rx_desc;
		p_rx_desc = (ETH_RX_DESC *)
			((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
	}

	/* Close the ring: the last descriptor points back to the first one */
	p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
	D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);

	/* Both current and used descriptor pointers start at the ring base */
	CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
	USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);

	p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
		(ETH_RX_DESC *) rx_desc_base_addr;
	p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
		rx_desc_num * RX_DESC_ALIGNED_SIZE;

	/* Mark this Rx queue as enabled in the port's queue command mask */
	p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);

	return true;
}
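/*
 * ether_init_tx_desc_ring - Build the Tx descriptor ring for a queue.
 *
 * Same layout scheme as the Rx ring, except that the descriptors stay
 * CPU owned until a packet is actually queued for transmission.
 */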
static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
				     ETH_QUEUE tx_queue,
				     int tx_desc_num,
				     int tx_buff_size,
				     unsigned int tx_desc_base_addr,
				     unsigned int tx_buff_base_addr)
{
	ETH_TX_DESC *p_tx_desc;
	ETH_TX_DESC *p_tx_prev_desc;	/* last descriptor written, used to close the ring */
	unsigned int buffer_addr;
	int ix;

	p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
	p_tx_prev_desc = p_tx_desc;
	buffer_addr = tx_buff_base_addr;

	/* Tx buffer area must be 16-byte aligned */
	if (tx_buff_base_addr & 0xF)
		return false;

	/* Tx buffer size must stay within the supported range */
	if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
	    || (tx_buff_size < TX_BUFFER_MIN_SIZE))
		return false;

	/* Initialize each descriptor (CPU owned) and chain it to the next one */
	for (ix = 0; ix < tx_desc_num; ix++) {
		p_tx_desc->byte_cnt = 0x0000;
		p_tx_desc->l4i_chk = 0x0000;
		p_tx_desc->cmd_sts = 0x00000000;
		p_tx_desc->next_desc_ptr =
			((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;

		p_tx_desc->buf_ptr = buffer_addr;
		p_tx_desc->return_info = 0x00000000;
		D_CACHE_FLUSH_LINE (p_tx_desc, 0);
		buffer_addr += tx_buff_size;
		p_tx_prev_desc = p_tx_desc;
		p_tx_desc = (ETH_TX_DESC *)
			((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
	}

	/* Close the ring: the last descriptor points back to the first one */
	p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
	D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);

	/* Both current and used descriptor pointers start at the ring base */
	CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
	USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);

	p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
		(ETH_TX_DESC *) tx_desc_base_addr;
	p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
		(tx_desc_num * TX_DESC_ALIGNED_SIZE);

	/* Mark this Tx queue as enabled in the port's queue command mask */
	p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);

	return true;
}
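/*
 * eth_port_send - Queue a packet described by p_pkt_info on a Tx queue.
 *
 * Fills the current Tx descriptor, hands it to the DMA engine and starts
 * the queue once the last descriptor of the packet has been written.
 * Returns ETH_QUEUE_FULL, ETH_QUEUE_LAST_RESOURCE, ETH_ERROR or ETH_OK.
 */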
static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
					  ETH_QUEUE tx_queue,
					  PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_first;
	volatile ETH_TX_DESC *p_tx_desc_curr;
	volatile ETH_TX_DESC *p_tx_next_desc_curr;
	volatile ETH_TX_DESC *p_tx_desc_used;
	unsigned int command_status;

	/* Do not process the Tx ring while it reports an out-of-resource condition */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		return ETH_QUEUE_FULL;

	/* Get the Tx descriptor ring indexes */
	CURR_TFD_GET (p_tx_desc_curr, tx_queue);
	USED_TFD_GET (p_tx_desc_used, tx_queue);

	if (p_tx_desc_curr == NULL)
		return ETH_ERROR;

	p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
	command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;

	if (command_status & (ETH_TX_FIRST_DESC)) {
		/* First descriptor of a packet: remember it for the closing sequence */
		FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
		p_tx_desc_first = p_tx_desc_curr;
	} else {
		FIRST_TFD_GET (p_tx_desc_first, tx_queue);
		command_status |= ETH_BUFFER_OWNED_BY_DMA;
	}

	/*
	 * Buffers shorter than 8 bytes hit a controller errata. The original
	 * workaround (copying the data into the descriptor area) is kept
	 * below but is unreachable because of the early return here.
	 */
	if (p_pkt_info->byte_cnt <= 8) {
		printf ("You have failed in the < 8 bytes errata - fixme\n");
		return ETH_ERROR;

		p_tx_desc_curr->buf_ptr =
			(unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
		eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
			    p_pkt_info->byte_cnt);
	} else
		p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;

	p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
	p_tx_desc_curr->return_info = p_pkt_info->return_info;

	if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
		/* Last descriptor: give it to DMA and enable the Tx interrupt */
		p_tx_desc_curr->cmd_sts = command_status |
			ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;

		if (p_tx_desc_curr != p_tx_desc_first)
			p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;

		/* Flush the descriptors and the CPU pipe before kicking the DMA */
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
		CPU_PIPE_FLUSH;

		/* Apply the send command */
		ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);

		/* Finish this packet: advance the 'first' pointer to the next free slot */
		p_tx_desc_first = p_tx_next_desc_curr;
		FIRST_TFD_SET (p_tx_desc_first, tx_queue);

	} else {
		p_tx_desc_curr->cmd_sts = command_status;
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
	}

	/* Check for ring index overlap in the Tx descriptor ring */
	if (p_tx_next_desc_curr == p_tx_desc_used) {
		CURR_TFD_SET (p_tx_desc_first, tx_queue);

		p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
		return ETH_QUEUE_LAST_RESOURCE;
	} else {
		CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
		return ETH_OK;
	}
}
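/*
 * eth_tx_return_desc - Release the next transmitted Tx descriptor.
 *
 * Returns ETH_RETRY while the descriptor is still owned by the DMA engine,
 * ETH_END_OF_JOB when there is nothing left to release, and ETH_OK after
 * copying the packet information back into p_pkt_info.
 */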
static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE tx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_TX_DESC *p_tx_desc_used = NULL;
	volatile ETH_TX_DESC *p_tx_desc_first = NULL;
	unsigned int command_status;

	/* Get the Tx descriptor ring indexes */
	USED_TFD_GET (p_tx_desc_used, tx_queue);
	FIRST_TFD_GET (p_tx_desc_first, tx_queue);

	/* Sanity check */
	if (p_tx_desc_used == NULL)
		return ETH_ERROR;

	command_status = p_tx_desc_used->cmd_sts;

	/* Still transmitting... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_RETRY;
	}

	/* Stop releasing: about to overlap the current available Tx descriptor */
	if ((p_tx_desc_used == p_tx_desc_first) &&
	    (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
		return ETH_END_OF_JOB;
	}

	/* Pass the packet information back to the caller */
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->return_info = p_tx_desc_used->return_info;
	p_tx_desc_used->return_info = 0;

	/* Update the next descriptor to release */
	USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);

	/* Any returned descriptor cancels the Tx resource error status */
	if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
		p_eth_port_ctrl->tx_resource_err[tx_queue] = false;

	D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);

	return ETH_OK;
}
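/*
 * eth_port_receive - Fetch the next received packet from an Rx queue.
 *
 * Copies the fields of the current Rx descriptor into p_pkt_info and
 * advances the ring. Returns ETH_END_OF_JOB when the descriptor is still
 * owned by the DMA engine (nothing received yet).
 */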
static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
					     ETH_QUEUE rx_queue,
					     PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_rx_curr_desc;
	volatile ETH_RX_DESC *p_rx_next_curr_desc;
	volatile ETH_RX_DESC *p_rx_used_desc;
	unsigned int command_status;

	/* Do not process the Rx ring while it reports an out-of-resource condition */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
		printf ("\nRx Queue is full ...\n");
		return ETH_QUEUE_FULL;
	}

	/* Get the Rx descriptor ring indexes */
	CURR_RFD_GET (p_rx_curr_desc, rx_queue);
	USED_RFD_GET (p_rx_used_desc, rx_queue);

	/* Sanity check */
	if (p_rx_curr_desc == NULL)
		return ETH_ERROR;

	p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
	command_status = p_rx_curr_desc->cmd_sts;

	/* Nothing received yet... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
		return ETH_END_OF_JOB;
	}

	p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = p_rx_curr_desc->return_info;
	p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;

	/* Clear return_info to indicate the packet was handed to the upper layer */
	p_rx_curr_desc->return_info = 0;

	/* Update the current index in the ring */
	CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);

	/* Rx resources exhausted: the current slot caught up with the used slot */
	if (p_rx_next_curr_desc == p_rx_used_desc)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = true;

	D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
	CPU_PIPE_FLUSH;

	return ETH_OK;
}
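/*
 * eth_rx_return_buff - Return a buffer to the Rx descriptor ring.
 *
 * Re-arms the 'used' Rx descriptor with the buffer described by
 * p_pkt_info and gives it back to the DMA engine.
 */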
static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
					       p_eth_port_ctrl,
					       ETH_QUEUE rx_queue,
					       PKT_INFO * p_pkt_info)
{
	volatile ETH_RX_DESC *p_used_rx_desc;	/* where to return the Rx buffer */

	/* Get the 'used' Rx descriptor */
	USED_RFD_GET (p_used_rx_desc, rx_queue);

	/* Sanity check */
	if (p_used_rx_desc == NULL)
		return ETH_ERROR;

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->return_info = p_pkt_info->return_info;
	p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
	p_used_rx_desc->buf_size = MV64460_RX_BUFFER_SIZE;	/* reset buffer size */

	/* Flush the write pipe */
	CPU_PIPE_FLUSH;

	/* Return the descriptor to DMA ownership */
	p_used_rx_desc->cmd_sts =
		ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;

	/* Flush the descriptor and the CPU pipe */
	D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
	CPU_PIPE_FLUSH;

	/* Move the used descriptor pointer to the next descriptor */
	USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);

	/* Any returned buffer cancels the Rx resource error status */
	if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
		p_eth_port_ctrl->rx_resource_err[rx_queue] = false;

	return ETH_OK;
}
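/*
 * eth_port_set_rx_coal - Program the Rx interrupt coalescing delay
 * (delay in microseconds, converted to t_clk cycles / 64).
 * Currently unused and kept inside #if 0.
 */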
#if 0
static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
		      ((coal & 0x3fff) << 8) |
		      (MV_REG_READ
		       (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num))
		       & 0xffc000ff));
	return coal;
}

#endif
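/*
 * eth_port_set_tx_coal - Program the Tx interrupt coalescing delay
 * (delay in microseconds, converted to t_clk cycles / 64).
 * Currently unused and kept inside #if 0.
 */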
#if 0
static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
					  unsigned int t_clk,
					  unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	MV_REG_WRITE (MV64460_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
		      coal << 4);
	return coal;
}
#endif
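/*
 * eth_b_copy - Simple byte-wise copy used by the (currently unreachable)
 * short-packet path in eth_port_send. src_addr and dst_addr are plain
 * addresses, not pointers.
 */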
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	/* Zero the first word of the destination (assumes 32-bit alignment) */
	*(unsigned int *) dst_addr = 0x0;

	/* Copy one byte at a time */
	while (byte_count != 0) {
		*(char *) dst_addr = *(char *) src_addr;
		dst_addr++;
		src_addr++;
		byte_count--;
	}
}
