30#include <common.h>
31#include <net.h>
32#include <malloc.h>
33#include <miiphy.h>
34
35#include "mv_eth.h"
36
37
38
39#undef DEBUG_MV_ETH
40
41#ifdef DEBUG_MV_ETH
42#define DEBUG
43#define DP(x) x
44#else
45#define DP(x)
46#endif
47
48
49#define ETH_PHY_DFCDL_CONFIG0_REG 0x2100
50#define ETH_PHY_DFCDL_CONFIG1_REG 0x2104
51#define ETH_PHY_DFCDL_ADDR_REG 0x2110
52#define ETH_PHY_DFCDL_DATA0_REG 0x2114
53
54#define PHY_AUTONEGOTIATE_TIMEOUT 4000
55#define PHY_UPDATE_TIMEOUT 10000
56
57#undef MV64460_CHECKSUM_OFFLOAD
58
59
60
61
62
63
64#undef MV64460_RX_QUEUE_FILL_ON_TASK
65
66
67#define MAGIC_ETH_RUNNING 8031971
68#define MV64460_INTERNAL_SRAM_SIZE _256K
69#define EXTRA_BYTES 32
#define WRAP (ETH_HLEN + 2 + 4 + 16)
#define BUFFER_MTU (dev->mtu + WRAP)
72#define INT_CAUSE_UNMASK_ALL 0x0007ffff
73#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
74#ifdef MV64460_RX_FILL_ON_TASK
75#define INT_CAUSE_MASK_ALL 0x00000000
76#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
77#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
78#endif
79
80
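/*
 * Register access helpers.  All offsets are relative to
 * INTERNAL_REG_BASE_ADDR, and every access is byte-swapped with
 * my_le32_to_cpu()/my_cpu_to_le32(), since the controller registers are
 * little-endian (the CPU side is assumed to be big-endian here).
 */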
81#define MV_REG_READ(offset) my_le32_to_cpu(* (volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset))
82#define MV_REG_WRITE(offset,data) *(volatile unsigned int *) (INTERNAL_REG_BASE_ADDR + offset) = my_cpu_to_le32 (data)
83#define MV_SET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) |= ((unsigned int)my_cpu_to_le32(bits)))
84#define MV_RESET_REG_BITS(regOffset,bits) ((*((volatile unsigned int*)((INTERNAL_REG_BASE_ADDR) + (regOffset)))) &= ~((unsigned int)my_cpu_to_le32(bits)))
85
86#define my_cpu_to_le32(x) my_le32_to_cpu((x))
87
88
89static int mv64460_eth_real_open (struct eth_device *eth);
90static int mv64460_eth_real_stop (struct eth_device *eth);
91static struct net_device_stats *mv64460_eth_get_stats (struct eth_device
92 *dev);
93static void eth_port_init_mac_tables (ETH_PORT eth_port_num);
94static void mv64460_eth_update_stat (struct eth_device *dev);
95bool db64460_eth_start (struct eth_device *eth);
96unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
97 unsigned int mib_offset);
98int mv64460_eth_receive (struct eth_device *dev);
99
100int mv64460_eth_xmit (struct eth_device *, volatile void *packet, int length);
101
102int mv_miiphy_read(const char *devname, unsigned char phy_addr,
103 unsigned char phy_reg, unsigned short *value);
104int mv_miiphy_write(const char *devname, unsigned char phy_addr,
105 unsigned char phy_reg, unsigned short value);
106
107int phy_setup_aneg (char *devname, unsigned char addr);
108
109#ifndef UPDATE_STATS_BY_SOFTWARE
110static void mv64460_eth_print_stat (struct eth_device *dev);
111#endif
112
113extern void NetReceive (volatile uchar *, int);
114
115extern unsigned int INTERNAL_REG_BASE_ADDR;
116
117unsigned long my_le32_to_cpu (unsigned long x)
118{
119 return (((x & 0x000000ffU) << 24) |
120 ((x & 0x0000ff00U) << 8) |
121 ((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24));
122}
123
124
125
126
127#ifdef DEBUG_MV_ETH
128void print_globals (struct eth_device *dev)
129{
130 printf ("Ethernet PRINT_Globals-Debug function\n");
131 printf ("Base Address for ETH_PORT_INFO: %08x\n",
132 (unsigned int) dev->priv);
133 printf ("Base Address for mv64460_eth_priv: %08x\n",
134 (unsigned int) &(((ETH_PORT_INFO *) dev->priv)->
135 port_private));
136
137 printf ("GT Internal Base Address: %08x\n",
138 INTERNAL_REG_BASE_ADDR);
139 printf ("Base Address for TX-DESCs: %08x Number of allocated Buffers %d\n",
140 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_tx_desc_area_base[0], MV64460_TX_QUEUE_SIZE);
141 printf ("Base Address for RX-DESCs: %08x Number of allocated Buffers %d\n",
142 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->p_rx_desc_area_base[0], MV64460_RX_QUEUE_SIZE);
143 printf ("Base Address for RX-Buffer: %08x allocated Bytes %d\n",
144 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
145 p_rx_buffer_base[0],
146 (MV64460_RX_QUEUE_SIZE * MV64460_RX_BUFFER_SIZE) + 32);
147 printf ("Base Address for TX-Buffer: %08x allocated Bytes %d\n",
148 (unsigned int) ((ETH_PORT_INFO *) dev->priv)->
149 p_tx_buffer_base[0],
150 (MV64460_TX_QUEUE_SIZE * MV64460_TX_BUFFER_SIZE) + 32);
151}
152#endif
153
154
155
156
157
158
159
160
161
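/*
 * mv64460_eth_print_phy_status - print link, duplex and speed for a port.
 *
 * Reads PHY register 1 (status) via SMI; if the link bit (0x20) is clear
 * the link is reported as down, otherwise the port status register is
 * decoded for duplex (BIT2) and speed (BIT4 = 1 Gbps, BIT5 = 100 Mbps).
 */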
162void mv64460_eth_print_phy_status (struct eth_device *dev)
163{
164 struct mv64460_eth_priv *port_private;
165 unsigned int port_num;
166 ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
167 unsigned int port_status, phy_reg_data;
168
169 port_private =
170 (struct mv64460_eth_priv *) ethernet_private->port_private;
171 port_num = port_private->port_num;
172
173
174 eth_port_read_smi_reg (port_num, 1, &phy_reg_data);
175 if (!(phy_reg_data & 0x20)) {
176 printf ("Ethernet port changed link status to DOWN\n");
177 } else {
178 port_status =
179 MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
180 printf ("Ethernet status port %d: Link up", port_num);
181 printf (", %s",
182 (port_status & BIT2) ? "Full Duplex" : "Half Duplex");
183 if (port_status & BIT4)
184 printf (", Speed 1 Gbps");
185 else
186 printf (", %s",
187 (port_status & BIT5) ? "Speed 100 Mbps" :
188 "Speed 10 Mbps");
189 printf ("\n");
190 }
191}
192
193
194
195
196
197int db64460_eth_probe (struct eth_device *dev)
198{
199 return ((int) db64460_eth_start (dev));
200}
201
202int db64460_eth_poll (struct eth_device *dev)
203{
204 return mv64460_eth_receive (dev);
205}
206
207int db64460_eth_transmit (struct eth_device *dev, volatile void *packet,
208 int length)
209{
210 mv64460_eth_xmit (dev, packet, length);
211 return 0;
212}
213
214void db64460_eth_disable (struct eth_device *dev)
215{
216 mv64460_eth_stop (dev);
217}
218
219#define DFCDL(write,read) ((write << 6) | read)
220unsigned int ethDfcdls[] = {
221 DFCDL(0,0), DFCDL(1,1), DFCDL(2,2), DFCDL(3,3),
222 DFCDL(4,4), DFCDL(5,5), DFCDL(6,6), DFCDL(7,7),
223 DFCDL(8,8), DFCDL(9,9), DFCDL(10,10), DFCDL(11,11),
224 DFCDL(12,12), DFCDL(13,13), DFCDL(14,14), DFCDL(15,15),
225 DFCDL(16,16), DFCDL(17,17), DFCDL(18,18), DFCDL(19,19),
226 DFCDL(20,20), DFCDL(21,21), DFCDL(22,22), DFCDL(23,23),
227 DFCDL(24,24), DFCDL(25,25), DFCDL(26,26), DFCDL(27,27),
228 DFCDL(28,28), DFCDL(29,29), DFCDL(30,30), DFCDL(31,31),
229 DFCDL(32,32), DFCDL(33,33), DFCDL(34,34), DFCDL(35,35),
230 DFCDL(36,36), DFCDL(37,37), DFCDL(38,38), DFCDL(39,39),
231 DFCDL(40,40), DFCDL(41,41), DFCDL(42,42), DFCDL(43,43),
232 DFCDL(44,44), DFCDL(45,45), DFCDL(46,46), DFCDL(47,47),
233 DFCDL(48,48), DFCDL(49,49), DFCDL(50,50), DFCDL(51,51),
234 DFCDL(52,52), DFCDL(53,53), DFCDL(54,54), DFCDL(55,55),
235 DFCDL(56,56), DFCDL(57,57), DFCDL(58,58), DFCDL(59,59),
236 DFCDL(60,60), DFCDL(61,61), DFCDL(62,62), DFCDL(63,63),
237};
238
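/*
 * mv_eth_phy_init - program the RX/TX DFCDL delay chain.
 *
 * Writes the 64 entries of ethDfcdls[] (identical write/read delay taps
 * 0..63, packed as (write << 6) | read) into the DFCDL data register and
 * then writes 0x300000 to ETH_PHY_DFCDL_CONFIG0_REG; that value is a
 * board-specific setup constant taken as-is from the original code.
 */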
239void mv_eth_phy_init (void)
240{
241 int i;
242
243 MV_REG_WRITE (ETH_PHY_DFCDL_ADDR_REG, 0);
244
245 for (i = 0; i < 64; i++) {
246 MV_REG_WRITE (ETH_PHY_DFCDL_DATA0_REG, ethDfcdls[i]);
247 }
248
249 MV_REG_WRITE (ETH_PHY_DFCDL_CONFIG0_REG, 0x300000);
250}
251
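/*
 * mv6446x_eth_initialize - probe and register all MV64460 ethernet ports.
 *
 * For each of the MV_ETH_DEVS ports this allocates the eth_device,
 * ETH_PORT_INFO and per-port private/statistics structures, reads the
 * MAC address from the ethaddr/eth1addr/eth2addr environment variables,
 * allocates the descriptor rings and buffers, and finally registers the
 * device with the network core and the MII infrastructure.
 */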
252void mv6446x_eth_initialize (bd_t * bis)
253{
254 struct eth_device *dev;
255 ETH_PORT_INFO *ethernet_private;
256 struct mv64460_eth_priv *port_private;
257 int devnum, x, temp;
258 char *s, *e, buf[64];
259
260
261
262
263 temp = MV_REG_READ(0x20A0);
264 temp |= 0x04000080;
265 MV_REG_WRITE(0x20A0, temp);
266
267 mv_eth_phy_init();
268
269 for (devnum = 0; devnum < MV_ETH_DEVS; devnum++) {
270 dev = calloc (sizeof (*dev), 1);
271 if (!dev) {
272 printf ("%s: mv_enet%d allocation failure, %s\n",
273 __FUNCTION__, devnum, "eth_device structure");
274 return;
275 }
276
277
278 sprintf (dev->name, "mv_enet%d", devnum);
279
280#ifdef DEBUG
281 printf ("Initializing %s\n", dev->name);
282#endif
283
284
285 switch (devnum) {
286 case 0:
287 s = "ethaddr";
288 break;
289 case 1:
290 s = "eth1addr";
291 break;
292 case 2:
293 s = "eth2addr";
294 break;
295 default:
296 printf ("%s: Invalid device number %d\n",
297 __FUNCTION__, devnum);
298 return;
299 }
300
301 temp = getenv_f(s, buf, sizeof (buf));
302 s = (temp > 0) ? buf : NULL;
303
304#ifdef DEBUG
305 printf ("Setting MAC %d to %s\n", devnum, s);
306#endif
307 for (x = 0; x < 6; ++x) {
308 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
309 if (s)
310 s = (*e) ? e + 1 : e;
311 }
312
313 eth_port_uc_addr_set (devnum, dev->enetaddr, 0);
314
315 dev->init = (void *) db64460_eth_probe;
316 dev->halt = (void *) ethernet_phy_reset;
317 dev->send = (void *) db64460_eth_transmit;
318 dev->recv = (void *) db64460_eth_poll;
319
320 ethernet_private = calloc (sizeof (*ethernet_private), 1);
321 dev->priv = (void *)ethernet_private;
322 if (!ethernet_private) {
323 printf ("%s: %s allocation failure, %s\n",
324 __FUNCTION__, dev->name,
325 "Private Device Structure");
326 free (dev);
327 return;
328 }
329
330 memset (ethernet_private, 0, sizeof (ETH_PORT_INFO));
331 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
332
333
		port_private = calloc (sizeof (*port_private), 1);
335 ethernet_private->port_private = (void *)port_private;
336 if (!port_private) {
337 printf ("%s: %s allocation failure, %s\n",
338 __FUNCTION__, dev->name,
339 "Port Private Device Structure");
340
341 free (ethernet_private);
342 free (dev);
343 return;
344 }
345
346 port_private->stats =
347 calloc (sizeof (struct net_device_stats), 1);
348 if (!port_private->stats) {
349 printf ("%s: %s allocation failure, %s\n",
350 __FUNCTION__, dev->name,
351 "Net stat Structure");
352
353 free (port_private);
354 free (ethernet_private);
355 free (dev);
356 return;
357 }
358 memset (ethernet_private->port_private, 0,
359 sizeof (struct mv64460_eth_priv));
360 switch (devnum) {
361 case 0:
362 ethernet_private->port_num = ETH_0;
363 break;
364 case 1:
365 ethernet_private->port_num = ETH_1;
366 break;
367 case 2:
368 ethernet_private->port_num = ETH_2;
369 break;
370 default:
371 printf ("Invalid device number %d\n", devnum);
372 break;
373 };
374
375 port_private->port_num = devnum;
376
377
378
379
380 mv64460_eth_update_stat (dev);
381 memset (port_private->stats, 0,
382 sizeof (struct net_device_stats));
383
384 switch (devnum) {
385 case 0:
386 s = "ethaddr";
387 break;
388 case 1:
389 s = "eth1addr";
390 break;
391 case 2:
392 s = "eth2addr";
393 break;
394 default:
395 printf ("%s: Invalid device number %d\n",
396 __FUNCTION__, devnum);
397 return;
398 }
399
400 temp = getenv_f(s, buf, sizeof (buf));
401 s = (temp > 0) ? buf : NULL;
402
403#ifdef DEBUG
404 printf ("Setting MAC %d to %s\n", devnum, s);
405#endif
406 for (x = 0; x < 6; ++x) {
407 dev->enetaddr[x] = s ? simple_strtoul (s, &e, 16) : 0;
408 if (s)
409 s = (*e) ? e + 1 : e;
410 }
411
412 DP (printf ("Allocating descriptor and buffer rings\n"));
413
414 ethernet_private->p_rx_desc_area_base[0] =
415 (ETH_RX_DESC *) memalign (16,
416 RX_DESC_ALIGNED_SIZE *
417 MV64460_RX_QUEUE_SIZE + 1);
418 ethernet_private->p_tx_desc_area_base[0] =
419 (ETH_TX_DESC *) memalign (16,
420 TX_DESC_ALIGNED_SIZE *
421 MV64460_TX_QUEUE_SIZE + 1);
422
423 ethernet_private->p_rx_buffer_base[0] =
424 (char *) memalign (16,
425 MV64460_RX_QUEUE_SIZE *
426 MV64460_TX_BUFFER_SIZE + 1);
427 ethernet_private->p_tx_buffer_base[0] =
428 (char *) memalign (16,
429 MV64460_RX_QUEUE_SIZE *
430 MV64460_TX_BUFFER_SIZE + 1);
431
432#ifdef DEBUG_MV_ETH
433
434 print_globals (dev);
435#endif
436 eth_register (dev);
437
438 miiphy_register(dev->name, mv_miiphy_read, mv_miiphy_write);
439 }
440 DP (printf ("%s: exit\n", __FUNCTION__));
441
442}
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458int mv64460_eth_open (struct eth_device *dev)
459{
460 return (mv64460_eth_real_open (dev));
461}
462
463
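/*
 * mv64460_eth_real_open - bring a port up.
 *
 * Stops the receive queues, clears and unmasks the port interrupt cause
 * registers, initializes the port and its TX/RX descriptor rings, starts
 * the port, and then waits (up to PHY_AUTONEGOTIATE_TIMEOUT ms) for PHY
 * auto-negotiation to complete before reporting the negotiated speed
 * and duplex.
 */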
464static int mv64460_eth_real_open (struct eth_device *dev)
465{
466
467 unsigned int queue;
468 ETH_PORT_INFO *ethernet_private;
469 struct mv64460_eth_priv *port_private;
470 unsigned int port_num;
471 u32 port_status;
472 ushort reg_short;
473 int speed;
474 int duplex;
475 int i;
476 int reg;
477
478 ethernet_private = (ETH_PORT_INFO *) dev->priv;
479
480
481 memcpy (ethernet_private->port_mac_addr, dev->enetaddr, 6);
482
483 port_private = (struct mv64460_eth_priv *) ethernet_private->port_private;
484 port_num = port_private->port_num;
485
486
487 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num), 0x0000ff00);
488
489
490 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
491 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
492
493
494 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num),
495 INT_CAUSE_UNMASK_ALL);
496
497
498 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num),
499 INT_CAUSE_UNMASK_ALL_EXT);
500
501
502 ethernet_private->port_phy_addr = 0x1 + (port_num << 1);
503 reg = ethernet_private->port_phy_addr;
504
505
506 eth_port_init (ethernet_private);
507
508
509
510 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
511 unsigned int size;
512
513 port_private->tx_ring_size[queue] = MV64460_TX_QUEUE_SIZE;
514 size = (port_private->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
515 ethernet_private->tx_desc_area_size[queue] = size;
516
517
518 memset ((void *) ethernet_private->p_tx_desc_area_base[queue],
519 0, ethernet_private->tx_desc_area_size[queue]);
520
521
522 if (ether_init_tx_desc_ring
523 (ethernet_private, ETH_Q0,
524 port_private->tx_ring_size[queue],
525 MV64460_TX_BUFFER_SIZE ,
526 (unsigned int) ethernet_private->
527 p_tx_desc_area_base[queue],
528 (unsigned int) ethernet_private->
529 p_tx_buffer_base[queue]) == false)
530 printf ("### Error initializing TX Ring\n");
531 }
532
533
534 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
535 unsigned int size;
536
537
538 port_private->rx_ring_size[queue] = MV64460_RX_QUEUE_SIZE;
539 size = (port_private->rx_ring_size[queue] *
540 RX_DESC_ALIGNED_SIZE);
541 ethernet_private->rx_desc_area_size[queue] = size;
542
543
544 memset ((void *) ethernet_private->p_rx_desc_area_base[queue],
545 0, ethernet_private->rx_desc_area_size[queue]);
546 if ((ether_init_rx_desc_ring
547 (ethernet_private, ETH_Q0,
548 port_private->rx_ring_size[queue],
549 MV64460_RX_BUFFER_SIZE ,
550 (unsigned int) ethernet_private->
551 p_rx_desc_area_base[queue],
552 (unsigned int) ethernet_private->
553 p_rx_buffer_base[queue])) == false)
554 printf ("### Error initializing RX Ring\n");
555 }
556
557 eth_port_start (ethernet_private);
558
559
560 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num),
561 (0x5 << 17) |
562 (MV_REG_READ
563 (MV64460_ETH_PORT_SERIAL_CONTROL_REG (port_num))
564 & 0xfff1ffff));
565
566
567
568
569
570
571 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (port_num), 0);
572 port_status = MV_REG_READ (MV64460_ETH_PORT_STATUS_REG (port_num));
573
574#if defined(CONFIG_PHY_RESET)
575
576
577
578
579 if (port_private->first_init == 0) {
580 port_private->first_init = 1;
581 ethernet_phy_reset (port_num);
582
583
584 phy_setup_aneg (dev->name, reg);
585 udelay (1000);
586 }
587#endif
588
	miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
590
591
592
593
594 if ((reg_short & BMSR_ANEGCAPABLE)
595 && !(reg_short & BMSR_ANEGCOMPLETE)) {
596 puts ("Waiting for PHY auto negotiation to complete");
597 i = 0;
598 while (!(reg_short & BMSR_ANEGCOMPLETE)) {
599
600
601
602 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
603 puts (" TIMEOUT !\n");
604 break;
605 }
606
607 if ((i++ % 1000) == 0) {
608 putc ('.');
609 }
610 udelay (1000);
			miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
612
613 }
614 puts (" done\n");
615 udelay (500000);
616 }
617
618 speed = miiphy_speed (dev->name, reg);
619 duplex = miiphy_duplex (dev->name, reg);
620
621 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
622 (int) speed, (duplex == HALF) ? "HALF" : "FULL");
623
624 port_private->eth_running = MAGIC_ETH_RUNNING;
625 return 1;
626}
627
628static int mv64460_eth_free_tx_rings (struct eth_device *dev)
629{
630 unsigned int queue;
631 ETH_PORT_INFO *ethernet_private;
632 struct mv64460_eth_priv *port_private;
633 unsigned int port_num;
634 volatile ETH_TX_DESC *p_tx_curr_desc;
635
636 ethernet_private = (ETH_PORT_INFO *) dev->priv;
637 port_private =
638 (struct mv64460_eth_priv *) ethernet_private->port_private;
639 port_num = port_private->port_num;
640
641
642 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG (port_num),
643 0x0000ff00);
644
645
646 DP (printf ("Clearing previously allocated TX queues... "));
647 for (queue = 0; queue < MV64460_TX_QUEUE_NUM; queue++) {
648
649 for (p_tx_curr_desc =
650 ethernet_private->p_tx_desc_area_base[queue];
651 ((unsigned int) p_tx_curr_desc <= (unsigned int)
652 ethernet_private->p_tx_desc_area_base[queue] +
653 ethernet_private->tx_desc_area_size[queue]);
654 p_tx_curr_desc =
655 (ETH_TX_DESC *) ((unsigned int) p_tx_curr_desc +
656 TX_DESC_ALIGNED_SIZE)) {
657
658 if (p_tx_curr_desc->return_info != 0) {
659 p_tx_curr_desc->return_info = 0;
660 DP (printf ("freed\n"));
661 }
662 }
663 DP (printf ("Done\n"));
664 }
665 return 0;
666}
667
668static int mv64460_eth_free_rx_rings (struct eth_device *dev)
669{
670 unsigned int queue;
671 ETH_PORT_INFO *ethernet_private;
672 struct mv64460_eth_priv *port_private;
673 unsigned int port_num;
674 volatile ETH_RX_DESC *p_rx_curr_desc;
675
676 ethernet_private = (ETH_PORT_INFO *) dev->priv;
677 port_private =
678 (struct mv64460_eth_priv *) ethernet_private->port_private;
679 port_num = port_private->port_num;
680
681
682 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (port_num),
683 0x0000ff00);
684
685
686 DP (printf ("Clearing previously allocated RX queues... "));
687 for (queue = 0; queue < MV64460_RX_QUEUE_NUM; queue++) {
688
689 for (p_rx_curr_desc =
690 ethernet_private->p_rx_desc_area_base[queue];
691 (((unsigned int) p_rx_curr_desc <
692 ((unsigned int) ethernet_private->
693 p_rx_desc_area_base[queue] +
694 ethernet_private->rx_desc_area_size[queue])));
695 p_rx_curr_desc =
696 (ETH_RX_DESC *) ((unsigned int) p_rx_curr_desc +
697 RX_DESC_ALIGNED_SIZE)) {
698 if (p_rx_curr_desc->return_info != 0) {
699 p_rx_curr_desc->return_info = 0;
700 DP (printf ("freed\n"));
701 }
702 }
703 DP (printf ("Done\n"));
704 }
705 return 0;
706}
707
708
709
710
711
712
713
714
715
716
717
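/*
 * mv64460_eth_stop - stop a port.
 *
 * Disables the ethernet unit's address decoding windows (writes 0x3f to
 * the base address enable register), then calls mv64460_eth_real_stop()
 * which frees the TX/RX rings, resets the port and masks its interrupts.
 */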
718int mv64460_eth_stop (struct eth_device *dev)
719{
720 ETH_PORT_INFO *ethernet_private;
721 struct mv64460_eth_priv *port_private;
722 unsigned int port_num;
723
724 ethernet_private = (ETH_PORT_INFO *) dev->priv;
725 port_private =
726 (struct mv64460_eth_priv *) ethernet_private->port_private;
727 port_num = port_private->port_num;
728
729
730 MV_REG_WRITE (MV64460_ETH_BASE_ADDR_ENABLE_REG, 0x3f);
731 DP (printf ("%s Ethernet stop called ... \n", __FUNCTION__));
732 mv64460_eth_real_stop (dev);
733
734 return 0;
}
736
737
738
739static int mv64460_eth_real_stop (struct eth_device *dev)
740{
741 ETH_PORT_INFO *ethernet_private;
742 struct mv64460_eth_priv *port_private;
743 unsigned int port_num;
744
745 ethernet_private = (ETH_PORT_INFO *) dev->priv;
746 port_private =
747 (struct mv64460_eth_priv *) ethernet_private->port_private;
748 port_num = port_private->port_num;
749
750 mv64460_eth_free_tx_rings (dev);
751 mv64460_eth_free_rx_rings (dev);
752
753 eth_port_reset (ethernet_private->port_num);
754
755 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_REG (port_num), 0);
756 MV_REG_WRITE (MV64460_ETH_INTERRUPT_CAUSE_EXTEND_REG (port_num), 0);
757
758 MV_REG_WRITE (MV64460_ETH_INTERRUPT_MASK_REG (port_num), 0);
759
760 MV_REG_WRITE (MV64460_ETH_INTERRUPT_EXTEND_MASK_REG (port_num), 0);
761 MV_RESET_REG_BITS (MV64460_CPU_INTERRUPT0_MASK_HIGH,
762 BIT0 << port_num);
763
764#ifndef UPDATE_STATS_BY_SOFTWARE
765
766
767
768
769 if (port_private->eth_running == MAGIC_ETH_RUNNING) {
770 port_private->eth_running = 0;
771 mv64460_eth_print_stat (dev);
772 }
773 memset (port_private->stats, 0, sizeof (struct net_device_stats));
774#endif
775 DP (printf ("\nEthernet stopped ... \n"));
776 return 0;
777}
778
779
780
781
782
783
784
785
786
787
788
789
790
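/*
 * mv64460_eth_xmit - send one frame on queue 0.
 *
 * The packet is described by a single first+last descriptor and handed
 * to eth_port_send(); on success the TX statistics are updated and the
 * routine then polls eth_tx_return_desc() until all completed
 * descriptors have been returned.  Returns 0 on success, 1 if the queue
 * was full or an error occurred.
 */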
791int mv64460_eth_xmit (struct eth_device *dev, volatile void *dataPtr,
792 int dataSize)
793{
794 ETH_PORT_INFO *ethernet_private;
795 struct mv64460_eth_priv *port_private;
796 unsigned int port_num;
797 PKT_INFO pkt_info;
798 ETH_FUNC_RET_STATUS status;
799 struct net_device_stats *stats;
800 ETH_FUNC_RET_STATUS release_result;
801
802 ethernet_private = (ETH_PORT_INFO *) dev->priv;
803 port_private =
804 (struct mv64460_eth_priv *) ethernet_private->port_private;
805 port_num = port_private->port_num;
806
807 stats = port_private->stats;
808
809
810 pkt_info.cmd_sts = ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
811 pkt_info.byte_cnt = dataSize;
812 pkt_info.buf_ptr = (unsigned int) dataPtr;
813 pkt_info.return_info = 0;
814
815 status = eth_port_send (ethernet_private, ETH_Q0, &pkt_info);
816 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) {
817 printf ("Error on transmitting packet ..");
818 if (status == ETH_QUEUE_FULL)
819 printf ("ETH Queue is full. \n");
820 if (status == ETH_QUEUE_LAST_RESOURCE)
821 printf ("ETH Queue: using last available resource. \n");
822 return 1;
823 }
824
825
826 stats->tx_bytes += dataSize;
827 stats->tx_packets++;
828
829
830 do {
831 release_result =
832 eth_tx_return_desc (ethernet_private, ETH_Q0,
833 &pkt_info);
834 switch (release_result) {
835 case ETH_OK:
836 DP (printf ("descriptor released\n"));
837 if (pkt_info.cmd_sts & BIT0) {
838 printf ("Error in TX\n");
839 stats->tx_errors++;
840 }
841 break;
842 case ETH_RETRY:
843 DP (printf ("transmission still in process\n"));
844 break;
845
846 case ETH_ERROR:
847 printf ("routine can not access Tx desc ring\n");
848 break;
849
850 case ETH_END_OF_JOB:
851 DP (printf ("the routine has nothing to release\n"));
852 break;
853 default:
854 break;
855 }
856 } while (release_result == ETH_OK);
857
858 return 0;
859}
860
861
862
863
864
865
866
867
868
869
870
871
872
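/*
 * mv64460_eth_receive - poll queue 0 for received frames.
 *
 * Each complete, error-free frame (first+last descriptor, no error
 * summary) is passed to NetReceive(); frames spread over multiple
 * descriptors or flagged with an error are dropped.  In both cases the
 * buffer is returned to the RX ring via eth_rx_return_buff().
 */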
873int mv64460_eth_receive (struct eth_device *dev)
874{
875 ETH_PORT_INFO *ethernet_private;
876 struct mv64460_eth_priv *port_private;
877 unsigned int port_num;
878 PKT_INFO pkt_info;
879 struct net_device_stats *stats;
880
881 ethernet_private = (ETH_PORT_INFO *) dev->priv;
882 port_private = (struct mv64460_eth_priv *) ethernet_private->port_private;
883 port_num = port_private->port_num;
884 stats = port_private->stats;
885
886 while ((eth_port_receive (ethernet_private, ETH_Q0, &pkt_info) == ETH_OK)) {
887#ifdef DEBUG_MV_ETH
888 if (pkt_info.byte_cnt != 0) {
889 printf ("%s: Received %d byte Packet @ 0x%x\n",
890 __FUNCTION__, pkt_info.byte_cnt,
891 pkt_info.buf_ptr);
			if(pkt_info.buf_ptr != 0){
				int i;

				for(i=0; i < pkt_info.byte_cnt; i++){
894 if((i % 4) == 0){
895 printf("\n0x");
896 }
897 printf("%02x", ((char*)pkt_info.buf_ptr)[i]);
898 }
899 printf("\n");
900 }
901 }
902#endif
903
904 stats->rx_packets++;
905 stats->rx_bytes += pkt_info.byte_cnt;
906
907
908
909
910
911 if (((pkt_info.
912 cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
913 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
914 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
915 stats->rx_dropped++;
916
917 printf ("Received packet spread on multiple descriptors\n");
918
919
920 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
921 stats->rx_errors++;
922 }
923
924
925 pkt_info.buf_ptr &= ~0x7;
926 pkt_info.byte_cnt = 0x0000;
927
928 if (eth_rx_return_buff
929 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
930 printf ("Error while returning the RX Desc to Ring\n");
931 } else {
932 DP (printf ("RX Desc returned to Ring\n"));
933 }
934
935 } else {
936
937
938#ifdef DEBUG_MV_ETH
939 printf ("\nNow send it to upper layer protocols (NetReceive) ...\n");
940#endif
941
942 NetReceive ((uchar *) pkt_info.buf_ptr,
943 (int) pkt_info.byte_cnt);
944
945
946
947 pkt_info.buf_ptr &= ~0x7;
948 pkt_info.byte_cnt = 0x0000;
949 DP (printf ("RX: pkt_info.buf_ptr = %x\n", pkt_info.buf_ptr));
950 if (eth_rx_return_buff
951 (ethernet_private, ETH_Q0, &pkt_info) != ETH_OK) {
952 printf ("Error while returning the RX Desc to Ring\n");
953 } else {
954 DP (printf ("RX: Desc returned to Ring\n"));
955 }
956
957
958
959 }
960 }
961 mv64460_eth_get_stats (dev);
962 return 1;
963}
964
965
966
967
968
969
970
971
972
973
974
975static struct net_device_stats *mv64460_eth_get_stats (struct eth_device *dev)
976{
977 ETH_PORT_INFO *ethernet_private;
978 struct mv64460_eth_priv *port_private;
979 unsigned int port_num;
980
981 ethernet_private = (ETH_PORT_INFO *) dev->priv;
982 port_private =
983 (struct mv64460_eth_priv *) ethernet_private->port_private;
984 port_num = port_private->port_num;
985
986 mv64460_eth_update_stat (dev);
987
988 return port_private->stats;
989}
990
991
992
993
994
995
996
997
998
999
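/*
 * mv64460_eth_update_stat - fold the hardware MIB counters into the
 * software net_device_stats.  The MIB counters clear on read
 * (eth_clear_mib_counters() relies on this), so the values read here are
 * accumulated; the high halves of the octet counters are read into a
 * dummy variable and discarded.
 */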
1000static void mv64460_eth_update_stat (struct eth_device *dev)
1001{
1002 ETH_PORT_INFO *ethernet_private;
1003 struct mv64460_eth_priv *port_private;
1004 struct net_device_stats *stats;
1005 unsigned int port_num;
1006 volatile unsigned int dummy;
1007
1008 ethernet_private = (ETH_PORT_INFO *) dev->priv;
1009 port_private =
1010 (struct mv64460_eth_priv *) ethernet_private->port_private;
1011 port_num = port_private->port_num;
1012 stats = port_private->stats;
1013
1014
1015 stats->rx_packets += (unsigned long)
1016 eth_read_mib_counter (ethernet_private->port_num,
1017 ETH_MIB_GOOD_FRAMES_RECEIVED);
1018 stats->tx_packets += (unsigned long)
1019 eth_read_mib_counter (ethernet_private->port_num,
1020 ETH_MIB_GOOD_FRAMES_SENT);
1021 stats->rx_bytes += (unsigned long)
1022 eth_read_mib_counter (ethernet_private->port_num,
1023 ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034 dummy = eth_read_mib_counter (ethernet_private->port_num,
1035 ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH);
1036 stats->tx_bytes += (unsigned long)
1037 eth_read_mib_counter (ethernet_private->port_num,
1038 ETH_MIB_GOOD_OCTETS_SENT_LOW);
1039 dummy = eth_read_mib_counter (ethernet_private->port_num,
1040 ETH_MIB_GOOD_OCTETS_SENT_HIGH);
1041 stats->rx_errors += (unsigned long)
1042 eth_read_mib_counter (ethernet_private->port_num,
1043 ETH_MIB_MAC_RECEIVE_ERROR);
1044
1045
1046 stats->rx_dropped +=
1047 (unsigned long) eth_read_mib_counter (ethernet_private->
1048 port_num,
1049 ETH_MIB_BAD_CRC_EVENT);
1050 stats->multicast += (unsigned long)
1051 eth_read_mib_counter (ethernet_private->port_num,
1052 ETH_MIB_MULTICAST_FRAMES_RECEIVED);
1053 stats->collisions +=
1054 (unsigned long) eth_read_mib_counter (ethernet_private->
1055 port_num,
1056 ETH_MIB_COLLISION) +
1057 (unsigned long) eth_read_mib_counter (ethernet_private->
1058 port_num,
1059 ETH_MIB_LATE_COLLISION);
1060
1061 stats->rx_length_errors +=
1062 (unsigned long) eth_read_mib_counter (ethernet_private->
1063 port_num,
1064 ETH_MIB_UNDERSIZE_RECEIVED)
1065 +
1066 (unsigned long) eth_read_mib_counter (ethernet_private->
1067 port_num,
1068 ETH_MIB_OVERSIZE_RECEIVED);
1069
1070}
1071
1072#ifndef UPDATE_STATS_BY_SOFTWARE
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082static void mv64460_eth_print_stat (struct eth_device *dev)
1083{
1084 ETH_PORT_INFO *ethernet_private;
1085 struct mv64460_eth_priv *port_private;
1086 struct net_device_stats *stats;
1087 unsigned int port_num;
1088
1089 ethernet_private = (ETH_PORT_INFO *) dev->priv;
1090 port_private =
1091 (struct mv64460_eth_priv *) ethernet_private->port_private;
1092 port_num = port_private->port_num;
1093 stats = port_private->stats;
1094
1095
1096 printf ("\n### Network statistics: ###\n");
1097 printf ("--------------------------\n");
1098 printf (" Packets received: %ld\n", stats->rx_packets);
	printf (" Packets sent: %ld\n", stats->tx_packets);
1100 printf (" Received bytes: %ld\n", stats->rx_bytes);
	printf (" Sent bytes: %ld\n", stats->tx_bytes);
1102 if (stats->rx_errors != 0)
1103 printf (" Rx Errors: %ld\n",
1104 stats->rx_errors);
1105 if (stats->rx_dropped != 0)
1106 printf (" Rx dropped (CRC Errors): %ld\n",
1107 stats->rx_dropped);
1108 if (stats->multicast != 0)
		printf (" Rx multicast frames: %ld\n",
1110 stats->multicast);
1111 if (stats->collisions != 0)
1112 printf (" No. of collisions: %ld\n",
1113 stats->collisions);
1114 if (stats->rx_length_errors != 0)
1115 printf (" Rx length errors: %ld\n",
1116 stats->rx_length_errors);
1117}
1118#endif
1119
1120
1121
1122
1123
1124
1125
1126bool db64460_eth_start (struct eth_device *dev)
1127{
1128 return (mv64460_eth_open (dev));
1129}
1130
1322
1323#define ETH_ENABLE_TX_QUEUE(tx_queue, eth_port) \
1324 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << tx_queue))
1325
1326#define ETH_DISABLE_TX_QUEUE(tx_queue, eth_port) \
1327 MV_REG_WRITE(MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port),\
1328 (1 << (8 + tx_queue)))
1329
1330#define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1331MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1332
1333#define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1334MV_REG_WRITE(MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1335
1336#define CURR_RFD_GET(p_curr_desc, queue) \
1337 ((p_curr_desc) = p_eth_port_ctrl->p_rx_curr_desc_q[queue])
1338
1339#define CURR_RFD_SET(p_curr_desc, queue) \
1340 (p_eth_port_ctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
1341
1342#define USED_RFD_GET(p_used_desc, queue) \
1343 ((p_used_desc) = p_eth_port_ctrl->p_rx_used_desc_q[queue])
1344
1345#define USED_RFD_SET(p_used_desc, queue)\
1346(p_eth_port_ctrl->p_rx_used_desc_q[queue] = (p_used_desc))
1347
1348
1349#define CURR_TFD_GET(p_curr_desc, queue) \
1350 ((p_curr_desc) = p_eth_port_ctrl->p_tx_curr_desc_q[queue])
1351
1352#define CURR_TFD_SET(p_curr_desc, queue) \
1353 (p_eth_port_ctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
1354
1355#define USED_TFD_GET(p_used_desc, queue) \
1356 ((p_used_desc) = p_eth_port_ctrl->p_tx_used_desc_q[queue])
1357
1358#define USED_TFD_SET(p_used_desc, queue) \
1359 (p_eth_port_ctrl->p_tx_used_desc_q[queue] = (p_used_desc))
1360
1361#define FIRST_TFD_GET(p_first_desc, queue) \
1362 ((p_first_desc) = p_eth_port_ctrl->p_tx_first_desc_q[queue])
1363
1364#define FIRST_TFD_SET(p_first_desc, queue) \
1365 (p_eth_port_ctrl->p_tx_first_desc_q[queue] = (p_first_desc))
1366
1367
1368
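/*
 * RX_NEXT_DESC_PTR/TX_NEXT_DESC_PTR advance a descriptor pointer within
 * a ring, wrapping at the end of the descriptor area:
 *
 *   next = base + ((curr - base + DESC_ALIGNED_SIZE) % area_size)
 *
 * e.g. with a 4-descriptor ring (area_size = 4 * RX_DESC_ALIGNED_SIZE),
 * stepping past the last descriptor yields the first one again.
 */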
1369#define RX_NEXT_DESC_PTR(p_rx_desc, queue) (ETH_RX_DESC*)(((((unsigned int)p_rx_desc - (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue]) + RX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->rx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_rx_desc_area_base[queue])
1370
1371#define TX_NEXT_DESC_PTR(p_tx_desc, queue) (ETH_TX_DESC*)(((((unsigned int)p_tx_desc - (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue]) + TX_DESC_ALIGNED_SIZE) % p_eth_port_ctrl->tx_desc_area_size[queue]) + (unsigned int)p_eth_port_ctrl->p_tx_desc_area_base[queue])
1372
1373#define LINK_UP_TIMEOUT 100000
1374#define PHY_BUSY_TIMEOUT 10000000
1375
1376
1377
1378
1379static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr);
1380static int ethernet_phy_get (ETH_PORT eth_port_num);
1381
1382
1383static void eth_set_access_control (ETH_PORT eth_port_num,
1384 ETH_WIN_PARAM * param);
1385static bool eth_port_uc_addr (ETH_PORT eth_port_num, unsigned char uc_nibble,
1386 ETH_QUEUE queue, int option);
1387#if 0
1388static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1389 unsigned char mc_byte,
1390 ETH_QUEUE queue, int option);
1391static bool eth_port_omc_addr (ETH_PORT eth_port_num,
1392 unsigned char crc8,
1393 ETH_QUEUE queue, int option);
1394#endif
1395
1396static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
1397 int byte_count);
1398
1399void eth_dbg (ETH_PORT_INFO * p_eth_port_ctrl);
1400
1401
1402typedef enum _memory_bank { BANK0, BANK1, BANK2, BANK3 } MEMORY_BANK;
1403u32 mv_get_dram_bank_base_addr (MEMORY_BANK bank)
1404{
1405 u32 result = 0;
1406 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1407
1408 if (enable & (1 << bank))
1409 return 0;
1410 if (bank == BANK0)
1411 result = MV_REG_READ (MV64460_CS_0_BASE_ADDR);
1412 if (bank == BANK1)
1413 result = MV_REG_READ (MV64460_CS_1_BASE_ADDR);
1414 if (bank == BANK2)
1415 result = MV_REG_READ (MV64460_CS_2_BASE_ADDR);
1416 if (bank == BANK3)
1417 result = MV_REG_READ (MV64460_CS_3_BASE_ADDR);
1418 result &= 0x0000ffff;
1419 result = result << 16;
1420 return result;
1421}
1422
1423u32 mv_get_dram_bank_size (MEMORY_BANK bank)
1424{
1425 u32 result = 0;
1426 u32 enable = MV_REG_READ (MV64460_BASE_ADDR_ENABLE);
1427
1428 if (enable & (1 << bank))
1429 return 0;
1430 if (bank == BANK0)
1431 result = MV_REG_READ (MV64460_CS_0_SIZE);
1432 if (bank == BANK1)
1433 result = MV_REG_READ (MV64460_CS_1_SIZE);
1434 if (bank == BANK2)
1435 result = MV_REG_READ (MV64460_CS_2_SIZE);
1436 if (bank == BANK3)
1437 result = MV_REG_READ (MV64460_CS_3_SIZE);
1438 result += 1;
1439 result &= 0x0000ffff;
1440 result = result << 16;
1441 return result;
1442}
1443
1444u32 mv_get_internal_sram_base (void)
1445{
1446 u32 result;
1447
1448 result = MV_REG_READ (MV64460_INTEGRATED_SRAM_BASE_ADDR);
1449 result &= 0x0000ffff;
1450 result = result << 16;
1451 return result;
1452}
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
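/*
 * eth_port_init - software initialization of one port.
 *
 * Loads the default port/SDMA/serial control values, clears the
 * per-queue descriptor pointers, resets the port, programs the five
 * address decoding windows (DRAM chip selects 0-3 plus the integrated
 * SRAM), clears the MAC filter tables and finally sets the PHY address
 * for this port.
 */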
1479static void eth_port_init (ETH_PORT_INFO * p_eth_port_ctrl)
1480{
1481 int queue;
1482 ETH_WIN_PARAM win_param;
1483
1484 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1485 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1486 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1487 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1488
1489 p_eth_port_ctrl->port_rx_queue_command = 0;
1490 p_eth_port_ctrl->port_tx_queue_command = 0;
1491
1492
1493 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1494 CURR_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1495 USED_RFD_SET ((ETH_RX_DESC *) 0x00000000, queue);
1496 p_eth_port_ctrl->rx_resource_err[queue] = false;
1497 }
1498
1499 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1500 CURR_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1501 USED_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1502 FIRST_TFD_SET ((ETH_TX_DESC *) 0x00000000, queue);
1503 p_eth_port_ctrl->tx_resource_err[queue] = false;
1504 }
1505
1506 eth_port_reset (p_eth_port_ctrl->port_num);
1507
1508
1509 win_param.win = ETH_WIN0;
1510 win_param.target = ETH_TARGET_DRAM;
1511 win_param.attributes = EBAR_ATTR_DRAM_CS0;
1512#ifndef CONFIG_NOT_COHERENT_CACHE
1513 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1514#endif
1515 win_param.high_addr = 0;
1516
1517 win_param.base_addr = mv_get_dram_bank_base_addr (BANK0);
1518 win_param.size = mv_get_dram_bank_size (BANK0);
1519 if (win_param.size == 0)
1520 win_param.enable = 0;
1521 else
1522 win_param.enable = 1;
1523 win_param.access_ctrl = EWIN_ACCESS_FULL;
1524
1525
1526 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1527
1528
1529 win_param.win = ETH_WIN1;
1530 win_param.target = ETH_TARGET_DRAM;
1531 win_param.attributes = EBAR_ATTR_DRAM_CS1;
1532#ifndef CONFIG_NOT_COHERENT_CACHE
1533 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1534#endif
1535 win_param.high_addr = 0;
1536
1537 win_param.base_addr = mv_get_dram_bank_base_addr (BANK1);
1538 win_param.size = mv_get_dram_bank_size (BANK1);
1539 if (win_param.size == 0)
1540 win_param.enable = 0;
1541 else
1542 win_param.enable = 1;
1543 win_param.access_ctrl = EWIN_ACCESS_FULL;
1544
1545
1546 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1547
1548
1549 win_param.win = ETH_WIN2;
1550 win_param.target = ETH_TARGET_DRAM;
1551 win_param.attributes = EBAR_ATTR_DRAM_CS2;
1552#ifndef CONFIG_NOT_COHERENT_CACHE
1553 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1554#endif
1555 win_param.high_addr = 0;
1556
1557 win_param.base_addr = mv_get_dram_bank_base_addr (BANK2);
1558 win_param.size = mv_get_dram_bank_size (BANK2);
1559 if (win_param.size == 0)
1560 win_param.enable = 0;
1561 else
1562 win_param.enable = 1;
1563 win_param.access_ctrl = EWIN_ACCESS_FULL;
1564
1565
1566 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1567
1568
1569 win_param.win = ETH_WIN3;
1570 win_param.target = ETH_TARGET_DRAM;
1571 win_param.attributes = EBAR_ATTR_DRAM_CS3;
1572#ifndef CONFIG_NOT_COHERENT_CACHE
1573 win_param.attributes |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
1574#endif
1575 win_param.high_addr = 0;
1576
1577 win_param.base_addr = mv_get_dram_bank_base_addr (BANK3);
1578 win_param.size = mv_get_dram_bank_size (BANK3);
1579 if (win_param.size == 0)
1580 win_param.enable = 0;
1581 else
1582 win_param.enable = 1;
1583 win_param.access_ctrl = EWIN_ACCESS_FULL;
1584
1585
1586 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1587
1588
1589 win_param.win = ETH_WIN4;
1590 win_param.target = EBAR_TARGET_CBS;
1591 win_param.attributes = EBAR_ATTR_CBS_SRAM | EBAR_ATTR_CBS_SRAM_BLOCK0;
1592 win_param.high_addr = 0;
1593 win_param.base_addr = mv_get_internal_sram_base ();
1594 win_param.size = MV64460_INTERNAL_SRAM_SIZE;
1595 win_param.enable = 1;
1596 win_param.access_ctrl = EWIN_ACCESS_FULL;
1597
1598
1599 eth_set_access_control (p_eth_port_ctrl->port_num, &win_param);
1600
1601 eth_port_init_mac_tables (p_eth_port_ctrl->port_num);
1602
1603 ethernet_phy_set (p_eth_port_ctrl->port_num,
1604 p_eth_port_ctrl->port_phy_addr);
1605
1606 return;
1607
1608}
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
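/*
 * eth_port_start - hardware start of one port.
 *
 * Writes the current TX/RX descriptor pointers to the controller, sets
 * the unicast MAC address for each active RX queue, programs the port
 * config / serial control / SDMA config registers, enables the serial
 * port and the configured RX queues, and finally checks PHY register 1
 * for link (returns false if the link is down).
 */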
1640static bool eth_port_start (ETH_PORT_INFO * p_eth_port_ctrl)
1641{
1642 int queue;
1643 volatile ETH_TX_DESC *p_tx_curr_desc;
1644 volatile ETH_RX_DESC *p_rx_curr_desc;
1645 unsigned int phy_reg_data;
1646 ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;
1647
1648
1649 for (queue = 0; queue < MAX_TX_QUEUE_NUM; queue++) {
1650 CURR_TFD_GET (p_tx_curr_desc, queue);
1651 MV_REG_WRITE ((MV64460_ETH_TX_CURRENT_QUEUE_DESC_PTR_0
1652 (eth_port_num)
1653 + (4 * queue)),
1654 ((unsigned int) p_tx_curr_desc));
1655
1656 }
1657
1658
1659 for (queue = 0; queue < MAX_RX_QUEUE_NUM; queue++) {
1660 CURR_RFD_GET (p_rx_curr_desc, queue);
1661 MV_REG_WRITE ((MV64460_ETH_RX_CURRENT_QUEUE_DESC_PTR_0
1662 (eth_port_num)
1663 + (4 * queue)),
1664 ((unsigned int) p_rx_curr_desc));
1665
1666 if (p_rx_curr_desc != NULL)
1667
1668 eth_port_uc_addr_set (p_eth_port_ctrl->port_num,
1669 p_eth_port_ctrl->port_mac_addr,
1670 queue);
1671 }
1672
1673
1674 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
1675 p_eth_port_ctrl->port_config);
1676
1677 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
1678 p_eth_port_ctrl->port_config_extend);
1679
1680 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1681 p_eth_port_ctrl->port_serial_control);
1682
1683 MV_SET_REG_BITS (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
1684 ETH_SERIAL_PORT_ENABLE);
1685
1686
1687 MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
1688 p_eth_port_ctrl->port_sdma_config);
1689
1690 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_COUNT
1691 (eth_port_num), 0x3fffffff);
1692 MV_REG_WRITE (MV64460_ETH_TX_QUEUE_0_TOKEN_BUCKET_CONFIG
1693 (eth_port_num), 0x03fffcff);
1694
1695 MV_REG_WRITE (MV64460_ETH_MAXIMUM_TRANSMIT_UNIT (eth_port_num), 0x0);
1696
1697
1698 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG (eth_port_num),
1699 p_eth_port_ctrl->port_rx_queue_command);
1700
1701
1702 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
1703
1704 if (!(phy_reg_data & 0x20))
1705 return false;
1706
1707 return true;
1708}
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
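/*
 * eth_port_uc_addr_set - program the port's unicast MAC address.
 *
 * The six address bytes are packed into the MAC high/low registers and
 * the low nibble of the last byte is enabled in the DA filter unicast
 * table for the given RX queue (see eth_port_uc_addr()).
 */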
1729static void eth_port_uc_addr_set (ETH_PORT eth_port_num,
1730 unsigned char *p_addr, ETH_QUEUE queue)
1731{
1732 unsigned int mac_h;
1733 unsigned int mac_l;
1734
1735 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1736 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1737 (p_addr[2] << 8) | (p_addr[3] << 0);
1738
1739 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_LOW (eth_port_num), mac_l);
1740 MV_REG_WRITE (MV64460_ETH_MAC_ADDR_HIGH (eth_port_num), mac_h);
1741
1742
1743 eth_port_uc_addr (eth_port_num, p_addr[5], queue, ACCEPT_MAC_ADDR);
1744
1745 return;
1746}
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771static bool eth_port_uc_addr (ETH_PORT eth_port_num,
1772 unsigned char uc_nibble,
1773 ETH_QUEUE queue, int option)
1774{
1775 unsigned int unicast_reg;
1776 unsigned int tbl_offset;
1777 unsigned int reg_offset;
1778
1779
1780 uc_nibble = (0xf & uc_nibble);
1781 tbl_offset = (uc_nibble / 4) * 4;
1782 reg_offset = uc_nibble % 4;
1783
1784 switch (option) {
1785 case REJECT_MAC_ADDR:
1786
1787 unicast_reg =
1788 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1789 (eth_port_num)
1790 + tbl_offset));
1791
1792 unicast_reg &= (0x0E << (8 * reg_offset));
1793
1794 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1795 (eth_port_num)
1796 + tbl_offset), unicast_reg);
1797 break;
1798
1799 case ACCEPT_MAC_ADDR:
1800
1801 unicast_reg =
1802 MV_REG_READ ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1803 (eth_port_num)
1804 + tbl_offset));
1805
1806 unicast_reg |= ((0x01 | queue) << (8 * reg_offset));
1807
1808 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
1809 (eth_port_num)
1810 + tbl_offset), unicast_reg);
1811
1812 break;
1813
1814 default:
1815 return false;
1816 }
1817 return true;
1818}
1819
1820#if 0
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852static void eth_port_mc_addr (ETH_PORT eth_port_num,
1853 unsigned char *p_addr,
1854 ETH_QUEUE queue, int option)
1855{
1856 unsigned int mac_h;
1857 unsigned int mac_l;
1858 unsigned char crc_result = 0;
1859 int mac_array[48];
1860 int crc[8];
1861 int i;
1862
1863 if ((p_addr[0] == 0x01) &&
1864 (p_addr[1] == 0x00) &&
1865 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
1866
1867 eth_port_smc_addr (eth_port_num, p_addr[5], queue, option);
1868 } else {
1869
1870 mac_h = (p_addr[0] << 8) | (p_addr[1]);
1871 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
1872 (p_addr[4] << 8) | (p_addr[5] << 0);
1873
1874 for (i = 0; i < 32; i++)
1875 mac_array[i] = (mac_l >> i) & 0x1;
1876 for (i = 32; i < 48; i++)
1877 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
1878
1879 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^
1880 mac_array[39] ^ mac_array[35] ^ mac_array[34] ^
1881 mac_array[31] ^ mac_array[30] ^ mac_array[28] ^
1882 mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
1883 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
1884 mac_array[12] ^ mac_array[8] ^ mac_array[7] ^
1885 mac_array[6] ^ mac_array[0];
1886
1887 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1888 mac_array[43] ^ mac_array[41] ^ mac_array[39] ^
1889 mac_array[36] ^ mac_array[34] ^ mac_array[32] ^
1890 mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
1891 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^
1892 mac_array[21] ^ mac_array[20] ^ mac_array[18] ^
1893 mac_array[17] ^ mac_array[16] ^ mac_array[15] ^
1894 mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
1895 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^
1896 mac_array[0];
1897
1898 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^
1899 mac_array[43] ^ mac_array[42] ^ mac_array[39] ^
1900 mac_array[37] ^ mac_array[34] ^ mac_array[33] ^
1901 mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
1902 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^
1903 mac_array[15] ^ mac_array[13] ^ mac_array[12] ^
1904 mac_array[10] ^ mac_array[8] ^ mac_array[6] ^
1905 mac_array[2] ^ mac_array[1] ^ mac_array[0];
1906
1907 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^
1908 mac_array[43] ^ mac_array[40] ^ mac_array[38] ^
1909 mac_array[35] ^ mac_array[34] ^ mac_array[30] ^
1910 mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
1911 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^
1912 mac_array[14] ^ mac_array[13] ^ mac_array[11] ^
1913 mac_array[9] ^ mac_array[7] ^ mac_array[3] ^
1914 mac_array[2] ^ mac_array[1];
1915
1916 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^
1917 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^
1918 mac_array[35] ^ mac_array[31] ^ mac_array[30] ^
1919 mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
1920 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^
1921 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1922 mac_array[8] ^ mac_array[4] ^ mac_array[3] ^
1923 mac_array[2];
1924
1925 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^
1926 mac_array[42] ^ mac_array[40] ^ mac_array[37] ^
1927 mac_array[36] ^ mac_array[32] ^ mac_array[31] ^
1928 mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
1929 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^
1930 mac_array[15] ^ mac_array[13] ^ mac_array[11] ^
1931 mac_array[9] ^ mac_array[5] ^ mac_array[4] ^
1932 mac_array[3];
1933
1934 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^
1935 mac_array[41] ^ mac_array[38] ^ mac_array[37] ^
1936 mac_array[33] ^ mac_array[32] ^ mac_array[29] ^
1937 mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
1938 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^
1939 mac_array[14] ^ mac_array[12] ^ mac_array[10] ^
1940 mac_array[6] ^ mac_array[5] ^ mac_array[4];
1941
1942 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^
1943 mac_array[39] ^ mac_array[38] ^ mac_array[34] ^
1944 mac_array[33] ^ mac_array[30] ^ mac_array[29] ^
1945 mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
1946 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^
1947 mac_array[13] ^ mac_array[11] ^ mac_array[7] ^
1948 mac_array[6] ^ mac_array[5];
1949
1950 for (i = 0; i < 8; i++)
1951 crc_result = crc_result | (crc[i] << i);
1952
1953 eth_port_omc_addr (eth_port_num, crc_result, queue, option);
1954 }
1955 return;
1956}
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984static bool eth_port_smc_addr (ETH_PORT eth_port_num,
1985 unsigned char mc_byte,
1986 ETH_QUEUE queue, int option)
1987{
1988 unsigned int smc_table_reg;
1989 unsigned int tbl_offset;
1990 unsigned int reg_offset;
1991
1992
1993 tbl_offset = (mc_byte / 4) * 4;
1994 reg_offset = mc_byte % 4;
1995 queue &= 0x7;
1996
1997 switch (option) {
1998 case REJECT_MAC_ADDR:
1999
2000 smc_table_reg =
2001 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2002 smc_table_reg &= (0x0E << (8 * reg_offset));
2003
2004 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
2005 break;
2006
2007 case ACCEPT_MAC_ADDR:
2008
2009 smc_table_reg =
2010 MV_REG_READ ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2011 smc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
2012
2013 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), smc_table_reg);
2014 break;
2015
2016 default:
2017 return false;
2018 }
2019 return true;
2020}
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048static bool eth_port_omc_addr (ETH_PORT eth_port_num,
2049 unsigned char crc8,
2050 ETH_QUEUE queue, int option)
2051{
2052 unsigned int omc_table_reg;
2053 unsigned int tbl_offset;
2054 unsigned int reg_offset;
2055
2056
2057 tbl_offset = (crc8 / 4) * 4;
2058 reg_offset = crc8 % 4;
2059 queue &= 0x7;
2060
2061 switch (option) {
2062 case REJECT_MAC_ADDR:
2063
2064 omc_table_reg =
2065 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2066 omc_table_reg &= (0x0E << (8 * reg_offset));
2067
2068 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2069 break;
2070
2071 case ACCEPT_MAC_ADDR:
2072
2073 omc_table_reg =
2074 MV_REG_READ ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset));
2075 omc_table_reg |= ((0x01 | queue) << (8 * reg_offset));
2076
2077 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + tbl_offset), omc_table_reg);
2078 break;
2079
2080 default:
2081 return false;
2082 }
2083 return true;
2084}
2085#endif
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
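/*
 * eth_port_init_mac_tables - clear the DA filter tables.
 *
 * Zeroes the unicast table (16 entries) as well as the special and
 * "other" multicast tables (256 entries each), so that no address is
 * accepted until explicitly enabled.
 */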
2104static void eth_port_init_mac_tables (ETH_PORT eth_port_num)
2105{
2106 int table_index;
2107
2108
2109 for (table_index = 0; table_index <= 0xC; table_index += 4)
2110 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_UNICAST_TABLE_BASE
2111 (eth_port_num) + table_index), 0);
2112
2113 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2114
2115 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2116
2117 MV_REG_WRITE ((MV64460_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE (eth_port_num) + table_index), 0);
2118 }
2119}
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138static void eth_clear_mib_counters (ETH_PORT eth_port_num)
2139{
2140 int i;
2141 unsigned int dummy;
2142
2143
2144 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2145 i += 4)
2146 dummy = MV_REG_READ ((MV64460_ETH_MIB_COUNTERS_BASE
2147 (eth_port_num) + i));
2148
2149 return;
2150}
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173unsigned int eth_read_mib_counter (ETH_PORT eth_port_num,
2174 unsigned int mib_offset)
2175{
2176 return (MV_REG_READ (MV64460_ETH_MIB_COUNTERS_BASE (eth_port_num)
2177 + mib_offset));
2178}
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197static void ethernet_phy_set (ETH_PORT eth_port_num, int phy_addr)
2198{
2199 unsigned int reg_data;
2200
2201 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2202
2203 reg_data &= ~(0x1F << (5 * eth_port_num));
2204 reg_data |= (phy_addr << (5 * eth_port_num));
2205
2206 MV_REG_WRITE (MV64460_ETH_PHY_ADDR_REG, reg_data);
2207
2208 return;
2209}
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227static int ethernet_phy_get (ETH_PORT eth_port_num)
2228{
2229 unsigned int reg_data;
2230
2231 reg_data = MV_REG_READ (MV64460_ETH_PHY_ADDR_REG);
2232
2233 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2234}
2235
2236
2237
2238
2239int phy_setup_aneg (char *devname, unsigned char addr)
2240{
2241 unsigned short ctl, adv;
2242
2243
2244 miiphy_read (devname, addr, MII_ADVERTISE, &adv);
2245 adv |= (LPA_LPACK | LPA_RFAULT | LPA_100BASE4 |
2246 LPA_100FULL | LPA_100HALF | LPA_10FULL |
2247 LPA_10HALF);
2248 miiphy_write (devname, addr, MII_ADVERTISE, adv);
2249
2250 miiphy_read (devname, addr, MII_CTRL1000, &adv);
2251 adv |= (0x0300);
2252 miiphy_write (devname, addr, MII_CTRL1000, adv);
2253
2254
2255 miiphy_read (devname, addr, MII_BMCR, &ctl);
2256 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
2257 miiphy_write (devname, addr, MII_BMCR, ctl);
2258
2259 return 0;
2260}
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
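/*
 * ethernet_phy_reset - reset the port's PHY and wait for link.
 *
 * Sets bits 0x0083 in PHY register 20, issues a software reset via the
 * control register (bit 15), then polls the status register until the
 * link bit comes up or the retry counter expires.  Register 20 is a
 * vendor-specific register; the value used here is taken as-is from the
 * original board code.
 */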
2279static bool ethernet_phy_reset (ETH_PORT eth_port_num)
2280{
2281 unsigned int time_out = 50;
2282 unsigned int phy_reg_data;
2283
2284 eth_port_read_smi_reg (eth_port_num, 20, &phy_reg_data);
2285 phy_reg_data |= 0x0083;
2286 eth_port_write_smi_reg (eth_port_num, 20, phy_reg_data);
2287
2288
2289 eth_port_read_smi_reg (eth_port_num, 0, &phy_reg_data);
2290 phy_reg_data |= 0x8000;
2291 eth_port_write_smi_reg (eth_port_num, 0, phy_reg_data);
2292
2293
2294 do {
2295 eth_port_read_smi_reg (eth_port_num, 1, &phy_reg_data);
2296
2297 if (time_out-- == 0)
2298 return false;
2299 }
2300 while (!(phy_reg_data & 0x20));
2301
2302 return true;
2303}
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323static void eth_port_reset (ETH_PORT eth_port_num)
2324{
2325 unsigned int reg_data;
2326
2327
2328 reg_data =
2329 MV_REG_READ (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2330 (eth_port_num));
2331
2332 if (reg_data & 0xFF) {
2333
2334 MV_REG_WRITE (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2335 (eth_port_num), (reg_data << 8));
2336
2337
2338 do {
2339
2340 reg_data =
2341 MV_REG_READ
2342 (MV64460_ETH_TRANSMIT_QUEUE_COMMAND_REG
2343 (eth_port_num));
2344 }
2345 while (reg_data & 0xFF);
2346 }
2347
2348
2349 reg_data =
2350 MV_REG_READ (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2351 (eth_port_num));
2352
2353 if (reg_data & 0xFF) {
2354
2355 MV_REG_WRITE (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2356 (eth_port_num), (reg_data << 8));
2357
2358
2359 do {
2360
2361 reg_data =
2362 MV_REG_READ
2363 (MV64460_ETH_RECEIVE_QUEUE_COMMAND_REG
2364 (eth_port_num));
2365 }
2366 while (reg_data & 0xFF);
2367 }
2368
2369
2370 eth_clear_mib_counters (eth_port_num);
2371
2372
2373 reg_data =
2374 MV_REG_READ (MV64460_ETH_PORT_SERIAL_CONTROL_REG
2375 (eth_port_num));
2376 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2377 MV_REG_WRITE (MV64460_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num),
2378 reg_data);
2379
2380 return;
2381}
2382
2383#if 0
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403static void ethernet_set_config_reg (ETH_PORT eth_port_num,
2404 unsigned int value)
2405{
2406 unsigned int eth_config_reg;
2407
2408 eth_config_reg =
2409 MV_REG_READ (MV64460_ETH_PORT_CONFIG_REG (eth_port_num));
2410 eth_config_reg |= value;
2411 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_REG (eth_port_num),
2412 eth_config_reg);
2413
2414 return;
2415}
2416#endif
2417
2418#if 0
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438static void ethernet_reset_config_reg (ETH_PORT eth_port_num,
2439 unsigned int value)
2440{
2441 unsigned int eth_config_reg;
2442
2443 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2444 (eth_port_num));
2445 eth_config_reg &= ~value;
2446 MV_REG_WRITE (MV64460_ETH_PORT_CONFIG_EXTEND_REG (eth_port_num),
2447 eth_config_reg);
2448
2449 return;
2450}
2451#endif
2452
2453#if 0
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471static unsigned int ethernet_get_config_reg (ETH_PORT eth_port_num)
2472{
2473 unsigned int eth_config_reg;
2474
2475 eth_config_reg = MV_REG_READ (MV64460_ETH_PORT_CONFIG_EXTEND_REG
2476 (eth_port_num));
2477 return eth_config_reg;
2478}
2479
2480#endif
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
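/*
 * eth_port_read_smi_reg - read a PHY register through the SMI interface.
 *
 * Waits for the SMI unit to become idle, issues a read command for the
 * port's PHY address, waits for the read-valid bit, burns a short delay
 * loop and then returns the low 16 bits of the SMI register.  Returns
 * false on a busy/valid timeout.  mv_miiphy_read() below is the same
 * sequence with the PHY address supplied by the caller.
 */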
2502static bool eth_port_read_smi_reg (ETH_PORT eth_port_num,
2503 unsigned int phy_reg, unsigned int *value)
2504{
2505 unsigned int reg_value;
2506 unsigned int time_out = PHY_BUSY_TIMEOUT;
2507 int phy_addr;
2508
2509 phy_addr = ethernet_phy_get (eth_port_num);
2510
2511
2512 do {
2513 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2514 if (time_out-- == 0) {
2515 return false;
2516 }
2517 }
2518 while (reg_value & ETH_SMI_BUSY);
2519
2520
2521
2522 MV_REG_WRITE (MV64460_ETH_SMI_REG,
2523 (phy_addr << 16) | (phy_reg << 21) |
2524 ETH_SMI_OPCODE_READ);
2525
2526 time_out = PHY_BUSY_TIMEOUT;
2527
2528 do {
2529 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2530 if (time_out-- == 0) {
2531 return false;
2532 }
2533 }
2534 while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);
2535
2536
2538 for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);
2539
2540 reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
2541
2542 *value = reg_value & 0xffff;
2543
2544 return true;
2545}
2546
int mv_miiphy_read(const char *devname, unsigned char phy_addr,
		   unsigned char phy_reg, unsigned short *value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;

	/* first check that the SMI interface is not busy */
	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return -1;	/* time out: non-zero means error */
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	/* not busy: issue the read command */
	MV_REG_WRITE (MV64460_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_READ);

	time_out = PHY_BUSY_TIMEOUT;	/* re-arm the time out */

	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return -1;	/* time out: non-zero means error */
		}
	}
	while ((reg_value & ETH_SMI_READ_VALID) != ETH_SMI_READ_VALID);

	/* Crude delay loop to let the read data settle */
	for (time_out = 0; time_out < PHY_UPDATE_TIMEOUT; time_out++);

	reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);

	*value = reg_value & 0xffff;

	return 0;
}
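
/*
 * eth_port_write_smi_reg - Write a PHY register through the SMI interface.
 *
 * Waits for the SMI bus to become free, then issues the write command for
 * the port's PHY.  Returns false if the wait times out, true otherwise;
 * the write itself is not polled for completion.
 */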
static bool eth_port_write_smi_reg (ETH_PORT eth_port_num,
				    unsigned int phy_reg, unsigned int value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;
	int phy_addr;

	phy_addr = ethernet_phy_get (eth_port_num);

	/* first check that the SMI interface is not busy */
	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return false;
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	/* not busy: issue the write command */
	MV_REG_WRITE (MV64460_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_WRITE | (value & 0xffff));
	return true;
}
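
/*
 * mv_miiphy_write - miiphy write callback: same SMI write sequence as
 * eth_port_write_smi_reg, but with the PHY address supplied by the caller.
 * Returns 0 on success, non-zero on timeout.
 */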
int mv_miiphy_write(const char *devname, unsigned char phy_addr,
		    unsigned char phy_reg, unsigned short value)
{
	unsigned int reg_value;
	unsigned int time_out = PHY_BUSY_TIMEOUT;

	/* first check that the SMI interface is not busy */
	do {
		reg_value = MV_REG_READ (MV64460_ETH_SMI_REG);
		if (time_out-- == 0) {
			return -1;	/* time out: non-zero means error */
		}
	}
	while (reg_value & ETH_SMI_BUSY);

	/* not busy: issue the write command */
	MV_REG_WRITE (MV64460_ETH_SMI_REG,
		      (phy_addr << 16) | (phy_reg << 21) |
		      ETH_SMI_OPCODE_WRITE | (value & 0xffff));
	return 0;
}
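
/*
 * eth_set_access_control - Configure one Ethernet address decode window.
 *
 * Programs the access protection bits, window size, base address (target,
 * attributes and base), the high address remap register for windows 0..3,
 * and finally enables or disables the window according to param->enable.
 */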
static void eth_set_access_control (ETH_PORT eth_port_num,
				    ETH_WIN_PARAM * param)
{
	unsigned int access_prot_reg;

	/* Access protection register: two protection bits per window */
	access_prot_reg = MV_REG_READ (MV64460_ETH_ACCESS_PROTECTION_REG
				       (eth_port_num));
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MV_REG_WRITE (MV64460_ETH_ACCESS_PROTECTION_REG (eth_port_num),
		      access_prot_reg);

	/* Window size register (size is programmed in 64 KB units) */
	MV_REG_WRITE ((MV64460_ETH_SIZE_REG_0 +
		       (ETH_SIZE_REG_GAP * param->win)),
		      (((param->size / 0x10000) - 1) << 16));

	/* Window base address register: target, attributes and base */
	MV_REG_WRITE ((MV64460_ETH_BAR_0 + (ETH_BAR_GAP * param->win)),
		      (param->target | param->attributes | param->base_addr));

	/* Only windows 0..3 have a high address remap register */
	if (param->win < 4)
		MV_REG_WRITE ((MV64460_ETH_HIGH_ADDR_REMAP_REG_0 +
			       (ETH_HIGH_ADDR_REMAP_REG_GAP * param->win)),
			      param->high_addr);

	/* Enable or disable the window; the enable register appears to be
	 * active low, so the window's bit is cleared to enable it.
	 */
	if (param->enable == 1)
		MV_RESET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
				   (1 << param->win));
	else
		MV_SET_REG_BITS (MV64460_ETH_BASE_ADDR_ENABLE_REG,
				 (1 << param->win));
}
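
/*
 * ether_init_rx_desc_ring - Build the Rx descriptor ring for one queue.
 *
 * The caller supplies the descriptor area and the buffer area.  Each
 * descriptor is given to the DMA, chained to the next one, and the last
 * descriptor is linked back to the first to close the ring.  Returns false
 * if the buffer area is misaligned or the buffer size is out of range,
 * true on success.
 */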
2738static bool ether_init_rx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2739 ETH_QUEUE rx_queue,
2740 int rx_desc_num,
2741 int rx_buff_size,
2742 unsigned int rx_desc_base_addr,
2743 unsigned int rx_buff_base_addr)
2744{
2745 ETH_RX_DESC *p_rx_desc;
2746 ETH_RX_DESC *p_rx_prev_desc;
2747 unsigned int buffer_addr;
2748 int ix;
2749
2750 p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
2751 p_rx_prev_desc = p_rx_desc;
2752 buffer_addr = rx_buff_base_addr;
2753
2754
2755 if (rx_buff_base_addr & 0xF)
2756 return false;
2757
2758
2759 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
2760 return false;
2761
2762
2763 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
2764 return false;
2765
2766
2767 for (ix = 0; ix < rx_desc_num; ix++) {
2768 p_rx_desc->buf_size = rx_buff_size;
2769 p_rx_desc->byte_cnt = 0x0000;
2770 p_rx_desc->cmd_sts =
2771 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2772 p_rx_desc->next_desc_ptr =
2773 ((unsigned int) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
2774 p_rx_desc->buf_ptr = buffer_addr;
2775 p_rx_desc->return_info = 0x00000000;
2776 D_CACHE_FLUSH_LINE (p_rx_desc, 0);
2777 buffer_addr += rx_buff_size;
2778 p_rx_prev_desc = p_rx_desc;
2779 p_rx_desc = (ETH_RX_DESC *)
2780 ((unsigned int) p_rx_desc + RX_DESC_ALIGNED_SIZE);
2781 }
2782
2783
2784 p_rx_prev_desc->next_desc_ptr = (rx_desc_base_addr);
2785 D_CACHE_FLUSH_LINE (p_rx_prev_desc, 0);
2786
2787
2788 CURR_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2789 USED_RFD_SET ((ETH_RX_DESC *) rx_desc_base_addr, rx_queue);
2790
2791 p_eth_port_ctrl->p_rx_desc_area_base[rx_queue] =
2792 (ETH_RX_DESC *) rx_desc_base_addr;
2793 p_eth_port_ctrl->rx_desc_area_size[rx_queue] =
2794 rx_desc_num * RX_DESC_ALIGNED_SIZE;
2795
2796 p_eth_port_ctrl->port_rx_queue_command |= (1 << rx_queue);
2797
2798 return true;
2799}
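
/*
 * ether_init_tx_desc_ring - Build the Tx descriptor ring for one queue.
 *
 * Same structure as the Rx ring: descriptors are chained, the last one
 * wraps back to the first, and the queue's current/used pointers are set
 * to the ring base.  Returns false on bad alignment or buffer size.
 */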
2831static bool ether_init_tx_desc_ring (ETH_PORT_INFO * p_eth_port_ctrl,
2832 ETH_QUEUE tx_queue,
2833 int tx_desc_num,
2834 int tx_buff_size,
2835 unsigned int tx_desc_base_addr,
2836 unsigned int tx_buff_base_addr)
2837{
2838
2839 ETH_TX_DESC *p_tx_desc;
2840 ETH_TX_DESC *p_tx_prev_desc;
2841 unsigned int buffer_addr;
2842 int ix;
2843
2844
2845 p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
2846 p_tx_prev_desc = p_tx_desc;
2847 buffer_addr = tx_buff_base_addr;
2848
2849
2850 if (tx_buff_base_addr & 0xF)
2851 return false;
2852
2853
2854 if ((tx_buff_size > TX_BUFFER_MAX_SIZE)
2855 || (tx_buff_size < TX_BUFFER_MIN_SIZE))
2856 return false;
2857
2858
2859 for (ix = 0; ix < tx_desc_num; ix++) {
2860 p_tx_desc->byte_cnt = 0x0000;
2861 p_tx_desc->l4i_chk = 0x0000;
2862 p_tx_desc->cmd_sts = 0x00000000;
2863 p_tx_desc->next_desc_ptr =
2864 ((unsigned int) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
2865
2866 p_tx_desc->buf_ptr = buffer_addr;
2867 p_tx_desc->return_info = 0x00000000;
2868 D_CACHE_FLUSH_LINE (p_tx_desc, 0);
2869 buffer_addr += tx_buff_size;
2870 p_tx_prev_desc = p_tx_desc;
2871 p_tx_desc = (ETH_TX_DESC *)
2872 ((unsigned int) p_tx_desc + TX_DESC_ALIGNED_SIZE);
2873
2874 }
2875
2876 p_tx_prev_desc->next_desc_ptr = tx_desc_base_addr;
2877 D_CACHE_FLUSH_LINE (p_tx_prev_desc, 0);
2878
2879 CURR_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2880 USED_TFD_SET ((ETH_TX_DESC *) tx_desc_base_addr, tx_queue);
2881
2882
2883 p_eth_port_ctrl->p_tx_desc_area_base[tx_queue] =
2884 (ETH_TX_DESC *) tx_desc_base_addr;
2885 p_eth_port_ctrl->tx_desc_area_size[tx_queue] =
2886 (tx_desc_num * TX_DESC_ALIGNED_SIZE);
2887
2888
2889 p_eth_port_ctrl->port_tx_queue_command |= (1 << tx_queue);
2890
2891 return true;
2892}
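
/*
 * eth_port_send - Hand one packet fragment to the Tx DMA.
 *
 * Fills the next free Tx descriptor from p_pkt_info; on the descriptor
 * carrying ETH_TX_LAST_DESC the whole chain is handed to the DMA and the
 * Tx queue is enabled.  Returns ETH_QUEUE_FULL, ETH_QUEUE_LAST_RESOURCE,
 * ETH_ERROR or ETH_OK accordingly.
 */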
2922static ETH_FUNC_RET_STATUS eth_port_send (ETH_PORT_INFO * p_eth_port_ctrl,
2923 ETH_QUEUE tx_queue,
2924 PKT_INFO * p_pkt_info)
2925{
2926 volatile ETH_TX_DESC *p_tx_desc_first;
2927 volatile ETH_TX_DESC *p_tx_desc_curr;
2928 volatile ETH_TX_DESC *p_tx_next_desc_curr;
2929 volatile ETH_TX_DESC *p_tx_desc_used;
2930 unsigned int command_status;
2931
2932
2933 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
2934 return ETH_QUEUE_FULL;
2935
2936
2937 CURR_TFD_GET (p_tx_desc_curr, tx_queue);
2938 USED_TFD_GET (p_tx_desc_used, tx_queue);
2939
2940 if (p_tx_desc_curr == NULL)
2941 return ETH_ERROR;
2942
2943
2944 p_tx_next_desc_curr = TX_NEXT_DESC_PTR (p_tx_desc_curr, tx_queue);
2945 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2946
2947 if (command_status & (ETH_TX_FIRST_DESC)) {
2948
2949 FIRST_TFD_SET (p_tx_desc_curr, tx_queue);
2950 p_tx_desc_first = p_tx_desc_curr;
2951 } else {
2952 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
2953 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2954 }
2955

	/* Frames of 8 bytes or less hit a hardware errata (see the message
	 * below).  The apparent work-around - copying the frame into the
	 * spare area inside the descriptor - is disabled here, so such
	 * frames are simply rejected and the copy code after the return
	 * statement is never reached.
	 */
	if (p_pkt_info->byte_cnt <= 8) {
		printf ("You have failed in the < 8 bytes errata - fixme\n");
		return ETH_ERROR;

		p_tx_desc_curr->buf_ptr =
			(unsigned int) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
		eth_b_copy (p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
			    p_pkt_info->byte_cnt);
	} else
		p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;
2969
2970 p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
2971 p_tx_desc_curr->return_info = p_pkt_info->return_info;
2972
2973 if (p_pkt_info->cmd_sts & (ETH_TX_LAST_DESC)) {
2974
2975 p_tx_desc_curr->cmd_sts = command_status |
2976 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2977
2978 if (p_tx_desc_curr != p_tx_desc_first)
2979 p_tx_desc_first->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2980
2981
2982
2983 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2984 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_first, 0);
2985 CPU_PIPE_FLUSH;
2986
2987
2988 ETH_ENABLE_TX_QUEUE (tx_queue, p_eth_port_ctrl->port_num);
2989
2990
2991 p_tx_desc_first = p_tx_next_desc_curr;
2992 FIRST_TFD_SET (p_tx_desc_first, tx_queue);
2993
2994 } else {
2995 p_tx_desc_curr->cmd_sts = command_status;
2996 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_curr, 0);
2997 }
2998
2999
3000 if (p_tx_next_desc_curr == p_tx_desc_used) {
3001
3002 CURR_TFD_SET (p_tx_desc_first, tx_queue);
3003
3004 p_eth_port_ctrl->tx_resource_err[tx_queue] = true;
3005 return ETH_QUEUE_LAST_RESOURCE;
3006 } else {
3007
3008 CURR_TFD_SET (p_tx_next_desc_curr, tx_queue);
3009 return ETH_OK;
3010 }
3011}
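
/*
 * eth_tx_return_desc - Reclaim the oldest transmitted descriptor.
 *
 * Returns ETH_RETRY while the descriptor is still owned by the DMA,
 * ETH_END_OF_JOB when the ring has been fully reclaimed, ETH_ERROR if the
 * used-descriptor pointer is invalid, and ETH_OK after handing back the
 * descriptor's command status and return_info in p_pkt_info.
 */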
3038static ETH_FUNC_RET_STATUS eth_tx_return_desc (ETH_PORT_INFO *
3039 p_eth_port_ctrl,
3040 ETH_QUEUE tx_queue,
3041 PKT_INFO * p_pkt_info)
3042{
3043 volatile ETH_TX_DESC *p_tx_desc_used = NULL;
3044 volatile ETH_TX_DESC *p_tx_desc_first = NULL;
3045 unsigned int command_status;
3046
3047
3048 USED_TFD_GET (p_tx_desc_used, tx_queue);
3049 FIRST_TFD_GET (p_tx_desc_first, tx_queue);
3050
3051
3052 if (p_tx_desc_used == NULL)
3053 return ETH_ERROR;
3054
3055 command_status = p_tx_desc_used->cmd_sts;
3056
3057
3058 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
3059 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
3060 return ETH_RETRY;
3061 }
3062
3063
3064 if ((p_tx_desc_used == p_tx_desc_first) &&
3065 (p_eth_port_ctrl->tx_resource_err[tx_queue] == false)) {
3066 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
3067 return ETH_END_OF_JOB;
3068 }
3069
3070
3071 p_pkt_info->cmd_sts = command_status;
3072 p_pkt_info->return_info = p_tx_desc_used->return_info;
3073 p_tx_desc_used->return_info = 0;
3074
3075
3076 USED_TFD_SET (TX_NEXT_DESC_PTR (p_tx_desc_used, tx_queue), tx_queue);
3077
3078
3079 if (p_eth_port_ctrl->tx_resource_err[tx_queue] == true)
3080 p_eth_port_ctrl->tx_resource_err[tx_queue] = false;
3081
3082 D_CACHE_FLUSH_LINE ((unsigned int) p_tx_desc_used, 0);
3083
3084 return ETH_OK;
3085
3086}
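
/*
 * eth_port_receive - Fetch the next received frame from an Rx queue.
 *
 * Returns ETH_END_OF_JOB while the current descriptor is still owned by
 * the DMA, ETH_QUEUE_FULL if all descriptors are outstanding, ETH_ERROR on
 * an invalid descriptor pointer, and ETH_OK with the frame details filled
 * into p_pkt_info otherwise.
 */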
3113static ETH_FUNC_RET_STATUS eth_port_receive (ETH_PORT_INFO * p_eth_port_ctrl,
3114 ETH_QUEUE rx_queue,
3115 PKT_INFO * p_pkt_info)
3116{
3117 volatile ETH_RX_DESC *p_rx_curr_desc;
3118 volatile ETH_RX_DESC *p_rx_next_curr_desc;
3119 volatile ETH_RX_DESC *p_rx_used_desc;
3120 unsigned int command_status;
3121
3122
3123 if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true) {
3124 printf ("\nRx Queue is full ...\n");
3125 return ETH_QUEUE_FULL;
3126 }
3127
3128
3129 CURR_RFD_GET (p_rx_curr_desc, rx_queue);
3130 USED_RFD_GET (p_rx_used_desc, rx_queue);
3131
3132
3133 if (p_rx_curr_desc == NULL)
3134 return ETH_ERROR;
3135
3136
3137 p_rx_next_curr_desc = RX_NEXT_DESC_PTR (p_rx_curr_desc, rx_queue);
3138 command_status = p_rx_curr_desc->cmd_sts;
3139
3140
3141 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
3142
3143 D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
3144
3145 return ETH_END_OF_JOB;
3146 }
3147
3148 p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
3149 p_pkt_info->cmd_sts = command_status;
3150 p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
3151 p_pkt_info->return_info = p_rx_curr_desc->return_info;
3152 p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size;
3153
3154
3155
3156 p_rx_curr_desc->return_info = 0;
3157
3158
3159 CURR_RFD_SET (p_rx_next_curr_desc, rx_queue);
3160
3161
3162 if (p_rx_next_curr_desc == p_rx_used_desc)
3163 p_eth_port_ctrl->rx_resource_err[rx_queue] = true;
3164
3165 D_CACHE_FLUSH_LINE ((unsigned int) p_rx_curr_desc, 0);
3166 CPU_PIPE_FLUSH;
3167
3168 return ETH_OK;
3169}
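
/*
 * eth_rx_return_buff - Return a receive buffer to the Rx descriptor ring.
 *
 * Re-attaches the buffer described by p_pkt_info to the oldest used Rx
 * descriptor and hands that descriptor back to the DMA.
 */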
3193static ETH_FUNC_RET_STATUS eth_rx_return_buff (ETH_PORT_INFO *
3194 p_eth_port_ctrl,
3195 ETH_QUEUE rx_queue,
3196 PKT_INFO * p_pkt_info)
3197{
3198 volatile ETH_RX_DESC *p_used_rx_desc;
3199
3200
3201 USED_RFD_GET (p_used_rx_desc, rx_queue);
3202
3203
3204 if (p_used_rx_desc == NULL)
3205 return ETH_ERROR;
3206
3207 p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
3208 p_used_rx_desc->return_info = p_pkt_info->return_info;
3209 p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
3210 p_used_rx_desc->buf_size = MV64460_RX_BUFFER_SIZE;
3211
3212
3213 CPU_PIPE_FLUSH;
3214
3215
3216 p_used_rx_desc->cmd_sts =
3217 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
3218
3219
3220 D_CACHE_FLUSH_LINE ((unsigned int) p_used_rx_desc, 0);
3221 CPU_PIPE_FLUSH;
3222
3223
3224 USED_RFD_SET (RX_NEXT_DESC_PTR (p_used_rx_desc, rx_queue), rx_queue);
3225
3226
3227 if (p_eth_port_ctrl->rx_resource_err[rx_queue] == true)
3228 p_eth_port_ctrl->rx_resource_err[rx_queue] = false;
3229
3230 return ETH_OK;
3231}
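
/*
 * eth_port_set_rx_coal - Program the Rx interrupt coalescing delay
 * (in micro seconds, derived from t_clk) into the SDMA configuration
 * register.  Currently compiled out (#if 0).
 */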
3256#if 0
3257static unsigned int eth_port_set_rx_coal (ETH_PORT eth_port_num,
3258 unsigned int t_clk,
3259 unsigned int delay)
3260{
3261 unsigned int coal;
3262
3263 coal = ((t_clk / 1000000) * delay) / 64;
3264
3265 MV_REG_WRITE (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num),
3266 ((coal & 0x3fff) << 8) |
3267 (MV_REG_READ
3268 (MV64460_ETH_SDMA_CONFIG_REG (eth_port_num))
3269 & 0xffc000ff));
3270 return coal;
3271}
3272
3273#endif
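
/*
 * eth_port_set_tx_coal - Program the Tx interrupt coalescing delay
 * (in micro seconds, derived from t_clk) into the Tx FIFO urgent
 * threshold register.  Currently compiled out (#if 0).
 */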
3297#if 0
3298static unsigned int eth_port_set_tx_coal (ETH_PORT eth_port_num,
3299 unsigned int t_clk,
3300 unsigned int delay)
3301{
3302 unsigned int coal;
3303
3304 coal = ((t_clk / 1000000) * delay) / 64;
3305
3306 MV_REG_WRITE (MV64460_ETH_TX_FIFO_URGENT_THRESHOLD_REG (eth_port_num),
3307 coal << 4);
3308 return coal;
3309}
3310#endif
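
/*
 * eth_b_copy - Simple byte copy between two physical addresses, used by
 * the small-frame work-around in eth_port_send.
 */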
static void eth_b_copy (unsigned int src_addr, unsigned int dst_addr,
			int byte_count)
{
	/* Clear the first word of the destination before copying */
	*(unsigned int *) dst_addr = 0x0;

	while (byte_count != 0) {
		*(char *) dst_addr = *(char *) src_addr;
		dst_addr++;
		src_addr++;
		byte_count--;
	}
}
3345