/*
 * Freescale QUICC Engine UCC Ethernet (UEC) driver.
 */
9#include <common.h>
10#include <net.h>
11#include <malloc.h>
12#include <linux/errno.h>
13#include <asm/io.h>
14#include <linux/immap_qe.h>
15#include "uccf.h"
16#include "uec.h"
17#include "uec_phy.h"
18#include "miiphy.h"
19#include <fsl_qe.h>
20#include <phy.h>
21
22
/* Default TBI PHY address programmed into UTBIPAR; boards may override. */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

/*
 * Static per-controller configuration table: one STD_UEC_INFO entry is
 * compiled in for each CONFIG_UEC_ETHn the board configuration enables.
 * Consumed by uec_standard_init().
 */
static uec_info_t uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),
#endif
};
53
/* Upper bound on UCC Ethernet controllers handled by this driver */
#define MAXCONTROLLERS (8)

/*
 * Registered eth_device pointers, indexed by UCC number in
 * uec_initialize().  Slots for controllers that were never registered
 * stay NULL.
 */
static struct eth_device *devlist[MAXCONTROLLERS];
57
58static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
59{
60 uec_t *uec_regs;
61 u32 maccfg1;
62
63 if (!uec) {
64 printf("%s: uec not initial\n", __FUNCTION__);
65 return -EINVAL;
66 }
67 uec_regs = uec->uec_regs;
68
69 maccfg1 = in_be32(&uec_regs->maccfg1);
70
71 if (mode & COMM_DIR_TX) {
72 maccfg1 |= MACCFG1_ENABLE_TX;
73 out_be32(&uec_regs->maccfg1, maccfg1);
74 uec->mac_tx_enabled = 1;
75 }
76
77 if (mode & COMM_DIR_RX) {
78 maccfg1 |= MACCFG1_ENABLE_RX;
79 out_be32(&uec_regs->maccfg1, maccfg1);
80 uec->mac_rx_enabled = 1;
81 }
82
83 return 0;
84}
85
86static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
87{
88 uec_t *uec_regs;
89 u32 maccfg1;
90
91 if (!uec) {
92 printf("%s: uec not initial\n", __FUNCTION__);
93 return -EINVAL;
94 }
95 uec_regs = uec->uec_regs;
96
97 maccfg1 = in_be32(&uec_regs->maccfg1);
98
99 if (mode & COMM_DIR_TX) {
100 maccfg1 &= ~MACCFG1_ENABLE_TX;
101 out_be32(&uec_regs->maccfg1, maccfg1);
102 uec->mac_tx_enabled = 0;
103 }
104
105 if (mode & COMM_DIR_RX) {
106 maccfg1 &= ~MACCFG1_ENABLE_RX;
107 out_be32(&uec_regs->maccfg1, maccfg1);
108 uec->mac_rx_enabled = 0;
109 }
110
111 return 0;
112}
113
/*
 * Gracefully stop the Tx path: clear the GRA event bit, issue the QE
 * GRACEFUL_STOP_TX host command, then busy-wait until the controller
 * raises UCCE_GRA to acknowledge that in-flight frames have drained.
 *
 * Returns 0 on success, -EINVAL on a NULL handle.
 */
static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the stale graceful-stop event before issuing the command */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue the host command for this UCC's CECR sub-block */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Poll for the acknowledge event.
	 * NOTE(review): no timeout — spins forever if the hardware never
	 * sets UCCE_GRA. */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (! (ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}
145
/*
 * Gracefully stop the Rx path: clear the acknowledge flag in the Rx
 * global parameter RAM, then repeatedly issue the QE GRACEFUL_STOP_RX
 * command until the microcode sets the acknowledge bit back.
 *
 * Returns 0 on success, -EINVAL on a NULL handle or uninitialized
 * Rx parameter RAM.
 */
static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: No init rx global parameter\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Clear the acknowledge bit so we can detect the new ack */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing the command until the microcode acknowledges.
	 * NOTE(review): no timeout — loops forever if the ack never
	 * appears. */
	do {
		/* Issue host command for this UCC's CECR sub-block */
		cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
					(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));

	uec->grace_stopped_rx = 1;

	return 0;
}
180
181static int uec_restart_tx(uec_private_t *uec)
182{
183 u32 cecr_subblock;
184
185 if (!uec || !uec->uec_info) {
186 printf("%s: No handle passed.\n", __FUNCTION__);
187 return -EINVAL;
188 }
189
190 cecr_subblock =
191 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
192 qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
193 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
194
195 uec->grace_stopped_tx = 0;
196
197 return 0;
198}
199
200static int uec_restart_rx(uec_private_t *uec)
201{
202 u32 cecr_subblock;
203
204 if (!uec || !uec->uec_info) {
205 printf("%s: No handle passed.\n", __FUNCTION__);
206 return -EINVAL;
207 }
208
209 cecr_subblock =
210 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
211 qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
212 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
213
214 uec->grace_stopped_rx = 0;
215
216 return 0;
217}
218
219static int uec_open(uec_private_t *uec, comm_dir_e mode)
220{
221 ucc_fast_private_t *uccf;
222
223 if (!uec || !uec->uccf) {
224 printf("%s: No handle passed.\n", __FUNCTION__);
225 return -EINVAL;
226 }
227 uccf = uec->uccf;
228
229
230 if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
231 printf("%s: ucc_num out of range.\n", __FUNCTION__);
232 return -EINVAL;
233 }
234
235
236 uec_mac_enable(uec, mode);
237
238
239 ucc_fast_enable(uccf, mode);
240
241
242 if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
243 uec_restart_tx(uec);
244 }
245 if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
246 uec_restart_rx(uec);
247 }
248
249 return 0;
250}
251
252static int uec_stop(uec_private_t *uec, comm_dir_e mode)
253{
254 if (!uec || !uec->uccf) {
255 printf("%s: No handle passed.\n", __FUNCTION__);
256 return -EINVAL;
257 }
258
259
260 if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
261 printf("%s: ucc_num out of range.\n", __FUNCTION__);
262 return -EINVAL;
263 }
264
265 if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
266 uec_graceful_stop_tx(uec);
267 }
268
269 if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
270 uec_graceful_stop_rx(uec);
271 }
272
273
274 ucc_fast_disable(uec->uccf, mode);
275
276
277 uec_mac_disable(uec, mode);
278
279 return 0;
280}
281
282static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
283{
284 uec_t *uec_regs;
285 u32 maccfg2;
286
287 if (!uec) {
288 printf("%s: uec not initial\n", __FUNCTION__);
289 return -EINVAL;
290 }
291 uec_regs = uec->uec_regs;
292
293 if (duplex == DUPLEX_HALF) {
294 maccfg2 = in_be32(&uec_regs->maccfg2);
295 maccfg2 &= ~MACCFG2_FDX;
296 out_be32(&uec_regs->maccfg2, maccfg2);
297 }
298
299 if (duplex == DUPLEX_FULL) {
300 maccfg2 = in_be32(&uec_regs->maccfg2);
301 maccfg2 |= MACCFG2_FDX;
302 out_be32(&uec_regs->maccfg2, maccfg2);
303 }
304
305 return 0;
306}
307
/*
 * Program MACCFG2 (interface width) and UPSMR (interface protocol bits)
 * for the given PHY interface type and link speed.  Only the
 * speed/interface combinations listed below are supported; anything
 * else returns -EINVAL without touching the registers.
 *
 * @uec:     driver private state (must be non-NULL)
 * @if_mode: PHY interface type (MII/RMII/RGMII/GMII/TBI/RTBI/SGMII...)
 * @speed:   SPEED_10, SPEED_100 or SPEED_1000
 */
static int uec_set_mac_if_mode(uec_private_t *uec,
		phy_interface_t if_mode, int speed)
{
	phy_interface_t		enet_if_mode;
	uec_t			*uec_regs;
	u32			upsmr;
	u32			maccfg2;

	if (!uec) {
		printf("%s: uec not initial\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	/* Start from the current register values with the mode bits cleared */
	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (speed) {
		case SPEED_10:
			/* 10 Mbit uses nibble-wide mode; R10M selects 10M */
			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
			switch (enet_if_mode) {
				case PHY_INTERFACE_MODE_MII:
					break;
				case PHY_INTERFACE_MODE_RGMII:
					upsmr |= (UPSMR_RPM | UPSMR_R10M);
					break;
				case PHY_INTERFACE_MODE_RMII:
					upsmr |= (UPSMR_R10M | UPSMR_RMM);
					break;
				default:
					return -EINVAL;
					break;
			}
			break;
		case SPEED_100:
			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
			switch (enet_if_mode) {
				case PHY_INTERFACE_MODE_MII:
					break;
				case PHY_INTERFACE_MODE_RGMII:
					upsmr |= UPSMR_RPM;
					break;
				case PHY_INTERFACE_MODE_RMII:
					upsmr |= UPSMR_RMM;
					break;
				default:
					return -EINVAL;
					break;
			}
			break;
		case SPEED_1000:
			/* Gigabit uses byte-wide mode */
			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
			switch (enet_if_mode) {
				case PHY_INTERFACE_MODE_GMII:
					break;
				case PHY_INTERFACE_MODE_TBI:
					upsmr |= UPSMR_TBIM;
					break;
				case PHY_INTERFACE_MODE_RTBI:
					upsmr |= (UPSMR_RPM | UPSMR_TBIM);
					break;
				case PHY_INTERFACE_MODE_RGMII_RXID:
				case PHY_INTERFACE_MODE_RGMII_TXID:
				case PHY_INTERFACE_MODE_RGMII_ID:
				case PHY_INTERFACE_MODE_RGMII:
					upsmr |= UPSMR_RPM;
					break;
				case PHY_INTERFACE_MODE_SGMII:
					upsmr |= UPSMR_SGMM;
					break;
				default:
					return -EINVAL;
					break;
			}
			break;
		default:
			return -EINVAL;
			break;
	}

	/* Commit both registers only after a supported combination */
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}
398
399static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
400{
401 uint timeout = 0x1000;
402 u32 miimcfg = 0;
403
404 miimcfg = in_be32(&uec_mii_regs->miimcfg);
405 miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
406 out_be32(&uec_mii_regs->miimcfg, miimcfg);
407
408
409 while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--);
410 if (timeout <= 0) {
411 printf("%s: The MII Bus is stuck!", __FUNCTION__);
412 return -ETIMEDOUT;
413 }
414
415 return 0;
416}
417
/*
 * Allocate and populate the uec_mii_info structure for this device,
 * configure the MII management interface, probe for a PHY and run its
 * init hook if it has one.
 *
 * On any failure the mii_info allocation is released via the shared
 * cleanup labels.  Returns 0 on success, -ENOMEM on allocation
 * failure, -1 on MII-bus or PHY-probe failure, or the PHY init hook's
 * error code.
 */
static int init_phy(struct eth_device *dev)
{
	uec_private_t *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (uec_private_t *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	/* Link state is unknown until the first read_status */
	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	/* Seed speed from the controller type; autoneg may change it */
	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
		mii_info->speed = SPEED_1000;
	} else {
		mii_info->speed = SPEED_100;
	}

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	/* Register accessors used by the generic PHY code */
	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* Identify the PHY attached to this controller */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the PHY-specific init hook, if any */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}
497
/*
 * Propagate PHY status changes (link/duplex/speed) into the MAC.
 * Compares the freshly-read mii_info state against the cached old*
 * values and reprograms duplex and interface mode only on change,
 * printing a message for each transition.
 */
static void adjust_link(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;
	struct uec_mii_info *mii_info = uec->mii_info;

	extern void change_phy_interface_mode(struct eth_device *dev,
					 phy_interface_t mode, int speed);

	if (mii_info->link) {
		/* Duplex changed? */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		/* Speed changed? */
		if (mii_info->speed != uec->oldspeed) {
			phy_interface_t mode =
				uec->uec_info->enet_interface_type;
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				/* Gigabit controllers drop to RGMII when the
				 * negotiated speed is below 1000 Mbit */
				switch (mii_info->speed) {
				case SPEED_1000:
					break;
				case SPEED_100:
					printf ("switching to rgmii 100\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				case SPEED_10:
					printf ("switching to rgmii 10\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				default:
					printf("%s: Ack,Speed(%d)is illegal\n",
						dev->name, mii_info->speed);
					break;
				}
			}

			/* Board hook first, then the MAC registers */
			change_phy_interface_mode(dev, mode, mii_info->speed);

			uec_set_mac_if_mode(uec, mode, mii_info->speed);

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else {
		/* Link went down: reset the cached state */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}
565
/*
 * Re-read the PHY status and apply any link change to the MAC.
 * On P1012/P1021/P1025 the QE9/QE12 pins are muxed between UCC MII
 * management and other functions, so they are switched to MDIO around
 * the status read and QE12 is released afterwards.
 */
static void phy_change(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* Route QE9/QE12 pins to the MII management interface */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Update the link, speed, duplex info from the PHY */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
	/* Release QE12 for its alternate function once MDIO is done */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Adjust the MAC to the newly read PHY state */
	adjust_link(dev);
}
592
593#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
594
595
596
597
598
599
600
601static int uec_miiphy_find_dev_by_name(const char *devname)
602{
603 int i;
604
605 for (i = 0; i < MAXCONTROLLERS; i++) {
606 if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
607 break;
608 }
609 }
610
611
612 if (i == MAXCONTROLLERS) {
613 debug ("%s: device %s not found in devlist\n", __FUNCTION__, devname);
614 i = -1;
615 }
616
617 return i;
618}
619
620
621
622
623
624
625
626static int uec_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
627{
628 unsigned short value = 0;
629 int devindex = 0;
630
631 if (bus->name == NULL) {
632 debug("%s: NULL pointer given\n", __FUNCTION__);
633 } else {
634 devindex = uec_miiphy_find_dev_by_name(bus->name);
635 if (devindex >= 0) {
636 value = uec_read_phy_reg(devlist[devindex], addr, reg);
637 }
638 }
639 return value;
640}
641
642
643
644
645
646
647
648static int uec_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
649 u16 value)
650{
651 int devindex = 0;
652
653 if (bus->name == NULL) {
654 debug("%s: NULL pointer given\n", __FUNCTION__);
655 } else {
656 devindex = uec_miiphy_find_dev_by_name(bus->name);
657 if (devindex >= 0) {
658 uec_write_phy_reg(devlist[devindex], addr, reg, value);
659 }
660 }
661 return 0;
662}
663#endif
664
665static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
666{
667 uec_t *uec_regs;
668 u32 mac_addr1;
669 u32 mac_addr2;
670
671 if (!uec) {
672 printf("%s: uec not initial\n", __FUNCTION__);
673 return -EINVAL;
674 }
675
676 uec_regs = uec->uec_regs;
677
678
679
680
681
682 mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
683 (mac_addr[3] << 8) | (mac_addr[2]);
684 out_be32(&uec_regs->macstnaddr1, mac_addr1);
685
686 mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
687 out_be32(&uec_regs->macstnaddr2, mac_addr2);
688
689 return 0;
690}
691
692static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
693 int *threads_num_ret)
694{
695 int num_threads_numerica;
696
697 switch (threads_num) {
698 case UEC_NUM_OF_THREADS_1:
699 num_threads_numerica = 1;
700 break;
701 case UEC_NUM_OF_THREADS_2:
702 num_threads_numerica = 2;
703 break;
704 case UEC_NUM_OF_THREADS_4:
705 num_threads_numerica = 4;
706 break;
707 case UEC_NUM_OF_THREADS_6:
708 num_threads_numerica = 6;
709 break;
710 case UEC_NUM_OF_THREADS_8:
711 num_threads_numerica = 8;
712 break;
713 default:
714 printf("%s: Bad number of threads value.",
715 __FUNCTION__);
716 return -EINVAL;
717 }
718
719 *threads_num_ret = num_threads_numerica;
720
721 return 0;
722}
723
/*
 * Allocate and initialise the Tx-side structures in QE multi-user RAM
 * (MURAM): the global Tx parameter page, the send-queue descriptor,
 * and the per-thread Tx data area.  Assumes uec->p_tx_bd_ring was set
 * up by uec_startup() beforehand.
 *
 * NOTE(review): qe_muram_alloc() results are not checked — TODO
 * confirm it cannot fail in this configuration.
 */
static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_info_t *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Allocate global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* TEMODER: Tx ethernet mode register */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* Allocate the send-queue queue descriptor and hook it up */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Point the queue descriptor at the first and last Tx BDs */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
				 end_bd);

	/* No scheduler: single queue */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* No Tx RMON statistics */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE: bus-mode bits in the top byte */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* Clear the IP header offset table */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* Clear the VLAN tag insertion table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* Per-thread Tx data; extra 32 bytes when only one thread is used */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}
793
/*
 * Allocate and initialise the Rx-side structures in QE MURAM: the
 * global Rx parameter page, per-thread Rx data, the Rx BD queues
 * table, frame-length limits, and the (cleared) address filter.
 * Assumes uec->p_rx_bd_ring was set up by uec_startup() beforehand.
 *
 * NOTE(review): qe_muram_alloc() results are not checked — TODO
 * confirm it cannot fail in this configuration.
 */
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero the global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* REMODER: Rx ethernet mode register */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* Per-thread Rx data area */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type/length field boundary */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* No Rx RMON statistics */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* No interrupt coalescing */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE: bus-mode bits */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR: maximum Rx buffer length */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* Rx BD queues table plus prefetched-BDs area */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it and point it at the Rx BD ring */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
					sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR: maximum frame length */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR: minimum frame length */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1: maximum DMA1 length */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2: maximum DMA2 length */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* No CAM address matching */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* Layer-2 queue mapping */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* Layer-3 queue mapping */
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN ethertype */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* Default VLAN TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear the individual and group address filter hashes */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}
888
/*
 * Build the INIT_TX_RX command parameter RAM and issue the QE
 * INIT ENET command: fill in the magic reserved words, encode the
 * thread counts and global parameter pointers, allocate MURAM and an
 * SNUM for each Rx/Tx thread, then fire the host command.
 *
 * @thread_tx/@thread_rx: numeric thread counts from
 *                        uec_convert_threads_num().
 * Returns 0 on success, -ENOMEM when no SNUM is available.
 */
static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					 int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate the INIT ENET command parameter page */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero it */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Magic values the microcode expects in the reserved words */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	/* Encode the enumerated thread counts */
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Rx global parameter pointer and RISC allocation */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->risc_rx;

	/* One entry per Rx thread; entry 0 reuses the global page */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i==0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					 UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Tx global parameter pointer and RISC allocation */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->risc_tx;

	/* One entry per Tx thread */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0)	{
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
						 UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	/* Make sure all parameter RAM writes land before the command */
	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
						 init_enet_param_offset);

	return 0;
}
982
/*
 * One-time hardware bring-up for a controller: validate the BD ring
 * configuration, init the fast UCC, program UPSMR/MACCFG1/MACCFG2 and
 * the interface mode, set up the MII register block and UTBIPAR,
 * allocate and initialise the Tx/Rx BD rings and Rx buffers, then run
 * the Tx/Rx parameter RAM init and the QE INIT ENET command.
 *
 * Returns 0 on success, -EINVAL on bad configuration, -ENOMEM when
 * the UCC or the INIT ENET command fails.
 *
 * NOTE(review): the malloc() results for the BD rings and Rx buffers
 * are only checked before the pointer computation, yet the following
 * memset uses the raw offset unconditionally — a failed allocation
 * would memset from address `align`.  TODO confirm/ harden.
 */
static int uec_startup(uec_private_t *uec)
{
	uec_info_t			*uec_info;
	ucc_fast_info_t			*uf_info;
	ucc_fast_private_t		*uccf;
	ucc_fast_t			*uf_regs;
	uec_t				*uec_regs;
	int				num_threads_tx;
	int				num_threads_rx;
	u32				utbipar;
	u32				length;
	u32				align;
	qe_bd_t				*bd;
	u8				*buf;
	int				i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initial\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &(uec_info->uf_info);

	/* Check Rx BD ring length: multiple of the alignment, and large
	 * enough */
	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check Tx BD ring length */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check the Rx buffer size constraint from the hardware */
	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN  % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be mutliple of 128.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Both directions start in the "gracefully stopped" state */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast engine */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __FUNCTION__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the enumerated Tx thread count to a number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
					 &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the enumerated Rx thread count to a number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
					 &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* The UEC register block lives inside the UCC register space */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the uec register pointer to uec private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR: protocol-specific mode register */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1: MAC global configuration (TX/RX disabled) */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2: interface mode, padding, CRC */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup interface mode from board configuration */
	uec_set_mac_if_mode(uec, uec_info->enet_interface_type, uec_info->speed);

	/* Setup MII management base; on eTSEC-managed boards the MDIO
	 * block lives at a fixed external address instead */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR: TBI PHY address */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
	 * This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if ((uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII) &&
	   (uec->uec_info->speed == SPEED_1000)) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
			ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate Tx BDs: round the ring size up to the memory alignment */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
						 & ~(align - 1));
	}

	/* Zero all of Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
							 & ~(align - 1));
	}

	/* Zero all of Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffers, one MAX_RXBUF_LEN buffer per Rx BD */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
						 & ~(align - 1));
	}

	/* Zero all of the Rx buffers */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init Tx BDs: empty, not ready; wrap on the last one */
	bd = (qe_bd_t *)uec->p_tx_bd_ring;
	uec->txBd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd ++;
	}
	BD_STATUS_SET((--bd), TxBD_WRAP);

	/* Init Rx BDs: empty (owned by hardware), pointing at their
	 * buffer; wrap on the last one */
	bd = (qe_bd_t *)uec->p_rx_bd_ring;
	uec->rxBd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RxBD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd ++;
	}
	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __FUNCTION__);
		return -ENOMEM;
	}

	return 0;
}
1190
/*
 * eth_device init hook.  On the very first call it initialises the
 * PHY, starts autonegotiation and waits (up to ~5 s) for link.  Every
 * call then programs the MAC address, enables both directions and
 * refreshes the link state.
 *
 * Returns 0 when the link is up, a negative error on PHY failure, or
 * -1 on multicast MAC / enable failure / no link.
 */
static int uec_init(struct eth_device* dev, bd_t *bd)
{
	uec_private_t		*uec;
	int			err, i;
	struct phy_info         *curphy;
#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	uec = (uec_private_t *)dev->priv;

	if (uec->the_first_run == 0) {
#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
	/* Route QE9/QE12 pins to the MII management interface */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
				 dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give the PHY up to 5 seconds to report link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			if (!(((i-- > 0) && !uec->mii_info->link) || err))
				break;
			udelay(100000);
		} while (1);

#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
	/* Release QE12 for its alternate function */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		adjust_link(dev);
		uec->the_first_run = 1;
	}

	/* Reject multicast station addresses (I/G bit set) */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MacAddress is multcast address\n",
			 __FUNCTION__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	/* Enable both Tx and Rx */
	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	/* Refresh PHY status and adjust the MAC accordingly */
	phy_change(dev);

	return (uec->mii_info->link ? 0 : -1);
}
1266
1267static void uec_halt(struct eth_device* dev)
1268{
1269 uec_private_t *uec = (uec_private_t *)dev->priv;
1270 uec_stop(uec, COMM_DIR_RX_AND_TX);
1271}
1272
/*
 * eth_device send hook: queue one frame on the current Tx BD, kick
 * the UCC "transmit on demand" register, and busy-wait for the BD to
 * complete before advancing the ring.
 *
 * Returns 1 on success, 0 if the BD never became free or the
 * transmission never completed (both bounded by ~0x100000 polls).
 */
static int uec_send(struct eth_device *dev, void *buf, int len)
{
	uec_private_t		*uec;
	ucc_fast_private_t	*uccf;
	volatile qe_bd_t	*bd;
	u16			status;
	int			i;
	int			result = 0;

	uec = (uec_private_t *)dev->priv;
	uccf = uec->uccf;
	bd = uec->txBd;

	/* Wait for the current BD to be released by the hardware */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Setup the BD: keep only the WRAP bit, mark READY and LAST */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = bd->status;
	status &= BD_WRAP;
	status |= (TxBD_READY | TxBD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell the hardware a frame is pending */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for buffer to be transmitted */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* Advance to the next BD (wraps at the end of the ring) */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->txBd = bd;
	result = 1;

	return result;
}
1320
/*
 * eth_device receive hook: walk the Rx BD ring, hand every completed
 * non-error frame to the network stack, recycle each BD back to the
 * hardware (EMPTY, WRAP preserved), and stop at the first BD still
 * owned by the hardware.  Always returns 1.
 */
static int uec_recv(struct eth_device* dev)
{
	uec_private_t		*uec = dev->priv;
	volatile qe_bd_t	*bd;
	u16			status;
	u16			len;
	u8			*data;

	bd = uec->rxBd;
	status = bd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		/* Clear the status bits and hand the BD back to the HW */
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RxBD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = bd->status;
	}
	uec->rxBd = bd;

	return 1;
}
1350
1351int uec_initialize(bd_t *bis, uec_info_t *uec_info)
1352{
1353 struct eth_device *dev;
1354 int i;
1355 uec_private_t *uec;
1356 int err;
1357
1358 dev = (struct eth_device *)malloc(sizeof(struct eth_device));
1359 if (!dev)
1360 return 0;
1361 memset(dev, 0, sizeof(struct eth_device));
1362
1363
1364 uec = (uec_private_t *)malloc(sizeof(uec_private_t));
1365 if (!uec) {
1366 return -ENOMEM;
1367 }
1368 memset(uec, 0, sizeof(uec_private_t));
1369
1370
1371#if (MAX_QE_RISC == 4)
1372 uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
1373 uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
1374#endif
1375
1376 devlist[uec_info->uf_info.ucc_num] = dev;
1377
1378 uec->uec_info = uec_info;
1379 uec->dev = dev;
1380
1381 sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
1382 dev->iobase = 0;
1383 dev->priv = (void *)uec;
1384 dev->init = uec_init;
1385 dev->halt = uec_halt;
1386 dev->send = uec_send;
1387 dev->recv = uec_recv;
1388
1389
1390 for (i = 0; i < 6; i++)
1391 dev->enetaddr[i] = 0;
1392
1393 eth_register(dev);
1394
1395 err = uec_startup(uec);
1396 if (err) {
1397 printf("%s: Cannot configure net device, aborting.",dev->name);
1398 return err;
1399 }
1400
1401#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
1402 int retval;
1403 struct mii_dev *mdiodev = mdio_alloc();
1404 if (!mdiodev)
1405 return -ENOMEM;
1406 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
1407 mdiodev->read = uec_miiphy_read;
1408 mdiodev->write = uec_miiphy_write;
1409
1410 retval = mdio_register(mdiodev);
1411 if (retval < 0)
1412 return retval;
1413#endif
1414
1415 return 1;
1416}
1417
1418int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num)
1419{
1420 int i;
1421
1422 for (i = 0; i < num; i++)
1423 uec_initialize(bis, &uecs[i]);
1424
1425 return 0;
1426}
1427
1428int uec_standard_init(bd_t *bis)
1429{
1430 return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
1431}
1432