1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <common.h>
28#include <malloc.h>
29#include <asm/cpm_85xx.h>
30#include <command.h>
31#include <config.h>
32#include <net.h>
33
34#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
35#include <miiphy.h>
36#endif
37
38#if defined(CONFIG_ETHER_ON_FCC) && defined(CONFIG_CMD_NET)
39
40static struct ether_fcc_info_s
41{
42 int ether_index;
43 int proff_enet;
44 ulong cpm_cr_enet_sblock;
45 ulong cpm_cr_enet_page;
46 ulong cmxfcr_mask;
47 ulong cmxfcr_value;
48}
49 ether_fcc_info[] =
50{
51#ifdef CONFIG_ETHER_ON_FCC1
52{
53 0,
54 PROFF_FCC1,
55 CPM_CR_FCC1_SBLOCK,
56 CPM_CR_FCC1_PAGE,
57 CONFIG_SYS_CMXFCR_MASK1,
58 CONFIG_SYS_CMXFCR_VALUE1
59},
60#endif
61
62#ifdef CONFIG_ETHER_ON_FCC2
63{
64 1,
65 PROFF_FCC2,
66 CPM_CR_FCC2_SBLOCK,
67 CPM_CR_FCC2_PAGE,
68 CONFIG_SYS_CMXFCR_MASK2,
69 CONFIG_SYS_CMXFCR_VALUE2
70},
71#endif
72
73#ifdef CONFIG_ETHER_ON_FCC3
74{
75 2,
76 PROFF_FCC3,
77 CPM_CR_FCC3_SBLOCK,
78 CPM_CR_FCC3_PAGE,
79 CONFIG_SYS_CMXFCR_MASK3,
80 CONFIG_SYS_CMXFCR_VALUE3
81},
82#endif
83};
84
85
86
87
/* Largest DMA transfer the FCC is allowed per buffer (bytes). */
#define PKT_MAXDMA_SIZE         1520

/* Maximum/minimum accepted ethernet frame lengths (fen_mflr/fen_minflr). */
#define PKT_MAXBUF_SIZE         1518
#define PKT_MINBUF_SIZE         64

/* Maximum receive buffer length written to fcc_mrblr; must be a
 * multiple of the bus width (hence 1536 rather than 1518). */
#define PKT_MAXBLR_SIZE         1536

/* Busy-poll iteration limit when waiting on a TX buffer descriptor. */
#define TOUT_LOOP 1000000

/* Number of transmit buffer descriptors in the TX ring. */
#define TX_BUF_CNT 2

/* Current positions in the RX and TX descriptor rings. */
static uint rxIdx;	/* index of the next rx buffer descriptor to read */
static uint txIdx;	/* index of the next tx buffer descriptor to use  */

/*
 * RX and TX buffer-descriptor rings, laid out back to back so the CPM
 * can address both from one structure.  Marked volatile because the
 * CPM hardware updates the descriptors concurrently with the CPU.
 */
typedef volatile struct rtxbd {
	cbd_t rxbd[PKTBUFSRX];
	cbd_t txbd[TX_BUF_CNT];
} RTXBD;

/* The descriptor rings themselves; the CPM requires 64-bit alignment. */
#ifdef __GNUC__
static RTXBD rtx __attribute__ ((aligned(8)));
#else
#error "rtx must be 64-bit aligned"
#endif

/* Define ET_DEBUG to get verbose TX tracing in fec_send(). */
#undef ET_DEBUG
123
124static int fec_send(struct eth_device *dev, void *packet, int length)
125{
126 int i = 0;
127 int result = 0;
128
129 if (length <= 0) {
130 printf("fec: bad packet size: %d\n", length);
131 goto out;
132 }
133
134 for(i=0; rtx.txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
135 if (i >= TOUT_LOOP) {
136 printf("fec: tx buffer not ready\n");
137 goto out;
138 }
139 }
140
141 rtx.txbd[txIdx].cbd_bufaddr = (uint)packet;
142 rtx.txbd[txIdx].cbd_datlen = length;
143 rtx.txbd[txIdx].cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_LAST | \
144 BD_ENET_TX_TC | BD_ENET_TX_PAD);
145
146 for(i=0; rtx.txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
147 if (i >= TOUT_LOOP) {
148 printf("fec: tx error\n");
149 goto out;
150 }
151 }
152
153#ifdef ET_DEBUG
154 printf("cycles: 0x%x txIdx=0x%04x status: 0x%04x\n", i, txIdx,rtx.txbd[txIdx].cbd_sc);
155 printf("packets at 0x%08x, length_in_bytes=0x%x\n",(uint)packet,length);
156 for(i=0;i<(length/16 + 1);i++) {
157 printf("%08x %08x %08x %08x\n",*((uint *)rtx.txbd[txIdx].cbd_bufaddr+i*4),\
158 *((uint *)rtx.txbd[txIdx].cbd_bufaddr + i*4 + 1),*((uint *)rtx.txbd[txIdx].cbd_bufaddr + i*4 + 2), \
159 *((uint *)rtx.txbd[txIdx].cbd_bufaddr + i*4 + 3));
160 }
161#endif
162
163
164 result = rtx.txbd[txIdx].cbd_sc & BD_ENET_TX_STATS;
165 txIdx = (txIdx + 1) % TX_BUF_CNT;
166
167out:
168 return result;
169}
170
171static int fec_recv(struct eth_device* dev)
172{
173 int length;
174
175 for (;;)
176 {
177 if (rtx.rxbd[rxIdx].cbd_sc & BD_ENET_RX_EMPTY) {
178 length = -1;
179 break;
180 }
181 length = rtx.rxbd[rxIdx].cbd_datlen;
182
183 if (rtx.rxbd[rxIdx].cbd_sc & 0x003f) {
184 printf("fec: rx error %04x\n", rtx.rxbd[rxIdx].cbd_sc);
185 }
186 else {
187
188 net_process_received_packet(net_rx_packets[rxIdx], length - 4);
189 }
190
191
192
193 rtx.rxbd[rxIdx].cbd_datlen = 0;
194
195
196 if ((rxIdx + 1) >= PKTBUFSRX) {
197 rtx.rxbd[PKTBUFSRX - 1].cbd_sc = (BD_ENET_RX_WRAP | BD_ENET_RX_EMPTY);
198 rxIdx = 0;
199 }
200 else {
201 rtx.rxbd[rxIdx].cbd_sc = BD_ENET_RX_EMPTY;
202 rxIdx++;
203 }
204 }
205 return length;
206}
207
208
/*
 * Bring up one FCC ethernet controller.
 *
 * Routes the FCC's clocks through the CMX, configures the FCC for
 * ethernet mode with 32-bit CRC, builds the RX/TX buffer-descriptor
 * rings, fills in the FCC ethernet parameter RAM (CRC preset/mask,
 * frame-length limits, station address, DMA limits), clears pending
 * events, issues the CPM "init RX and TX parameters" command, and
 * finally enables the transmitter and receiver.
 *
 * dev: device being initialised; dev->priv points at this
 *      controller's ether_fcc_info_s entry.
 * bis: board information (unused here).
 * Returns 1 unconditionally (no failure paths are detected).
 */
static int fec_init(struct eth_device* dev, struct bd_info *bis)
{
	struct ether_fcc_info_s * info = dev->priv;
	int i;
	volatile ccsr_cpm_t *cpm = (ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR;
	volatile ccsr_cpm_cp_t *cp = &(cpm->im_cpm_cp);
	fcc_enet_t *pram_ptr;
	unsigned long mem_addr;

#if 0
	/* PHY discovery is intentionally disabled here. */
	mii_discover_phy();
#endif

	/* Route this FCC's RX/TX clocks via the CMX: clear the utopia
	 * address register, then splice in this controller's clock
	 * routing bits under its mask. */
	cpm->im_cpm_mux.cmxuar = 0;
	cpm->im_cpm_mux.cmxfcr = (cpm->im_cpm_mux.cmxfcr & ~info->cmxfcr_mask) |
							info->cmxfcr_value;

	/* Set GFMR: ethernet mode, 32-bit transmit CRC. */
	if(info->ether_index == 0) {
		cpm->im_cpm_fcc1.gfmr = FCC_GFMR_MODE_ENET | FCC_GFMR_TCRC_32;
	} else if (info->ether_index == 1) {
		cpm->im_cpm_fcc2.gfmr = FCC_GFMR_MODE_ENET | FCC_GFMR_TCRC_32;
	} else if (info->ether_index == 2) {
		cpm->im_cpm_fcc3.gfmr = FCC_GFMR_MODE_ENET | FCC_GFMR_TCRC_32;
	}

	/* Set PSMR: board-specific mode bits plus receive-CRC checking. */
	if(info->ether_index == 0) {
		cpm->im_cpm_fcc1.fpsmr = CONFIG_SYS_FCC_PSMR | FCC_PSMR_ENCRC;
	} else if (info->ether_index == 1){
		cpm->im_cpm_fcc2.fpsmr = CONFIG_SYS_FCC_PSMR | FCC_PSMR_ENCRC;
	} else if (info->ether_index == 2){
		cpm->im_cpm_fcc3.fpsmr = CONFIG_SYS_FCC_PSMR | FCC_PSMR_ENCRC;
	}

	/* Program the data synchronization register pattern. */
	if(info->ether_index == 0) {
		cpm->im_cpm_fcc1.fdsr = 0xD555;
	} else if (info->ether_index == 1) {
		cpm->im_cpm_fcc2.fdsr = 0xD555;
	} else if (info->ether_index == 2) {
		cpm->im_cpm_fcc3.fdsr = 0xD555;
	}

	/* Start both rings from entry 0. */
	rxIdx = 0;
	txIdx = 0;

	/* Initialise the RX ring: every descriptor empty and pointed at
	 * its preallocated packet buffer; last descriptor wraps. */
	for (i = 0; i < PKTBUFSRX; i++)
	{
		rtx.rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
		rtx.rxbd[i].cbd_datlen = 0;
		rtx.rxbd[i].cbd_bufaddr = (uint)net_rx_packets[i];
	}
	rtx.rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

	/* Initialise the TX ring: all descriptors idle; last one wraps. */
	for (i = 0; i < TX_BUF_CNT; i++)
	{
		rtx.txbd[i].cbd_sc = 0;
		rtx.txbd[i].cbd_datlen = 0;
		rtx.txbd[i].cbd_bufaddr = 0;
	}
	rtx.txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP;

	/* Locate this FCC's ethernet parameter RAM in the dual-port RAM. */
	pram_ptr = (fcc_enet_t *)&(cpm->im_dprambase[info->proff_enet]);

	/* Start from a clean parameter RAM image. */
	memset((void*)pram_ptr, 0, sizeof(fcc_enet_t));

	/* Carve a 64-byte per-controller slice out of the CPM "special"
	 * area: the first 32 bytes back the receiver internal pointer
	 * (riptr), the second 32 the transmitter's (tiptr). */
	mem_addr = CPM_FCC_SPECIAL_BASE + ((info->ether_index) * 64);
	pram_ptr->fen_genfcc.fcc_riptr = mem_addr;
	pram_ptr->fen_genfcc.fcc_tiptr = mem_addr+32;

	/* Maximum receive buffer length (bus-width aligned). */
	pram_ptr->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE;

	/* Receive side: function-code/state byte in the top octet, ring
	 * base at our RX descriptors, counters cleared. */
	pram_ptr->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB |
					CONFIG_SYS_CPMFCR_RAMTYPE) << 24;
	pram_ptr->fen_genfcc.fcc_rbase = (unsigned int)(&rtx.rxbd[rxIdx]);
	pram_ptr->fen_genfcc.fcc_rbdstat = 0;
	pram_ptr->fen_genfcc.fcc_rbdlen = 0;
	pram_ptr->fen_genfcc.fcc_rdptr = 0;

	/* Transmit side: mirror of the receive-side setup. */
	pram_ptr->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB |
					CONFIG_SYS_CPMFCR_RAMTYPE) << 24;
	pram_ptr->fen_genfcc.fcc_tbase = (unsigned int)(&rtx.txbd[txIdx]);
	pram_ptr->fen_genfcc.fcc_tbdstat = 0;
	pram_ptr->fen_genfcc.fcc_tbdlen = 0;
	pram_ptr->fen_genfcc.fcc_tdptr = 0;

	/* Ethernet-specific parameter RAM: CRC preset/mask (standard
	 * ethernet CRC-32 values), statistics counters zeroed, retry
	 * limit, no multicast group hash, maximum frame length. */
	pram_ptr->fen_statbuf = 0x0;
	pram_ptr->fen_cmask = 0xdebb20e3;
	pram_ptr->fen_cpres = 0xffffffff;
	pram_ptr->fen_crcec = 0;
	pram_ptr->fen_alec = 0;
	pram_ptr->fen_disfc = 0;
	pram_ptr->fen_retlim = 15;
	pram_ptr->fen_retcnt = 0;
	pram_ptr->fen_pper = 0;
	pram_ptr->fen_boffcnt = 0;
	pram_ptr->fen_gaddrh = 0;
	pram_ptr->fen_gaddrl = 0;
	pram_ptr->fen_mflr = PKT_MAXBUF_SIZE;

	/* Station address: the paddr registers take the six MAC bytes
	 * packed as three half-words, lowest address byte last. */
#define ea eth_get_ethaddr()
	pram_ptr->fen_paddrh = (ea[5] << 8) + ea[4];
	pram_ptr->fen_paddrm = (ea[3] << 8) + ea[2];
	pram_ptr->fen_paddrl = (ea[1] << 8) + ea[0];
#undef ea
	pram_ptr->fen_ibdcount = 0;
	pram_ptr->fen_ibdstart = 0;
	pram_ptr->fen_ibdend = 0;
	pram_ptr->fen_txlen = 0;
	pram_ptr->fen_iaddrh = 0;	/* no individual address hash */
	pram_ptr->fen_iaddrl = 0;
	pram_ptr->fen_minflr = PKT_MINBUF_SIZE;
	/* Pad bytes are sourced from the TX internal buffer area. */
	pram_ptr->fen_padptr = pram_ptr->fen_genfcc.fcc_tiptr;
	pram_ptr->fen_maxd1 = PKT_MAXDMA_SIZE;
	pram_ptr->fen_maxd2 = PKT_MAXDMA_SIZE;

#if defined(ET_DEBUG)
	printf("parm_ptr(0xff788500) = %p\n",pram_ptr);
	printf("pram_ptr->fen_genfcc.fcc_rbase %08x\n",
		pram_ptr->fen_genfcc.fcc_rbase);
	printf("pram_ptr->fen_genfcc.fcc_tbase %08x\n",
		pram_ptr->fen_genfcc.fcc_tbase);
#endif

	/* Clear all pending FCC events (write-ones-to-clear) and mask
	 * every interrupt source — the driver polls. */
	if(info->ether_index == 0) {
		cpm->im_cpm_fcc1.fcce = ~0x0;
		cpm->im_cpm_fcc1.fccm = 0;
	} else if (info->ether_index == 1) {
		cpm->im_cpm_fcc2.fcce = ~0x0;
		cpm->im_cpm_fcc2.fccm = 0;
	} else if (info->ether_index == 2) {
		cpm->im_cpm_fcc3.fcce = ~0x0;
		cpm->im_cpm_fcc3.fccm = 0;
	}

	/* Issue the CPM "init RX and TX parameters" command for this
	 * FCC (0x0c is the command-specific field) and spin until the
	 * CP acknowledges by clearing the FLG bit.  The eieio keeps
	 * the polling load ordered with respect to the command write. */
	cp->cpcr = mk_cr_cmd(info->cpm_cr_enet_page,
			     info->cpm_cr_enet_sblock,
			     0x0c,
			     CPM_CR_INIT_TRX) | CPM_CR_FLG;
	do {
		__asm__ __volatile__ ("eieio");
	} while (cp->cpcr & CPM_CR_FLG);

	/* Finally enable the transmitter and receiver. */
	if(info->ether_index == 0) {
		cpm->im_cpm_fcc1.gfmr |= FCC_GFMR_ENT | FCC_GFMR_ENR;
	} else if (info->ether_index == 1) {
		cpm->im_cpm_fcc2.gfmr |= FCC_GFMR_ENT | FCC_GFMR_ENR;
	} else if (info->ether_index == 2) {
		cpm->im_cpm_fcc3.gfmr |= FCC_GFMR_ENT | FCC_GFMR_ENR;
	}

	return 1;
}
405
406static void fec_halt(struct eth_device* dev)
407{
408 struct ether_fcc_info_s * info = dev->priv;
409 volatile ccsr_cpm_t *cpm = (ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR;
410
411
412 if(info->ether_index == 0) {
413 cpm->im_cpm_fcc1.gfmr &= ~(FCC_GFMR_ENT | FCC_GFMR_ENR);
414 } else if(info->ether_index == 1) {
415 cpm->im_cpm_fcc2.gfmr &= ~(FCC_GFMR_ENT | FCC_GFMR_ENR);
416 } else if(info->ether_index == 2) {
417 cpm->im_cpm_fcc3.gfmr &= ~(FCC_GFMR_ENT | FCC_GFMR_ENR);
418 }
419}
420
421int fec_initialize(struct bd_info *bis)
422{
423 struct eth_device* dev;
424 int i;
425
426 for (i = 0; i < ARRAY_SIZE(ether_fcc_info); i++)
427 {
428 dev = (struct eth_device*) malloc(sizeof *dev);
429 memset(dev, 0, sizeof *dev);
430
431 sprintf(dev->name, "FCC%d",
432 ether_fcc_info[i].ether_index + 1);
433 dev->priv = ðer_fcc_info[i];
434 dev->init = fec_init;
435 dev->halt = fec_halt;
436 dev->send = fec_send;
437 dev->recv = fec_recv;
438
439 eth_register(dev);
440
441#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
442 && defined(CONFIG_BITBANGMII)
443 int retval;
444 struct mii_dev *mdiodev = mdio_alloc();
445 if (!mdiodev)
446 return -ENOMEM;
447 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
448 mdiodev->read = bb_miiphy_read;
449 mdiodev->write = bb_miiphy_write;
450
451 retval = mdio_register(mdiodev);
452 if (retval < 0)
453 return retval;
454#endif
455 }
456
457 return 1;
458}
459
460#endif
461