1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#include <common.h>
39#include <asm/cpm_8260.h>
40#include <mpc8260.h>
41#include <malloc.h>
42#include <net.h>
43#include <command.h>
44#include <config.h>
45
/*
 * Select the SCC channel used for Ethernet.  CONFIG_ETHER_INDEX (1..4)
 * picks the parameter-RAM offset, the CPM command page/sub-block, and
 * the CMX SCC route-register mask for that channel.
 */
#if (CONFIG_ETHER_INDEX == 1)
# define PROFF_ENET PROFF_SCC1
# define CPM_CR_ENET_PAGE CPM_CR_SCC1_PAGE
# define CPM_CR_ENET_SBLOCK CPM_CR_SCC1_SBLOCK
# define CMXSCR_MASK (CMXSCR_SC1 |\
 CMXSCR_RS1CS_MSK |\
 CMXSCR_TS1CS_MSK)

#elif (CONFIG_ETHER_INDEX == 2)
# define PROFF_ENET PROFF_SCC2
# define CPM_CR_ENET_PAGE CPM_CR_SCC2_PAGE
# define CPM_CR_ENET_SBLOCK CPM_CR_SCC2_SBLOCK
# define CMXSCR_MASK (CMXSCR_SC2 |\
 CMXSCR_RS2CS_MSK |\
 CMXSCR_TS2CS_MSK)

#elif (CONFIG_ETHER_INDEX == 3)
# define PROFF_ENET PROFF_SCC3
# define CPM_CR_ENET_PAGE CPM_CR_SCC3_PAGE
# define CPM_CR_ENET_SBLOCK CPM_CR_SCC3_SBLOCK
# define CMXSCR_MASK (CMXSCR_SC3 |\
 CMXSCR_RS3CS_MSK |\
 CMXSCR_TS3CS_MSK)
#elif (CONFIG_ETHER_INDEX == 4)
# define PROFF_ENET PROFF_SCC4
# define CPM_CR_ENET_PAGE CPM_CR_SCC4_PAGE
# define CPM_CR_ENET_SBLOCK CPM_CR_SCC4_SBLOCK
# define CMXSCR_MASK (CMXSCR_SC4 |\
 CMXSCR_RS4CS_MSK |\
 CMXSCR_TS4CS_MSK)

#endif


/* Ethernet data buffer length: covers a max frame (1518) rounded up */
#define DBUF_LENGTH 1520

/* Number of transmit buffer descriptors in the ring */
#define TX_BUF_CNT 2

/* Busy-wait iteration limit for TX polling loops (board may override) */
#if !defined(CONFIG_SYS_SCC_TOUT_LOOP)
	#define CONFIG_SYS_SCC_TOUT_LOOP 1000000
#endif

/* Static transmit buffers (RX buffers come from NetRxPackets[]) */
static char txbuf[TX_BUF_CNT][ DBUF_LENGTH ];

/* Current RX/TX buffer-descriptor indices.
 * NOTE(review): txIdx is reset to 0 in sec_init() and never advanced
 * by sec_send(), so effectively only TX BD 0 is used. */
static uint rxIdx;
static uint txIdx;

/*
 * Receive and transmit buffer descriptors, laid out contiguously so a
 * single dual-port-RAM allocation covers both rings.  Declared volatile:
 * the CPM hardware updates the descriptors asynchronously.
 */
typedef volatile struct CommonBufferDescriptor {
	cbd_t rxbd[PKTBUFSRX];
	cbd_t txbd[TX_BUF_CNT];
} RTXBD;

/* BD rings live in CPM dual-port RAM; allocated once in sec_init() */
static RTXBD *rtx;
106
107
/*
 * Transmit one Ethernet frame via the SCC.
 *
 * @dev:    unused (U-Boot eth_device callback signature)
 * @packet: frame to send; the BD is pointed directly at this buffer,
 *          so it must stay valid until transmission completes
 * @length: frame length in bytes; must be > 0
 *
 * Returns the BD status bits (BD_ENET_TX_STATS) of the completed
 * transmit, or 0 on bad length / timeout.
 *
 * NOTE(review): BD_ENET_TX_WRAP is set unconditionally and txIdx is
 * never incremented, so only the first TX BD is ever used even though
 * TX_BUF_CNT is 2 — presumably intentional for a polled bootloader
 * driver, but worth confirming.
 */
static int sec_send(struct eth_device *dev, void *packet, int length)
{
	int i;
	int result = 0;

	if (length <= 0) {
		printf("scc: bad packet size: %d\n", length);
		goto out;
	}

	/* Busy-wait until the TX BD is free (CPM clears READY) */
	for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
		if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
			puts ("scc: tx buffer not ready\n");
			goto out;
		}
	}

	/* Hand the frame to the CPM; READY must be set last so the
	 * controller sees a fully described buffer. */
	rtx->txbd[txIdx].cbd_bufaddr = (uint)packet;
	rtx->txbd[txIdx].cbd_datlen = length;
	rtx->txbd[txIdx].cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_LAST |
				    BD_ENET_TX_WRAP);

	/* Busy-wait for transmission to complete (READY cleared again) */
	for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
		if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
			puts ("scc: tx error\n");
			goto out;
		}
	}

	/* Nonzero status bits here include error indications as well as
	 * success flags; caller sees the raw BD status. */
	result = rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_STATS;

 out:
	return result;
}
143
144
/*
 * Poll the RX buffer-descriptor ring and pass every completed frame to
 * the network stack.
 *
 * @dev: unused (U-Boot eth_device callback signature)
 *
 * Drains all filled BDs in one call.  Returns the data length of the
 * last BD processed, or -1 if the ring was empty on entry.  Frames with
 * any of the low six error bits set in the BD status are dropped with a
 * diagnostic; good frames are handed to NetReceive() minus the 4-byte
 * trailing CRC.
 */
static int sec_rx(struct eth_device *dev)
{
	int length;

	for (;;)
	{
		/* EMPTY set means the CPM still owns this BD: ring drained */
		if (rtx->rxbd[rxIdx].cbd_sc & BD_ENET_RX_EMPTY) {
			length = -1;
			break;
		}

		length = rtx->rxbd[rxIdx].cbd_datlen;

		/* Low 6 status bits are per-frame receive error flags */
		if (rtx->rxbd[rxIdx].cbd_sc & 0x003f)
		{
			printf("err: %x\n", rtx->rxbd[rxIdx].cbd_sc);
		}
		else
		{
			/* Strip the 4-byte FCS before handing the frame up */
			NetReceive(NetRxPackets[rxIdx], length - 4);
		}

		rtx->rxbd[rxIdx].cbd_datlen = 0;

		/* Give the BD back to the CPM; the last BD keeps its WRAP
		 * bit so the controller loops back to BD 0. */
		if ((rxIdx + 1) >= PKTBUFSRX) {
			rtx->rxbd[PKTBUFSRX - 1].cbd_sc = (BD_ENET_RX_WRAP |
							   BD_ENET_RX_EMPTY);
			rxIdx = 0;
		}
		else {
			rtx->rxbd[rxIdx].cbd_sc = BD_ENET_RX_EMPTY;
			rxIdx++;
		}
	}
	return length;
}
185
186
187
188
189
190
191
/*
 * Initialize the SCC for Ethernet operation.
 *
 * @dev: unused (U-Boot eth_device callback signature)
 * @bis: board information (unused here; MAC comes from the environment)
 *
 * Allocates the BD rings in CPM dual-port RAM (once), routes the clocks
 * via the CMX registers, fills in the SCC Ethernet parameter RAM, issues
 * the CPM INIT RX & TX command, primes both BD rings, and finally
 * enables the receiver and transmitter.  Always returns 0.
 */
static int sec_init(struct eth_device *dev, bd_t *bis)
{
	int i;
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	scc_enet_t *pram_ptr;
	uint dpaddr;
	uchar ea[6];

	rxIdx = 0;
	txIdx = 0;

	/* Allocate BD rings in dual-port RAM on first init only; the
	 * allocation is never freed.  +2/align 16 presumably for BD
	 * alignment requirements — TODO confirm against the CPM spec. */
	if (rtx == NULL) {
		dpaddr = m8260_cpm_dpalloc(sizeof(RTXBD) + 2, 16);
		rtx = (RTXBD *)&immr->im_dprambase[dpaddr];
	}

	/* Route clocks to this SCC via the CMX; board config supplies the
	 * channel-specific clock-source value. */
	immr->im_cpmux.cmx_uar = 0;
	immr->im_cpmux.cmx_scr = ( (immr->im_cpmux.cmx_scr & ~CMXSCR_MASK) |
				   CONFIG_SYS_CMXSCR_VALUE);

	/* SCC Ethernet parameter RAM: BD ring base addresses and the
	 * function-code / buffer-length setup. */
	pram_ptr = (scc_enet_t *)&(immr->im_dprambase[PROFF_ENET]);
	pram_ptr->sen_genscc.scc_rbase = (unsigned int)(&rtx->rxbd[0]);
	pram_ptr->sen_genscc.scc_tbase = (unsigned int)(&rtx->txbd[0]);

	/* 0x18: SDMA function code (big-endian, normal operation) —
	 * NOTE(review): magic value, confirm against MPC8260 UM. */
	pram_ptr->sen_genscc.scc_rfcr = 0x18;
	pram_ptr->sen_genscc.scc_tfcr = 0x18;

	pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH;

	/* CRC preset (all ones) and the Ethernet CRC-32 mask constant */
	pram_ptr->sen_cpres = ~(0x0);
	pram_ptr->sen_cmask = 0xdebb20e3;

	/* Issue INIT RX & TX PARAMETERS after the command register is
	 * free; 0x0c is the MCC/channel field for this command —
	 * TODO confirm encoding. */
	while(immr->im_cpm.cp_cpcr & CPM_CR_FLG);
	immr->im_cpm.cp_cpcr = mk_cr_cmd(CPM_CR_ENET_PAGE,
					 CPM_CR_ENET_SBLOCK,
					 0x0c,
					 CPM_CR_INIT_TRX) | CPM_CR_FLG;

	/* Clear error counters */
	pram_ptr->sen_crcec = 0x0;
	pram_ptr->sen_alec = 0x0;
	pram_ptr->sen_disfc = 0x0;

	/* Short-frame pad character */
	pram_ptr->sen_pads = 0x8888;

	/* Retry limit before reporting a transmit failure */
	pram_ptr->sen_retlim = 15;

	/* Legal Ethernet frame length bounds */
	pram_ptr->sen_maxflr = 1518;
	pram_ptr->sen_minflr = 64;

	pram_ptr->sen_maxd1 = DBUF_LENGTH;
	pram_ptr->sen_maxd2 = DBUF_LENGTH;

	/* Clear the group (multicast hash) address filter */
	pram_ptr->sen_gaddr1 = 0x0;
	pram_ptr->sen_gaddr2 = 0x0;
	pram_ptr->sen_gaddr3 = 0x0;
	pram_ptr->sen_gaddr4 = 0x0;

	/* Station MAC address from the environment, stored byte-swapped
	 * per the CPM parameter-RAM layout (paddrh holds ea[5..4], etc.).
	 * NOTE(review): return value of eth_getenv_enetaddr() is ignored;
	 * a missing "ethaddr" leaves ea[] uninitialized. */
	eth_getenv_enetaddr("ethaddr", ea);
	pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4];
	pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2];
	pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0];

	pram_ptr->sen_pper = 0x0;

	/* Clear the individual (exact-match) address filter */
	pram_ptr->sen_iaddr1 = 0x0;
	pram_ptr->sen_iaddr2 = 0x0;
	pram_ptr->sen_iaddr3 = 0x0;
	pram_ptr->sen_iaddr4 = 0x0;

	/* Clear the temporary address holding registers */
	pram_ptr->sen_taddrh = 0x0;
	pram_ptr->sen_taddrm = 0x0;
	pram_ptr->sen_taddrl = 0x0;

	/* Prime the RX ring: every BD empty, pointing at NetRxPackets[] */
	for (i = 0; i < PKTBUFSRX; i++)
	{
		rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
		rtx->rxbd[i].cbd_datlen = 0;
		rtx->rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
	}

	rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

	/* Prime the TX ring: pad short frames, last-in-frame, append CRC */
	for (i = 0; i < TX_BUF_CNT; i++)
	{
		rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD |
				       BD_ENET_TX_LAST |
				       BD_ENET_TX_TC);
		rtx->txbd[i].cbd_datlen = 0;
		rtx->txbd[i].cbd_bufaddr = (uint)&txbuf[i][0];
	}

	rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP;

	/* Clear all pending SCC events (write-ones-to-clear) */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_scce = ~(0x0);

	/* Unmask only the events this polled driver cares about */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_sccm = (SCCE_ENET_TXE |
						       SCCE_ENET_RXF |
						       SCCE_ENET_TXB);

	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrh = 0;

	/* Ethernet mode, 48-bit preamble, 10-bit sync (per MPC8260 UM) */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl = (SCC_GSMRL_TCI |
							SCC_GSMRL_TPL_48 |
							SCC_GSMRL_TPP_10 |
							SCC_GSMRL_MODE_ENET);

	/* Data synchronization register: Ethernet SFD pattern */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_dsr = 0xd555;

	/* Protocol-specific mode: CRC generation, NIB22, plus optional
	 * full-duplex / broadcast-reject / promiscuous per board config. */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_psmr = SCC_PSMR_ENCRC |
						      SCC_PSMR_NIB22 |
#if defined(CONFIG_SCC_ENET_FULL_DUPLEX)
						      SCC_PSMR_FDE |
#endif
#if defined(CONFIG_SCC_ENET_NO_BROADCAST)
						      SCC_PSMR_BRO |
#endif
#if defined(CONFIG_SCC_ENET_PROMISCOUS)
						      SCC_PSMR_PRO |
#endif
						      0;

	/* Finally enable receiver and transmitter */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
							 SCC_GSMRL_ENT);

	return 0;
}
349
350
351static void sec_halt(struct eth_device *dev)
352{
353 volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
354 immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl &= ~(SCC_GSMRL_ENR |
355 SCC_GSMRL_ENT);
356}
357
/* NOTE(review): disabled, unused helper kept for reference — it would
 * re-enable the receiver/transmitter after sec_halt().  It also accesses
 * the SCC registers via im_cpm.cp_scc[], unlike the rest of this file
 * which uses im_scc[]; verify before ever enabling it.  Consider
 * deleting instead of keeping dead code. */
#if 0
static void sec_restart(void)
{
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	immr->im_cpm.cp_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
								SCC_GSMRL_ENT);
}
#endif
366
367int mpc82xx_scc_enet_initialize(bd_t *bis)
368{
369 struct eth_device *dev;
370
371 dev = (struct eth_device *) malloc(sizeof *dev);
372 memset(dev, 0, sizeof *dev);
373
374 sprintf(dev->name, "SCC");
375 dev->init = sec_init;
376 dev->halt = sec_halt;
377 dev->send = sec_send;
378 dev->recv = sec_rx;
379
380 eth_register(dev);
381
382 return 1;
383}
384