1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#include <common.h>
39#include <asm/cpm_8260.h>
40#include <mpc8260.h>
41#include <malloc.h>
42#include <net.h>
43#include <command.h>
44#include <config.h>
45
46#ifndef CONFIG_NET_MULTI
47#error "CONFIG_NET_MULTI must be defined."
48#endif
49
50#if (CONFIG_ETHER_INDEX == 1)
51# define PROFF_ENET PROFF_SCC1
52# define CPM_CR_ENET_PAGE CPM_CR_SCC1_PAGE
53# define CPM_CR_ENET_SBLOCK CPM_CR_SCC1_SBLOCK
54# define CMXSCR_MASK (CMXSCR_SC1 |\
55 CMXSCR_RS1CS_MSK |\
56 CMXSCR_TS1CS_MSK)
57
58#elif (CONFIG_ETHER_INDEX == 2)
59# define PROFF_ENET PROFF_SCC2
60# define CPM_CR_ENET_PAGE CPM_CR_SCC2_PAGE
61# define CPM_CR_ENET_SBLOCK CPM_CR_SCC2_SBLOCK
62# define CMXSCR_MASK (CMXSCR_SC2 |\
63 CMXSCR_RS2CS_MSK |\
64 CMXSCR_TS2CS_MSK)
65
66#elif (CONFIG_ETHER_INDEX == 3)
67# define PROFF_ENET PROFF_SCC3
68# define CPM_CR_ENET_PAGE CPM_CR_SCC3_PAGE
69# define CPM_CR_ENET_SBLOCK CPM_CR_SCC3_SBLOCK
70# define CMXSCR_MASK (CMXSCR_SC3 |\
71 CMXSCR_RS3CS_MSK |\
72 CMXSCR_TS3CS_MSK)
73#elif (CONFIG_ETHER_INDEX == 4)
74# define PROFF_ENET PROFF_SCC4
75# define CPM_CR_ENET_PAGE CPM_CR_SCC4_PAGE
76# define CPM_CR_ENET_SBLOCK CPM_CR_SCC4_SBLOCK
77# define CMXSCR_MASK (CMXSCR_SC4 |\
78 CMXSCR_RS4CS_MSK |\
79 CMXSCR_TS4CS_MSK)
80
81#endif
82
83
84
/* Size of each transmit buffer in bytes (max Ethernet frame + slack) */
#define DBUF_LENGTH 1520

/* Number of transmit buffer descriptors in the TX ring */
#define TX_BUF_CNT 2

/* Busy-wait iterations before the TX poll loops in sec_send() give up */
#if !defined(CONFIG_SYS_SCC_TOUT_LOOP)
	#define CONFIG_SYS_SCC_TOUT_LOOP 1000000
#endif

/*
 * Static transmit buffers.  Note that sec_send() points the BD at the
 * caller's packet directly; these buffers are only installed as the
 * initial BD buffer addresses in sec_init().
 */
static char txbuf[TX_BUF_CNT][ DBUF_LENGTH ];

/* Index of the next RX / TX buffer descriptor to service */
static uint rxIdx;
static uint txIdx;

/*
 * Layout of the receive and transmit buffer descriptor rings, placed
 * back to back in CPM dual-port RAM (allocated once in sec_init()).
 */
typedef volatile struct CommonBufferDescriptor {
	cbd_t rxbd[PKTBUFSRX];		/* receive buffer descriptors  */
	cbd_t txbd[TX_BUF_CNT];		/* transmit buffer descriptors */
} RTXBD;

/* BD rings in dual-port RAM; NULL until the first sec_init() call */
static RTXBD *rtx;
110
111
112static int sec_send(struct eth_device *dev, volatile void *packet, int length)
113{
114 int i;
115 int result = 0;
116
117 if (length <= 0) {
118 printf("scc: bad packet size: %d\n", length);
119 goto out;
120 }
121
122 for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
123 if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
124 puts ("scc: tx buffer not ready\n");
125 goto out;
126 }
127 }
128
129 rtx->txbd[txIdx].cbd_bufaddr = (uint)packet;
130 rtx->txbd[txIdx].cbd_datlen = length;
131 rtx->txbd[txIdx].cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_LAST |
132 BD_ENET_TX_WRAP);
133
134 for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
135 if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
136 puts ("scc: tx error\n");
137 goto out;
138 }
139 }
140
141
142 result = rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_STATS;
143
144 out:
145 return result;
146}
147
148
149static int sec_rx(struct eth_device *dev)
150{
151 int length;
152
153 for (;;)
154 {
155 if (rtx->rxbd[rxIdx].cbd_sc & BD_ENET_RX_EMPTY) {
156 length = -1;
157 break;
158 }
159
160 length = rtx->rxbd[rxIdx].cbd_datlen;
161
162 if (rtx->rxbd[rxIdx].cbd_sc & 0x003f)
163 {
164 printf("err: %x\n", rtx->rxbd[rxIdx].cbd_sc);
165 }
166 else
167 {
168
169 NetReceive(NetRxPackets[rxIdx], length - 4);
170 }
171
172
173
174 rtx->rxbd[rxIdx].cbd_datlen = 0;
175
176
177 if ((rxIdx + 1) >= PKTBUFSRX) {
178 rtx->rxbd[PKTBUFSRX - 1].cbd_sc = (BD_ENET_RX_WRAP |
179 BD_ENET_RX_EMPTY);
180 rxIdx = 0;
181 }
182 else {
183 rtx->rxbd[rxIdx].cbd_sc = BD_ENET_RX_EMPTY;
184 rxIdx++;
185 }
186 }
187 return length;
188}
189
190
191
192
193
194
195
/*
 * Bring up the SCC in Ethernet mode.
 *
 * Allocates the buffer descriptor rings in CPM dual-port RAM (first call
 * only), routes clocks to the SCC via the CPM mux, programs the SCC
 * Ethernet parameter RAM, issues the CPM INIT_TRX command, initializes
 * both BD rings, and finally enables the receiver and transmitter.
 *
 * The statement order below mirrors the documented init sequence for the
 * SCC Ethernet controller; do not reorder casually.
 *
 * @dev: eth_device being initialized (unused here)
 * @bis: board info (unused here)
 * Returns 0.
 */
static int sec_init(struct eth_device *dev, bd_t *bis)
{
	int i;
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	scc_enet_t *pram_ptr;
	uint dpaddr;
	uchar ea[6];

	/* Restart servicing both rings from descriptor 0 */
	rxIdx = 0;
	txIdx = 0;

	/*
	 * Allocate the BD rings in dual-port RAM once; later re-inits
	 * reuse the same allocation.  (+2 bytes of slack on a 16-byte
	 * aligned allocation — presumably historical; TODO confirm.)
	 */
	if (rtx == NULL) {
		dpaddr = m8260_cpm_dpalloc(sizeof(RTXBD) + 2, 16);
		rtx = (RTXBD *)&immr->im_dprambase[dpaddr];
	}

	/* Route clocks/pins to this SCC through the CPM mux; only the
	 * bits for this SCC (CMXSCR_MASK) are replaced. */
	immr->im_cpmux.cmx_uar = 0;
	immr->im_cpmux.cmx_scr = ( (immr->im_cpmux.cmx_scr & ~CMXSCR_MASK) |
				   CONFIG_SYS_CMXSCR_VALUE);

	/* SCC Ethernet parameter RAM: BD ring base addresses */
	pram_ptr = (scc_enet_t *)&(immr->im_dprambase[PROFF_ENET]);
	pram_ptr->sen_genscc.scc_rbase = (unsigned int)(&rtx->rxbd[0]);
	pram_ptr->sen_genscc.scc_tbase = (unsigned int)(&rtx->txbd[0]);

	/* RX/TX function codes (0x18: bus/endianness attributes for SDMA
	 * accesses — see the MPC8260 manual; TODO confirm exact meaning) */
	pram_ptr->sen_genscc.scc_rfcr = 0x18;
	pram_ptr->sen_genscc.scc_tfcr = 0x18;

	/* Maximum receive buffer length */
	pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH;

	pram_ptr->sen_cpres = ~(0x0);		/* CRC preset: all ones */
	pram_ptr->sen_cmask = 0xdebb20e3;	/* Ethernet CRC-32 constant */

	/* Wait for the CP to be idle, then command it to (re)initialize
	 * this SCC's RX and TX parameters */
	while(immr->im_cpm.cp_cpcr & CPM_CR_FLG);
	immr->im_cpm.cp_cpcr = mk_cr_cmd(CPM_CR_ENET_PAGE,
					 CPM_CR_ENET_SBLOCK,
					 0x0c,
					 CPM_CR_INIT_TRX) | CPM_CR_FLG;

	/* Clear the error counters (CRC errors, alignment, discards) */
	pram_ptr->sen_crcec = 0x0;
	pram_ptr->sen_alec = 0x0;
	pram_ptr->sen_disfc = 0x0;

	pram_ptr->sen_pads = 0x8888;	/* short-frame pad character */

	pram_ptr->sen_retlim = 15;	/* collision retry limit */

	pram_ptr->sen_maxflr = 1518;	/* max frame length */
	pram_ptr->sen_minflr = 64;	/* min frame length */

	pram_ptr->sen_maxd1 = DBUF_LENGTH;	/* max DMA1 byte count */
	pram_ptr->sen_maxd2 = DBUF_LENGTH;	/* max DMA2 byte count */

	/* Clear the group (multicast) address hash filter */
	pram_ptr->sen_gaddr1 = 0x0;
	pram_ptr->sen_gaddr2 = 0x0;
	pram_ptr->sen_gaddr3 = 0x0;
	pram_ptr->sen_gaddr4 = 0x0;

	/* Station MAC address, stored low-order bytes first (paddrh holds
	 * ea[5]/ea[4]).  NOTE(review): return value of
	 * eth_getenv_enetaddr() is unchecked — if "ethaddr" is unset, ea
	 * may be used without a valid value; verify the helper zeroes it. */
	eth_getenv_enetaddr("ethaddr", ea);
	pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4];
	pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2];
	pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0];

	pram_ptr->sen_pper = 0x0;	/* persistence: disabled */

	/* Clear the individual address hash filter */
	pram_ptr->sen_iaddr1 = 0x0;
	pram_ptr->sen_iaddr2 = 0x0;
	pram_ptr->sen_iaddr3 = 0x0;
	pram_ptr->sen_iaddr4 = 0x0;

	/* Clear the temporary address registers */
	pram_ptr->sen_taddrh = 0x0;
	pram_ptr->sen_taddrm = 0x0;
	pram_ptr->sen_taddrl = 0x0;

	/* Initialize the RX ring: all BDs empty, pointing at the shared
	 * network packet buffers */
	for (i = 0; i < PKTBUFSRX; i++)
	{
		rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
		rtx->rxbd[i].cbd_datlen = 0;
		rtx->rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i];
	}

	rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;	/* close the ring */

	/* Initialize the TX ring: pad short frames, last-in-frame,
	 * transmit CRC; not READY yet */
	for (i = 0; i < TX_BUF_CNT; i++)
	{
		rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD |
				       BD_ENET_TX_LAST |
				       BD_ENET_TX_TC);
		rtx->txbd[i].cbd_datlen = 0;
		rtx->txbd[i].cbd_bufaddr = (uint)&txbuf[i][0];
	}

	rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP;	/* close the ring */

	/* Clear all pending SCC events (write-1-to-clear) */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_scce = ~(0x0);

	/* Unmask TX error, RX frame, and TX buffer events */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_sccm = (SCCE_ENET_TXE |
						       SCCE_ENET_RXF |
						       SCCE_ENET_TXB);

	/* General SCC mode: Ethernet, 48-bit preamble of 10s, TCI set;
	 * receiver/transmitter still disabled at this point */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrh = 0;

	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl = (SCC_GSMRL_TCI |
							SCC_GSMRL_TPL_48 |
							SCC_GSMRL_TPP_10 |
							SCC_GSMRL_MODE_ENET);

	/* Data sync register: Ethernet preamble/SFD pattern */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_dsr = 0xd555;

	/* Protocol-specific mode: append CRC on transmit, 22-bit hash
	 * search (NIB22); full-duplex / broadcast-reject / promiscuous
	 * are board-config options */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_psmr = SCC_PSMR_ENCRC |
						      SCC_PSMR_NIB22 |
#if defined(CONFIG_SCC_ENET_FULL_DUPLEX)
						      SCC_PSMR_FDE |
#endif
#if defined(CONFIG_SCC_ENET_NO_BROADCAST)
						      SCC_PSMR_BRO |
#endif
#if defined(CONFIG_SCC_ENET_PROMISCOUS)
						      SCC_PSMR_PRO |
#endif
						      0;

	/* Finally enable the receiver and transmitter */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
							 SCC_GSMRL_ENT);

	return 0;
}
353
354
355static void sec_halt(struct eth_device *dev)
356{
357 volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
358 immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl &= ~(SCC_GSMRL_ENR |
359 SCC_GSMRL_ENT);
360}
361
#if 0
/*
 * Re-enable the SCC transmitter and receiver after a halt.
 * Currently compiled out (unused).
 *
 * NOTE(review): this accesses the SCC via im_cpm.cp_scc[...], whereas
 * sec_halt()/sec_init() use im_scc[...] — confirm both paths name the
 * same registers before re-enabling this code.
 */
static void sec_restart(void)
{
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	immr->im_cpm.cp_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
								SCC_GSMRL_ENT);
}
#endif
370
371int mpc82xx_scc_enet_initialize(bd_t *bis)
372{
373 struct eth_device *dev;
374
375 dev = (struct eth_device *) malloc(sizeof *dev);
376 memset(dev, 0, sizeof *dev);
377
378 sprintf(dev->name, "SCC");
379 dev->init = sec_init;
380 dev->halt = sec_halt;
381 dev->send = sec_send;
382 dev->recv = sec_rx;
383
384 eth_register(dev);
385
386 return 1;
387}
388