1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <common.h>
23#include <asm/cpm_8260.h>
24#include <mpc8260.h>
25#include <malloc.h>
26#include <net.h>
27#include <command.h>
28#include <config.h>
29
30#if (CONFIG_ETHER_INDEX == 1)
31# define PROFF_ENET PROFF_SCC1
32# define CPM_CR_ENET_PAGE CPM_CR_SCC1_PAGE
33# define CPM_CR_ENET_SBLOCK CPM_CR_SCC1_SBLOCK
34# define CMXSCR_MASK (CMXSCR_SC1 |\
35 CMXSCR_RS1CS_MSK |\
36 CMXSCR_TS1CS_MSK)
37
38#elif (CONFIG_ETHER_INDEX == 2)
39# define PROFF_ENET PROFF_SCC2
40# define CPM_CR_ENET_PAGE CPM_CR_SCC2_PAGE
41# define CPM_CR_ENET_SBLOCK CPM_CR_SCC2_SBLOCK
42# define CMXSCR_MASK (CMXSCR_SC2 |\
43 CMXSCR_RS2CS_MSK |\
44 CMXSCR_TS2CS_MSK)
45
46#elif (CONFIG_ETHER_INDEX == 3)
47# define PROFF_ENET PROFF_SCC3
48# define CPM_CR_ENET_PAGE CPM_CR_SCC3_PAGE
49# define CPM_CR_ENET_SBLOCK CPM_CR_SCC3_SBLOCK
50# define CMXSCR_MASK (CMXSCR_SC3 |\
51 CMXSCR_RS3CS_MSK |\
52 CMXSCR_TS3CS_MSK)
53#elif (CONFIG_ETHER_INDEX == 4)
54# define PROFF_ENET PROFF_SCC4
55# define CPM_CR_ENET_PAGE CPM_CR_SCC4_PAGE
56# define CPM_CR_ENET_SBLOCK CPM_CR_SCC4_SBLOCK
57# define CMXSCR_MASK (CMXSCR_SC4 |\
58 CMXSCR_RS4CS_MSK |\
59 CMXSCR_TS4CS_MSK)
60
61#endif
62
63
64
65#define DBUF_LENGTH 1520
66
67#define TX_BUF_CNT 2
68
69#if !defined(CONFIG_SYS_SCC_TOUT_LOOP)
70 #define CONFIG_SYS_SCC_TOUT_LOOP 1000000
71#endif
72
/* One driver-owned transmit buffer per TX buffer descriptor. */
static char txbuf[TX_BUF_CNT][ DBUF_LENGTH ];

/* Index of the next receive buffer descriptor to service. */
static uint rxIdx;
/* Index of the transmit buffer descriptor to use for the next send.
 * NOTE(review): sec_send() sets BD_ENET_TX_WRAP on every transmit, so in
 * practice this index appears to stay at 0 — confirm before relying on it. */
static uint txIdx;

/*
 * Receive and transmit buffer descriptor rings, kept together so a single
 * dual-port RAM allocation (see sec_init) covers both.  The struct is
 * volatile because the CPM reads and writes the descriptors concurrently
 * with the CPU.
 */
typedef volatile struct CommonBufferDescriptor {
    cbd_t rxbd[PKTBUFSRX];	/* receive buffer descriptors */
    cbd_t txbd[TX_BUF_CNT];	/* transmit buffer descriptors */
} RTXBD;

/* Points into CPM dual-port RAM; allocated once on first sec_init(). */
static RTXBD *rtx;
90
91
92static int sec_send(struct eth_device *dev, void *packet, int length)
93{
94 int i;
95 int result = 0;
96
97 if (length <= 0) {
98 printf("scc: bad packet size: %d\n", length);
99 goto out;
100 }
101
102 for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
103 if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
104 puts ("scc: tx buffer not ready\n");
105 goto out;
106 }
107 }
108
109 rtx->txbd[txIdx].cbd_bufaddr = (uint)packet;
110 rtx->txbd[txIdx].cbd_datlen = length;
111 rtx->txbd[txIdx].cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_LAST |
112 BD_ENET_TX_WRAP);
113
114 for(i=0; rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_READY; i++) {
115 if (i >= CONFIG_SYS_SCC_TOUT_LOOP) {
116 puts ("scc: tx error\n");
117 goto out;
118 }
119 }
120
121
122 result = rtx->txbd[txIdx].cbd_sc & BD_ENET_TX_STATS;
123
124 out:
125 return result;
126}
127
128
/*
 * Drain all filled receive buffer descriptors, passing good frames up the
 * network stack.  Returns the length of the last frame processed, or -1
 * when the ring is empty (the loop always terminates on an EMPTY BD).
 */
static int sec_rx(struct eth_device *dev)
{
	int length;

	for (;;)
	{
		/* CPM still owns this BD: nothing (more) to receive. */
		if (rtx->rxbd[rxIdx].cbd_sc & BD_ENET_RX_EMPTY) {
			length = -1;
			break;
		}

		length = rtx->rxbd[rxIdx].cbd_datlen;

		/* 0x003f covers the receive error bits in cbd_sc
		 * (presumably LG/NO/SH/CR/OV/CL — confirm against the
		 * MPC8260 manual); drop errored frames with a message. */
		if (rtx->rxbd[rxIdx].cbd_sc & 0x003f)
		{
			printf("err: %x\n", rtx->rxbd[rxIdx].cbd_sc);
		}
		else
		{
			/* Good frame: hand it up, minus the 4-byte CRC. */
			net_process_received_packet(net_rx_packets[rxIdx], length - 4);
		}

		/* Clear the length BEFORE giving the BD back to the CPM. */
		rtx->rxbd[rxIdx].cbd_datlen = 0;

		/* Return ownership and advance; the last BD keeps its WRAP
		 * bit so the CPM cycles back to index 0. */
		if ((rxIdx + 1) >= PKTBUFSRX) {
			rtx->rxbd[PKTBUFSRX - 1].cbd_sc = (BD_ENET_RX_WRAP |
							   BD_ENET_RX_EMPTY);
			rxIdx = 0;
		}
		else {
			rtx->rxbd[rxIdx].cbd_sc = BD_ENET_RX_EMPTY;
			rxIdx++;
		}
	}
	return length;
}
169
170
171
172
173
174
175
/*
 * Bring up the SCC Ethernet controller: allocate the BD rings in dual-port
 * RAM (first call only), route the clocks, fill in the SCC Ethernet
 * parameter RAM, issue INIT_TRX to the CPM, set up the descriptor rings,
 * and finally enable the receiver and transmitter.
 *
 * The ordering of the register writes below follows the hardware init
 * sequence and should not be rearranged.  Always returns 0.
 */
static int sec_init(struct eth_device *dev, bd_t *bis)
{
	int i;
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	scc_enet_t *pram_ptr;
	uint dpaddr;
	uchar ea[6];

	/* Restart both rings from the beginning. */
	rxIdx = 0;
	txIdx = 0;

	/* Allocate the BD rings in CPM dual-port RAM once; reused on
	 * subsequent init calls.  (+2 bytes slack, 16-byte aligned.) */
	if (rtx == NULL) {
		dpaddr = m8260_cpm_dpalloc(sizeof(RTXBD) + 2, 16);
		rtx = (RTXBD *)&immr->im_dprambase[dpaddr];
	}

	/* Route the board-configured RX/TX clocks to this SCC via the CMX;
	 * only our SCC's field (CMXSCR_MASK) is rewritten. */
	immr->im_cpmux.cmx_uar = 0;
	immr->im_cpmux.cmx_scr = ( (immr->im_cpmux.cmx_scr & ~CMXSCR_MASK) |
				   CONFIG_SYS_CMXSCR_VALUE);

	/* SCC Ethernet parameter RAM: ring base addresses ... */
	pram_ptr = (scc_enet_t *)&(immr->im_dprambase[PROFF_ENET]);
	pram_ptr->sen_genscc.scc_rbase = (unsigned int)(&rtx->rxbd[0]);
	pram_ptr->sen_genscc.scc_tbase = (unsigned int)(&rtx->txbd[0]);

	/* ... RX/TX function codes (0x18 — presumably big-endian/normal
	 * operation; confirm against the MPC8260 manual) ... */
	pram_ptr->sen_genscc.scc_rfcr = 0x18;
	pram_ptr->sen_genscc.scc_tfcr = 0x18;

	/* ... maximum receive buffer length ... */
	pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH;

	/* ... CRC preset (all ones) and the Ethernet CRC-32 mask. */
	pram_ptr->sen_cpres = ~(0x0);
	pram_ptr->sen_cmask = 0xdebb20e3;

	/* Wait for the CP command register to go idle, then issue
	 * INIT_TRX for this SCC (0x0c is the command's MCN field). */
	while(immr->im_cpm.cp_cpcr & CPM_CR_FLG);
	immr->im_cpm.cp_cpcr = mk_cr_cmd(CPM_CR_ENET_PAGE,
					 CPM_CR_ENET_SBLOCK,
					 0x0c,
					 CPM_CR_INIT_TRX) | CPM_CR_FLG;

	/* Clear the error counters. */
	pram_ptr->sen_crcec = 0x0;
	pram_ptr->sen_alec = 0x0;
	pram_ptr->sen_disfc = 0x0;

	/* Short-frame pad character. */
	pram_ptr->sen_pads = 0x8888;

	/* Retry limit before a transmit is abandoned. */
	pram_ptr->sen_retlim = 15;

	/* Ethernet frame length limits (max 1518, min 64 octets). */
	pram_ptr->sen_maxflr = 1518;
	pram_ptr->sen_minflr = 64;

	/* Maximum DMA counts for the two receive buffer pointers. */
	pram_ptr->sen_maxd1 = DBUF_LENGTH;
	pram_ptr->sen_maxd2 = DBUF_LENGTH;

	/* Clear the group (multicast) address hash table. */
	pram_ptr->sen_gaddr1 = 0x0;
	pram_ptr->sen_gaddr2 = 0x0;
	pram_ptr->sen_gaddr3 = 0x0;
	pram_ptr->sen_gaddr4 = 0x0;

	/* Station MAC address from the environment, loaded byte-reversed
	 * as the hardware expects.
	 * NOTE(review): the return value is ignored — if "ethaddr" is
	 * unset, ea[] is used uninitialized here; confirm callers
	 * guarantee the variable exists. */
	eth_getenv_enetaddr("ethaddr", ea);
	pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4];
	pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2];
	pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0];

	/* Persistence (not used). */
	pram_ptr->sen_pper = 0x0;

	/* Clear the individual address hash table. */
	pram_ptr->sen_iaddr1 = 0x0;
	pram_ptr->sen_iaddr2 = 0x0;
	pram_ptr->sen_iaddr3 = 0x0;
	pram_ptr->sen_iaddr4 = 0x0;

	/* Clear the temporary address holding registers. */
	pram_ptr->sen_taddrh = 0x0;
	pram_ptr->sen_taddrm = 0x0;
	pram_ptr->sen_taddrl = 0x0;

	/* Receive ring: every BD empty (CPM-owned), pointing at the
	 * shared net_rx_packets[] buffers. */
	for (i = 0; i < PKTBUFSRX; i++)
	{
		rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY;
		rtx->rxbd[i].cbd_datlen = 0;
		rtx->rxbd[i].cbd_bufaddr = (uint)net_rx_packets[i];
	}

	/* Last RX BD wraps back to the start of the ring. */
	rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP;

	/* Transmit ring: pad short frames, one frame per BD, append CRC;
	 * buffers are the static txbuf[] array. */
	for (i = 0; i < TX_BUF_CNT; i++)
	{
		rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD |
				       BD_ENET_TX_LAST |
				       BD_ENET_TX_TC);
		rtx->txbd[i].cbd_datlen = 0;
		rtx->txbd[i].cbd_bufaddr = (uint)&txbuf[i][0];
	}

	/* Last TX BD wraps back to the start of the ring. */
	rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP;

	/* Clear all pending SCC events (write-ones-to-clear). */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_scce = ~(0x0);

	/* Unmask TX error, RX frame and TX buffer events. */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_sccm = (SCCE_ENET_TXE |
						       SCCE_ENET_RXF |
						       SCCE_ENET_TXB);

	/* General SCC mode: high register cleared ... */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrh = 0;

	/* ... low register selects Ethernet mode with the usual clock
	 * invert / preamble settings (RX/TX still disabled here). */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl = (SCC_GSMRL_TCI |
							SCC_GSMRL_TPL_48 |
							SCC_GSMRL_TPP_10 |
							SCC_GSMRL_MODE_ENET);

	/* Data synchronization register: Ethernet preamble pattern. */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_dsr = 0xd555;

	/* Protocol-specific mode: CRC generation, 22-bit SFD search,
	 * plus the optional board-config features. */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_psmr = SCC_PSMR_ENCRC |
						      SCC_PSMR_NIB22 |
#if defined(CONFIG_SCC_ENET_FULL_DUPLEX)
						      SCC_PSMR_FDE |
#endif
#if defined(CONFIG_SCC_ENET_NO_BROADCAST)
						      SCC_PSMR_BRO |
#endif
#if defined(CONFIG_SCC_ENET_PROMISCOUS)
						      SCC_PSMR_PRO |
#endif
						      0;

	/* Everything is set up: enable receiver and transmitter last. */
	immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
							 SCC_GSMRL_ENT);

	return 0;
}
333
334
335static void sec_halt(struct eth_device *dev)
336{
337 volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
338 immr->im_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl &= ~(SCC_GSMRL_ENR |
339 SCC_GSMRL_ENT);
340}
341
#if 0
/*
 * Re-enable the receiver and transmitter without a full re-init.
 * Currently compiled out (no caller).
 * NOTE(review): this accesses the SCC via im_cpm.cp_scc[] while the rest
 * of the file uses im_scc[] — verify the path before enabling this code.
 */
static void sec_restart(void)
{
	volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
	immr->im_cpm.cp_scc[CONFIG_ETHER_INDEX-1].scc_gsmrl |= (SCC_GSMRL_ENR |
								SCC_GSMRL_ENT);
}
#endif
350
351int mpc82xx_scc_enet_initialize(bd_t *bis)
352{
353 struct eth_device *dev;
354
355 dev = (struct eth_device *) malloc(sizeof *dev);
356 memset(dev, 0, sizeof *dev);
357
358 strcpy(dev->name, "SCC");
359 dev->init = sec_init;
360 dev->halt = sec_halt;
361 dev->send = sec_send;
362 dev->recv = sec_rx;
363
364 eth_register(dev);
365
366 return 1;
367}
368