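/*
 * sonic.c
 *
 * Driver core for National Semiconductor SONIC ethernet controllers:
 * device open/close, transmit and receive paths, interrupt handling,
 * statistics, multicast filtering and chip initialization.
 */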

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");

static void sonic_msg_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
}
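
/*
 * Open/initialize the SONIC controller: allocate and DMA-map the
 * receive buffers, bring up the chip and start the transmit queue.
 */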
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);

		if (skb == NULL) {
			while (i > 0) {
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}

		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);

		if (dma_mapping_error(lp->device, laddr)) {
			while (i > 0) {
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}
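
	/* receive buffers are in place; now bring up the controller itself */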
	sonic_init(dev);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}
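
/*
 * Close the SONIC device: stop the queue, reset the chip and release
 * all transmit and receive buffers.
 */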
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);
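
	/* mask and acknowledge all interrupts, then put the chip into reset */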
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
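
	/* unmap and free any skbs still waiting to be transmitted */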
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
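
	/* unmap and free the receive buffers */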
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}
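
/*
 * Transmit timeout: reset the chip, release any pending transmit
 * buffers and restart the queue.
 */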
static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
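
	/* put the SONIC into software reset and disable interrupts */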
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	sonic_init(dev);
	lp->stats.tx_errors++;
	netif_trans_update(dev);
	netif_wake_queue(dev);
}
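
/*
 * Queue a packet for transmission.  The packet is padded to the
 * minimum Ethernet length if necessary, its data is DMA-mapped, the
 * next free transmit descriptor is filled in and the transmitter is
 * kicked with the TXP command.
 */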
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
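
	/* map the packet data so the SONIC can DMA from it */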
	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
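
	/*
	 * Make sure the descriptor is fully written out before the
	 * bookkeeping below, and the bookkeeping before the previous
	 * descriptor's EOL bit is cleared, which hands the new entry
	 * to the chip.
	 */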
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* the ring is full: stop the queue until descriptors are reclaimed */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
	} else {
		netif_start_queue(dev);
	}

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}
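
/*
 * Interrupt handler: service receive, transmit-completion and error
 * interrupts until the chip's interrupt status is clear.
 */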
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;

	if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
		return IRQ_NONE;

	do {
		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX);
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;
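
			/*
			 * Free the skbs of all descriptors the chip has
			 * completed: a slot is done once the SONIC has
			 * written back a non-zero status word.  Stop at
			 * the first still-pending descriptor or at the
			 * end-of-list marker.
			 */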
			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & 0x0001) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					lp->stats.tx_errors++;
					if (td_status & 0x0642)
						lp->stats.tx_aborted_errors++;
					if (td_status & 0x0180)
						lp->stats.tx_carrier_errors++;
					if (td_status & 0x0020)
						lp->stats.tx_window_errors++;
					if (td_status & 0x0004)
						lp->stats.tx_fifo_errors++;
				}

				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;

				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);
			lp->cur_tx = entry;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN);
		}
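
		/* receive error conditions */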
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
			lp->stats.rx_fifo_errors++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE);
		}
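
		/* the chip's 16-bit tally counters overflowed; account for the wrap */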
		if (status & SONIC_INT_FAE) {
			lp->stats.rx_frame_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE);
		}
		if (status & SONIC_INT_CRC) {
			lp->stats.rx_crc_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC);
		}
		if (status & SONIC_INT_MP) {
			lp->stats.rx_missed_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_MP);
		}

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
				netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
					  __func__);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER);
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
			       dev->name);
			SONIC_WRITE(SONIC_IMR, 0);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_BR);
		}

		/* load CAM done */
		if (status & SONIC_INT_LCD)
			SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD);
	} while ((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
	return IRQ_HANDLED;
}
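
/*
 * Receive packets: pass completed buffers up the stack, replace them
 * with freshly allocated ones and return the descriptors to the chip.
 */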
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	int entry = lp->cur_rx;

	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		struct sk_buff *used_skb;
		struct sk_buff *new_skb;
		dma_addr_t new_laddr;
		u16 bufadr_l;
		u16 bufadr_h;
		int pkt_len;

		status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
		if (status & SONIC_RCR_PRX) {
			/* allocate a replacement receive buffer */
			new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
			if (new_skb == NULL) {
				lp->stats.rx_dropped++;
				break;
			}

			if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
				skb_reserve(new_skb, 2);

			new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
						   SONIC_RBSIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(lp->device, new_laddr)) {
				dev_kfree_skb(new_skb);
				printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n",
				       dev->name);
				lp->stats.rx_dropped++;
				break;
			}

			/* pass the used buffer up the stack */
			dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
			used_skb = lp->rx_skb[entry];
			pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
			skb_trim(used_skb, pkt_len);
			used_skb->protocol = eth_type_trans(used_skb, dev);
			netif_rx(used_skb);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;

			/* and insert the new buffer in its place */
			lp->rx_laddr[entry] = new_laddr;
			lp->rx_skb[entry] = new_skb;

			bufadr_l = (unsigned long)new_laddr & 0xffff;
			bufadr_h = (unsigned long)new_laddr >> 16;
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
		} else {
			/* received with errors */
			lp->stats.rx_errors++;
			if (status & SONIC_RCR_FAER)
				lp->stats.rx_frame_errors++;
			if (status & SONIC_RCR_CRCR)
				lp->stats.rx_crc_errors++;
		}
		if (status & SONIC_RCR_LPKT) {
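			/*
			 * This was the last packet in the receive buffer;
			 * advance the resource write pointer (wrapping at
			 * the end of the area) so the chip can reuse it.
			 */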
			lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
			if (lp->cur_rwp >= lp->rra_end)
				lp->cur_rwp = lp->rra_laddr & 0xffff;
			SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
			if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
				netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
					  __func__);
				SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
			}
		} else {
			printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
			       dev->name);
		}

		/* give the descriptor back to the chip and make it the new end of list */
		sonic_rda_put(dev, entry, SONIC_RD_LINK,
			      sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
		lp->eol_rx = entry;
		lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
	}
}
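
/*
 * Return the interface statistics after folding in the chip's tally
 * counters, which are reset after being read.
 */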
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}
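
/*
 * Set the receive mode: promiscuous, accept-all-multicast, or a CAM
 * loaded with up to 15 multicast addresses plus our own.
 */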
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;	/* accept all multicast packets */
		} else {
			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);	/* always enable our own address */
			i = 1;
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			/* load the CAM */
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}
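
/*
 * Initialize the SONIC controller: reset the chip, set up the receive
 * resource area and the receive/transmit descriptor rings, load the
 * CAM and finally enable the receiver and interrupts.
 */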
static int sonic_init(struct net_device *dev)
{
	unsigned int cmd;
	struct sonic_local *lp = netdev_priv(dev);
	int i;
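
	/* put the SONIC into software reset mode and disable all interrupts */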
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
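
	/* leave software reset and keep the receiver disabled while the rings are set up */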
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);

	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;

		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}
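
	/* program the resource area registers and the end-of-buffer word count */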
	lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
		       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
	lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
		       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;

	SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_REA, lp->rra_end);
	SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
	SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	i = 0;
	while (i++ < 100) {
		if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
			break;
	}

	netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
		  SONIC_READ(SONIC_CMD), i);
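
	/*
	 * Initialize the receive descriptors so that they form a circular
	 * linked list: the last descriptor points back to the first.
	 */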
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			      lp->rda_laddr +
			      ((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix the last descriptor so the list wraps around */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			      (lp->tda_laddr & 0xffff) +
			      (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix the last descriptor so the list wraps around */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = lp->next_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;
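
	/* put our own address in CAM entry 0 */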
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
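
	/* point the chip at the CAM descriptor area and load the CAM */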
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);

	/* wait for the load CAM command to complete */
	i = 0;
	while (i++ < 100) {
		if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
			break;
	}
	netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
		  SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
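
	/* enable the receiver, program the default RCR/TCR values and re-enable interrupts */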
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);

	cmd = SONIC_READ(SONIC_CMD);
	if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
		printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}

MODULE_LICENSE("GPL");