1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58#include "et131x_version.h"
59#include "et131x_defs.h"
60
61#include <linux/pci.h>
62#include <linux/init.h>
63#include <linux/module.h>
64#include <linux/types.h>
65#include <linux/kernel.h>
66
67#include <linux/sched.h>
68#include <linux/ptrace.h>
69#include <linux/slab.h>
70#include <linux/ctype.h>
71#include <linux/string.h>
72#include <linux/timer.h>
73#include <linux/interrupt.h>
74#include <linux/in.h>
75#include <linux/delay.h>
76#include <linux/io.h>
77#include <linux/bitops.h>
78#include <asm/system.h>
79
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/ioport.h>
85
86#include "et1310_phy.h"
87#include "et1310_pm.h"
88#include "et1310_jagcore.h"
89
90#include "et131x_adapter.h"
91#include "et131x_initpci.h"
92
93#include "et1310_rx.h"
94
95
96void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
97
98
99
100
101
102
103
104
105
106
107int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
108{
109 uint32_t OuterLoop, InnerLoop;
110 uint32_t bufsize;
111 uint32_t pktStatRingSize, FBRChunkSize;
112 RX_RING_t *rx_ring;
113
114
115 rx_ring = (RX_RING_t *) &adapter->RxRing;
116
117
118#ifdef USE_FBR0
119 rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
120#endif
121
122 rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142 if (adapter->RegistryJumboPacket < 2048) {
143#ifdef USE_FBR0
144 rx_ring->Fbr0BufferSize = 256;
145 rx_ring->Fbr0NumEntries = 512;
146#endif
147 rx_ring->Fbr1BufferSize = 2048;
148 rx_ring->Fbr1NumEntries = 512;
149 } else if (adapter->RegistryJumboPacket < 4096) {
150#ifdef USE_FBR0
151 rx_ring->Fbr0BufferSize = 512;
152 rx_ring->Fbr0NumEntries = 1024;
153#endif
154 rx_ring->Fbr1BufferSize = 4096;
155 rx_ring->Fbr1NumEntries = 512;
156 } else {
157#ifdef USE_FBR0
158 rx_ring->Fbr0BufferSize = 1024;
159 rx_ring->Fbr0NumEntries = 768;
160#endif
161 rx_ring->Fbr1BufferSize = 16384;
162 rx_ring->Fbr1NumEntries = 128;
163 }
164
165#ifdef USE_FBR0
166 adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167 adapter->RxRing.Fbr1NumEntries;
168#else
169 adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
170#endif
171
172
173 bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
174 rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
175 bufsize,
176 &rx_ring->pFbr1RingPa);
177 if (!rx_ring->pFbr1RingVa) {
178 dev_err(&adapter->pdev->dev,
179 "Cannot alloc memory for Free Buffer Ring 1\n");
180 return -ENOMEM;
181 }
182
183
184
185
186
187
188
189
190 rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
191
192
193 et131x_align_allocated_memory(adapter,
194 &rx_ring->Fbr1Realpa,
195 &rx_ring->Fbr1offset, 0x0FFF);
196
197 rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198 rx_ring->Fbr1offset);
199
200#ifdef USE_FBR0
201
202 bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
203 rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
204 bufsize,
205 &rx_ring->pFbr0RingPa);
206 if (!rx_ring->pFbr0RingVa) {
207 dev_err(&adapter->pdev->dev,
208 "Cannot alloc memory for Free Buffer Ring 0\n");
209 return -ENOMEM;
210 }
211
212
213
214
215
216
217
218
219 rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
220
221
222 et131x_align_allocated_memory(adapter,
223 &rx_ring->Fbr0Realpa,
224 &rx_ring->Fbr0offset, 0x0FFF);
225
226 rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227 rx_ring->Fbr0offset);
228#endif
229
230 for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
231 OuterLoop++) {
232 uint64_t Fbr1Offset;
233 uint64_t Fbr1TempPa;
234 uint32_t Fbr1Align;
235
236
237
238
239
240
241
242
243 if (rx_ring->Fbr1BufferSize > 4096)
244 Fbr1Align = 4096;
245 else
246 Fbr1Align = rx_ring->Fbr1BufferSize;
247
248 FBRChunkSize =
249 (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250 rx_ring->Fbr1MemVa[OuterLoop] =
251 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252 &rx_ring->Fbr1MemPa[OuterLoop]);
253
254 if (!rx_ring->Fbr1MemVa[OuterLoop]) {
255 dev_err(&adapter->pdev->dev,
256 "Could not alloc memory\n");
257 return -ENOMEM;
258 }
259
260
261 Fbr1TempPa = rx_ring->Fbr1MemPa[OuterLoop];
262
263 et131x_align_allocated_memory(adapter,
264 &Fbr1TempPa,
265 &Fbr1Offset, (Fbr1Align - 1));
266
267 for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
268 uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;
269
270
271
272
273 rx_ring->Fbr[1]->Va[index] =
274 (uint8_t *) rx_ring->Fbr1MemVa[OuterLoop] +
275 (InnerLoop * rx_ring->Fbr1BufferSize) + Fbr1Offset;
276
277
278
279
280 rx_ring->Fbr[1]->PAHigh[index] =
281 (uint32_t) (Fbr1TempPa >> 32);
282 rx_ring->Fbr[1]->PALow[index] = (uint32_t) Fbr1TempPa;
283
284 Fbr1TempPa += rx_ring->Fbr1BufferSize;
285
286 rx_ring->Fbr[1]->Buffer1[index] =
287 rx_ring->Fbr[1]->Va[index];
288 rx_ring->Fbr[1]->Buffer2[index] =
289 rx_ring->Fbr[1]->Va[index] - 4;
290 }
291 }
292
293#ifdef USE_FBR0
294
295 for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
296 OuterLoop++) {
297 uint64_t Fbr0Offset;
298 uint64_t Fbr0TempPa;
299
300 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301 rx_ring->Fbr0MemVa[OuterLoop] =
302 pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303 &rx_ring->Fbr0MemPa[OuterLoop]);
304
305 if (!rx_ring->Fbr0MemVa[OuterLoop]) {
306 dev_err(&adapter->pdev->dev,
307 "Could not alloc memory\n");
308 return -ENOMEM;
309 }
310
311
312 Fbr0TempPa = rx_ring->Fbr0MemPa[OuterLoop];
313
314 et131x_align_allocated_memory(adapter,
315 &Fbr0TempPa,
316 &Fbr0Offset,
317 rx_ring->Fbr0BufferSize - 1);
318
319 for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
320 uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;
321
322 rx_ring->Fbr[0]->Va[index] =
323 (uint8_t *) rx_ring->Fbr0MemVa[OuterLoop] +
324 (InnerLoop * rx_ring->Fbr0BufferSize) + Fbr0Offset;
325
326 rx_ring->Fbr[0]->PAHigh[index] =
327 (uint32_t) (Fbr0TempPa >> 32);
328 rx_ring->Fbr[0]->PALow[index] = (uint32_t) Fbr0TempPa;
329
330 Fbr0TempPa += rx_ring->Fbr0BufferSize;
331
332 rx_ring->Fbr[0]->Buffer1[index] =
333 rx_ring->Fbr[0]->Va[index];
334 rx_ring->Fbr[0]->Buffer2[index] =
335 rx_ring->Fbr[0]->Va[index] - 4;
336 }
337 }
338#endif
339
340
341 pktStatRingSize =
342 sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
343
344 rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
345 pktStatRingSize + 0x0fff,
346 &rx_ring->pPSRingPa);
347
348 if (!rx_ring->pPSRingVa) {
349 dev_err(&adapter->pdev->dev,
350 "Cannot alloc memory for Packet Status Ring\n");
351 return -ENOMEM;
352 }
353
354
355
356
357
358
359
360
361 rx_ring->pPSRingRealPa = rx_ring->pPSRingPa;
362
363
364 et131x_align_allocated_memory(adapter,
365 &rx_ring->pPSRingRealPa,
366 &rx_ring->pPSRingOffset, 0x0FFF);
367
368 rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa +
369 rx_ring->pPSRingOffset);
370
371
372 rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
373 sizeof(RX_STATUS_BLOCK_t) +
374 0x7, &rx_ring->pRxStatusPa);
375 if (!rx_ring->pRxStatusVa) {
376 dev_err(&adapter->pdev->dev,
377 "Cannot alloc memory for Status Block\n");
378 return -ENOMEM;
379 }
380
381
382 rx_ring->RxStatusRealPA = rx_ring->pRxStatusPa;
383
384
385 et131x_align_allocated_memory(adapter,
386 &rx_ring->RxStatusRealPA,
387 &rx_ring->RxStatusOffset, 0x07);
388
389 rx_ring->pRxStatusVa = (void *)((uint8_t *) rx_ring->pRxStatusVa +
390 rx_ring->RxStatusOffset);
391 rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
392
393
394
395
396
397
398
399 rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
400 sizeof(MP_RFD),
401 0,
402 SLAB_CACHE_DMA |
403 SLAB_HWCACHE_ALIGN,
404 NULL);
405
406 adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
407
408
409
410
411 INIT_LIST_HEAD(&rx_ring->RecvList);
412 INIT_LIST_HEAD(&rx_ring->RecvPendingList);
413 return 0;
414}
415
416
417
418
419
/* et131x_rx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 *
 * Mirrors et131x_rx_dma_memory_alloc(): every buffer size recomputed here
 * (including alignment slack) must match the size originally requested, and
 * every aligned virtual address is rewound by its stored offset before being
 * handed back to pci_free_consistent().  All pointers are NULL-checked so a
 * partially-built RX ring can be torn down safely.
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	uint32_t index;
	uint32_t bufsize;
	uint32_t pktStatRingSize;
	PMP_RFD pMpRfd;
	RX_RING_t *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Every RFD should be back on RecvList by now; a mismatch means
	 * some are still outstanding (e.g. on RecvPendingList).
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	/* Free every RFD queued on the receive list */
	while (!list_empty(&rx_ring->RecvList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}

	/* ...and any still on the pending list */
	while (!list_empty(&rx_ring->RecvPendingList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvPendingList.next,
					       MP_RFD, list_node);
		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}

	/* Free Free Buffer Ring 1: first the buffer chunks, then the ring */
	if (rx_ring->pFbr1RingVa) {
		/* Free the chunks of receive-buffer memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				uint32_t Fbr1Align;

				/* Recompute the alignment slack exactly as
				 * the alloc path did (capped at 4K).
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Rewind the alignment offset before freeing the ring */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Same sequence for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* Free the chunks of receive-buffer memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				/* FBR0 chunks were padded by one extra
				 * buffer-size, matching the alloc path.
				 */
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Rewind the alignment offset before freeing the ring */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free the Packet Status Ring (rewinding its alignment offset) */
	if (rx_ring->pPSRingVa) {
		rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa -
					      rx_ring->pPSRingOffset);

		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev,
				    pktStatRingSize + 0x0fff,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free the RX Status (writeback) Block, rewinding its 8-byte
	 * alignment offset.
	 */
	if (rx_ring->pRxStatusVa) {
		rx_ring->pRxStatusVa = (void *)((uint8_t *)
			rx_ring->pRxStatusVa - rx_ring->RxStatusOffset);

		pci_free_consistent(adapter->pdev,
				    sizeof(RX_STATUS_BLOCK_t) + 0x7,
				    rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}

	/* Destroy the RFD lookaside cache, but only if it was created
	 * (the flag is set by et131x_rx_dma_memory_alloc()).
	 */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR lookup tables (kfree(NULL) is a no-op) */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
571
572
573
574
575
576
577
578int et131x_init_recv(struct et131x_adapter *adapter)
579{
580 int status = -ENOMEM;
581 PMP_RFD pMpRfd = NULL;
582 uint32_t RfdCount;
583 uint32_t TotalNumRfd = 0;
584 RX_RING_t *rx_ring = NULL;
585
586
587 rx_ring = (RX_RING_t *) &adapter->RxRing;
588
589
590 for (RfdCount = 0; RfdCount < rx_ring->NumRfd; RfdCount++) {
591 pMpRfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
592 GFP_ATOMIC | GFP_DMA);
593
594 if (!pMpRfd) {
595 dev_err(&adapter->pdev->dev,
596 "Couldn't alloc RFD out of kmem_cache\n");
597 status = -ENOMEM;
598 continue;
599 }
600
601 status = et131x_rfd_resources_alloc(adapter, pMpRfd);
602 if (status != 0) {
603 dev_err(&adapter->pdev->dev,
604 "Couldn't alloc packet for RFD\n");
605 kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
606 continue;
607 }
608
609
610 list_add_tail(&pMpRfd->list_node, &rx_ring->RecvList);
611
612
613 rx_ring->nReadyRecv++;
614 TotalNumRfd++;
615 }
616
617 if (TotalNumRfd > NIC_MIN_NUM_RFD)
618 status = 0;
619
620 rx_ring->NumRfd = TotalNumRfd;
621
622 if (status != 0) {
623 kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
624 dev_err(&adapter->pdev->dev,
625 "Allocation problems in et131x_init_recv\n");
626 }
627 return status;
628}
629
630
631
632
633
634
635
636
637int et131x_rfd_resources_alloc(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
638{
639 pMpRfd->Packet = NULL;
640
641 return 0;
642}
643
644
645
646
647
648
649void et131x_rfd_resources_free(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
650{
651 pMpRfd->Packet = NULL;
652 kmem_cache_free(adapter->RxRing.RecvLookaside, pMpRfd);
653}
654
655
656
657
658
/* ConfigRxDmaRegs - Program the RX DMA engine's registers
 * @etdev: pointer to our adapter structure
 *
 * Halts the RX DMA engine, loads the physical addresses and sizes of the
 * status writeback block, packet status ring, and free buffer ring(s), and
 * seeds the rings' descriptors from the lookup tables built at alloc time.
 * The engine is left halted; et131x_rx_dma_enable() restarts it.
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	struct _rx_ring_t *pRxLocal = &etdev->RxRing;
	PFBR_DESC_t fbr_entry;
	uint32_t entry;
	RXDMA_PSR_NUM_DES_t psr_num_des;
	unsigned long flags;

	/* Halt RXDMA before reprogramming its registers */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address (split across the
	 * hi/lo register pair).
	 */
	writel((uint32_t) (pRxLocal->RxStatusRealPA >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((uint32_t) pRxLocal->RxStatusRealPA, &rx_dma->dma_wb_base_lo);

	/* Clear the status block so stale offsets aren't read back */
	memset(pRxLocal->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring */
	writel((uint32_t) (pRxLocal->pPSRingRealPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((uint32_t) pRxLocal->pPSRingRealPa, &rx_dma->psr_base_lo);
	writel(pRxLocal->PsrNumEntries - 1, &rx_dma->psr_num_des.value);
	writel(0, &rx_dma->psr_full_offset.value);

	/* Set the PSR minimum-descriptors watermark as a percentage of the
	 * ring size just programmed (read back, then scaled).
	 */
	psr_num_des.value = readl(&rx_dma->psr_num_des.value);
	writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des.value);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* Reset the software copy of the PSR full-offset register */
	pRxLocal->local_psr_full.bits.psr_full = 0;
	pRxLocal->local_psr_full.bits.psr_full_wrap = 0;

	/* Seed every FBR1 descriptor with its buffer's PA and index from
	 * the lookup table.
	 */
	fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr1RingVa;
	for (entry = 0; entry < pRxLocal->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = pRxLocal->Fbr[1]->PAHigh[entry];
		fbr_entry->addr_lo = pRxLocal->Fbr[1]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	/* Program FBR1's base address, size, and full offset (initialized
	 * to the wrap bit so hardware sees the ring as completely full).
	 */
	writel((uint32_t) (pRxLocal->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((uint32_t) pRxLocal->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(pRxLocal->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des.value);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* Keep the software shadow of the full offset in sync, and set the
	 * low-water mark for FBR1 as a percentage of ring size.
	 */
	pRxLocal->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((pRxLocal->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des.value);

#ifdef USE_FBR0
	/* Same sequence for FBR0 */
	fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr0RingVa;
	for (entry = 0; entry < pRxLocal->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = pRxLocal->Fbr[0]->PAHigh[entry];
		fbr_entry->addr_lo = pRxLocal->Fbr[0]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	writel((uint32_t) (pRxLocal->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((uint32_t) pRxLocal->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(pRxLocal->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des.value);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* Shadow and low-water mark for FBR0, as above */
	pRxLocal->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((pRxLocal->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des.value);
#endif

	/* Interrupt coalescing: number of packets processed before an
	 * interrupt is raised (default from registry parameters).
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done.value);

	/* ...and the maximum time to wait before raising one anyway */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
766
767
768
769
770
771void SetRxDmaTimer(struct et131x_adapter *etdev)
772{
773
774
775
776 if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
777 (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
778 writel(0, &etdev->regs->rxdma.max_pkt_time.value);
779 writel(1, &etdev->regs->rxdma.num_pkt_done.value);
780 }
781}
782
783
784
785
786
787void et131x_rx_dma_disable(struct et131x_adapter *etdev)
788{
789 RXDMA_CSR_t csr;
790
791
792 writel(0x00002001, &etdev->regs->rxdma.csr.value);
793 csr.value = readl(&etdev->regs->rxdma.csr.value);
794 if (csr.bits.halt_status != 1) {
795 udelay(5);
796 csr.value = readl(&etdev->regs->rxdma.csr.value);
797 if (csr.bits.halt_status != 1)
798 dev_err(&etdev->pdev->dev,
799 "RX Dma failed to enter halt state. CSR 0x%08x\n",
800 csr.value);
801 }
802}
803
804
805
806
807
808void et131x_rx_dma_enable(struct et131x_adapter *etdev)
809{
810 if (etdev->RegistryPhyLoopbk)
811
812 writel(0x1, &etdev->regs->rxdma.csr.value);
813 else {
814
815 RXDMA_CSR_t csr = { 0 };
816
817 csr.bits.fbr1_enable = 1;
818 if (etdev->RxRing.Fbr1BufferSize == 4096)
819 csr.bits.fbr1_size = 1;
820 else if (etdev->RxRing.Fbr1BufferSize == 8192)
821 csr.bits.fbr1_size = 2;
822 else if (etdev->RxRing.Fbr1BufferSize == 16384)
823 csr.bits.fbr1_size = 3;
824#ifdef USE_FBR0
825 csr.bits.fbr0_enable = 1;
826 if (etdev->RxRing.Fbr0BufferSize == 256)
827 csr.bits.fbr0_size = 1;
828 else if (etdev->RxRing.Fbr0BufferSize == 512)
829 csr.bits.fbr0_size = 2;
830 else if (etdev->RxRing.Fbr0BufferSize == 1024)
831 csr.bits.fbr0_size = 3;
832#endif
833 writel(csr.value, &etdev->regs->rxdma.csr.value);
834
835 csr.value = readl(&etdev->regs->rxdma.csr.value);
836 if (csr.bits.halt_status != 0) {
837 udelay(5);
838 csr.value = readl(&etdev->regs->rxdma.csr.value);
839 if (csr.bits.halt_status != 0) {
840 dev_err(&etdev->pdev->dev,
841 "RX Dma failed to exit halt state. CSR 0x%08x\n",
842 csr.value);
843 }
844 }
845 }
846}
847
848
849
850
851
852
853
854
855
856
857
858
859PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
860{
861 struct _rx_ring_t *pRxLocal = &etdev->RxRing;
862 PRX_STATUS_BLOCK_t pRxStatusBlock;
863 PPKT_STAT_DESC_t pPSREntry;
864 PMP_RFD pMpRfd;
865 uint32_t nIndex;
866 uint8_t *pBufVa;
867 unsigned long flags;
868 struct list_head *element;
869 uint8_t ringIndex;
870 uint16_t bufferIndex;
871 uint32_t localLen;
872 PKT_STAT_DESC_WORD0_t Word0;
873
874
875
876
877
878 pRxStatusBlock = (PRX_STATUS_BLOCK_t) pRxLocal->pRxStatusVa;
879
880 if (pRxStatusBlock->Word1.bits.PSRoffset ==
881 pRxLocal->local_psr_full.bits.psr_full &&
882 pRxStatusBlock->Word1.bits.PSRwrap ==
883 pRxLocal->local_psr_full.bits.psr_full_wrap) {
884
885 return NULL;
886 }
887
888
889 pPSREntry = (PPKT_STAT_DESC_t) (pRxLocal->pPSRingVa) +
890 pRxLocal->local_psr_full.bits.psr_full;
891
892
893
894
895
896 localLen = pPSREntry->word1.bits.length;
897 ringIndex = (uint8_t) pPSREntry->word1.bits.ri;
898 bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
899 Word0 = pPSREntry->word0;
900
901
902 if (++pRxLocal->local_psr_full.bits.psr_full >
903 pRxLocal->PsrNumEntries - 1) {
904 pRxLocal->local_psr_full.bits.psr_full = 0;
905 pRxLocal->local_psr_full.bits.psr_full_wrap ^= 1;
906 }
907
908 writel(pRxLocal->local_psr_full.value,
909 &etdev->regs->rxdma.psr_full_offset.value);
910
911#ifndef USE_FBR0
912 if (ringIndex != 1) {
913 return NULL;
914 }
915#endif
916
917#ifdef USE_FBR0
918 if (ringIndex > 1 ||
919 (ringIndex == 0 &&
920 bufferIndex > pRxLocal->Fbr0NumEntries - 1) ||
921 (ringIndex == 1 &&
922 bufferIndex > pRxLocal->Fbr1NumEntries - 1))
923#else
924 if (ringIndex != 1 ||
925 bufferIndex > pRxLocal->Fbr1NumEntries - 1)
926#endif
927 {
928
929 dev_err(&etdev->pdev->dev,
930 "NICRxPkts PSR Entry %d indicates "
931 "length of %d and/or bad bi(%d)\n",
932 pRxLocal->local_psr_full.bits.psr_full,
933 localLen, bufferIndex);
934 return NULL;
935 }
936
937
938 spin_lock_irqsave(&etdev->RcvLock, flags);
939
940 pMpRfd = NULL;
941 element = pRxLocal->RecvList.next;
942 pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
943
944 if (pMpRfd == NULL) {
945 spin_unlock_irqrestore(&etdev->RcvLock, flags);
946 return NULL;
947 }
948
949 list_del(&pMpRfd->list_node);
950 pRxLocal->nReadyRecv--;
951
952 spin_unlock_irqrestore(&etdev->RcvLock, flags);
953
954 pMpRfd->bufferindex = bufferIndex;
955 pMpRfd->ringindex = ringIndex;
956
957
958
959
960
961
962 if (localLen < (NIC_MIN_PACKET_SIZE + 4)) {
963 etdev->Stats.other_errors++;
964 localLen = 0;
965 }
966
967 if (localLen) {
968 if (etdev->ReplicaPhyLoopbk == 1) {
969 pBufVa = pRxLocal->Fbr[ringIndex]->Va[bufferIndex];
970
971 if (memcmp(&pBufVa[6], &etdev->CurrentAddress[0],
972 ETH_ALEN) == 0) {
973 if (memcmp(&pBufVa[42], "Replica packet",
974 ETH_HLEN)) {
975 etdev->ReplicaPhyLoopbkPF = 1;
976 }
977 }
978 }
979
980
981 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
982 !(Word0.value & ALCATEL_BROADCAST_PKT)) {
983
984
985
986
987
988
989
990 if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
991 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
992 && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
993 pBufVa = pRxLocal->Fbr[ringIndex]->
994 Va[bufferIndex];
995
996
997
998
999
1000 for (nIndex = 0;
1001 nIndex < etdev->MCAddressCount;
1002 nIndex++) {
1003 if (pBufVa[0] ==
1004 etdev->MCList[nIndex][0]
1005 && pBufVa[1] ==
1006 etdev->MCList[nIndex][1]
1007 && pBufVa[2] ==
1008 etdev->MCList[nIndex][2]
1009 && pBufVa[3] ==
1010 etdev->MCList[nIndex][3]
1011 && pBufVa[4] ==
1012 etdev->MCList[nIndex][4]
1013 && pBufVa[5] ==
1014 etdev->MCList[nIndex][5]) {
1015 break;
1016 }
1017 }
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027 if (nIndex == etdev->MCAddressCount)
1028 localLen = 0;
1029 }
1030
1031 if (localLen > 0)
1032 etdev->Stats.multircv++;
1033 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
1034 etdev->Stats.brdcstrcv++;
1035 else
1036
1037
1038
1039
1040
1041 etdev->Stats.unircv++;
1042 }
1043
1044 if (localLen > 0) {
1045 struct sk_buff *skb = NULL;
1046
1047
1048 pMpRfd->PacketSize = localLen;
1049
1050 skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
1051 if (!skb) {
1052 dev_err(&etdev->pdev->dev,
1053 "Couldn't alloc an SKB for Rx\n");
1054 return NULL;
1055 }
1056
1057 etdev->net_stats.rx_bytes += pMpRfd->PacketSize;
1058
1059 memcpy(skb_put(skb, pMpRfd->PacketSize),
1060 pRxLocal->Fbr[ringIndex]->Va[bufferIndex],
1061 pMpRfd->PacketSize);
1062
1063 skb->dev = etdev->netdev;
1064 skb->protocol = eth_type_trans(skb, etdev->netdev);
1065 skb->ip_summed = CHECKSUM_NONE;
1066
1067 netif_rx(skb);
1068 } else {
1069 pMpRfd->PacketSize = 0;
1070 }
1071
1072 nic_return_rfd(etdev, pMpRfd);
1073 return pMpRfd;
1074}
1075
1076
1077
1078
1079
1080
1081
1082void et131x_reset_recv(struct et131x_adapter *etdev)
1083{
1084 PMP_RFD pMpRfd;
1085 struct list_head *element;
1086
1087 WARN_ON(list_empty(&etdev->RxRing.RecvList));
1088
1089
1090
1091
1092 while (!list_empty(&etdev->RxRing.RecvPendingList)) {
1093 element = etdev->RxRing.RecvPendingList.next;
1094
1095 pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
1096
1097 list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
1098 }
1099}
1100
1101
1102
1103
1104
1105
1106
1107void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1108{
1109 PMP_RFD pMpRfd = NULL;
1110 struct sk_buff *PacketArray[NUM_PACKETS_HANDLED];
1111 PMP_RFD RFDFreeArray[NUM_PACKETS_HANDLED];
1112 uint32_t PacketArrayCount = 0;
1113 uint32_t PacketsToHandle;
1114 uint32_t PacketFreeCount = 0;
1115 bool TempUnfinishedRec = false;
1116
1117 PacketsToHandle = NUM_PACKETS_HANDLED;
1118
1119
1120 while (PacketArrayCount < PacketsToHandle) {
1121 if (list_empty(&etdev->RxRing.RecvList)) {
1122 WARN_ON(etdev->RxRing.nReadyRecv != 0);
1123 TempUnfinishedRec = true;
1124 break;
1125 }
1126
1127 pMpRfd = nic_rx_pkts(etdev);
1128
1129 if (pMpRfd == NULL)
1130 break;
1131
1132
1133
1134
1135
1136
1137 if (!etdev->PacketFilter ||
1138 !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1139 pMpRfd->PacketSize == 0) {
1140 continue;
1141 }
1142
1143
1144 etdev->Stats.ipackets++;
1145
1146
1147 if (etdev->RxRing.nReadyRecv >= RFD_LOW_WATER_MARK) {
1148
1149
1150
1151
1152
1153
1154
1155
1156 } else {
1157 RFDFreeArray[PacketFreeCount] = pMpRfd;
1158 PacketFreeCount++;
1159
1160 dev_warn(&etdev->pdev->dev,
1161 "RFD's are running out\n");
1162 }
1163
1164 PacketArray[PacketArrayCount] = pMpRfd->Packet;
1165 PacketArrayCount++;
1166 }
1167
1168 if ((PacketArrayCount == NUM_PACKETS_HANDLED) || TempUnfinishedRec) {
1169 etdev->RxRing.UnfinishedReceives = true;
1170 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1171 &etdev->regs->global.watchdog_timer);
1172 } else {
1173
1174 etdev->RxRing.UnfinishedReceives = false;
1175 }
1176}
1177
1178static inline u32 bump_fbr(u32 *fbr, u32 limit)
1179{
1180 u32 v = *fbr;
1181 v++;
1182
1183
1184
1185
1186 if ((v & ET_DMA10_MASK) > limit) {
1187 v &= ~ET_DMA10_MASK;
1188 v ^= ET_DMA10_WRAP;
1189 }
1190
1191 v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1192 *fbr = v;
1193 return v;
1194}
1195
1196
1197
1198
1199
1200
/* nic_return_rfd - Recycle an RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @pMpRfd: pointer to the RFD being returned
 *
 * Re-posts the RFD's free buffer to the appropriate FBR (writing its
 * PA/index into the oldest descriptor slot and advancing the hardware
 * full-offset register), then requeues the RFD on RecvList.
 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
{
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	uint16_t bi = pMpRfd->bufferindex;
	uint8_t ri = pMpRfd->ringindex;
	unsigned long flags;

	/* Only re-post the buffer if the ring/buffer indices are within
	 * range; an out-of-range pair means the RFD was corrupted and the
	 * buffer cannot safely be given back to hardware.
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			/* Write the returned buffer's PA and index into the
			 * oldest (next-to-be-filled) FBR1 descriptor slot.
			 */
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
			    INDEX10(rx_local->local_Fbr1_full);

			pNextDesc->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
			pNextDesc->addr_lo = rx_local->Fbr[1]->PALow[bi];
			pNextDesc->word2.value = bi;

			/* Advance the shadow offset (with wrap) and tell the
			 * hardware about the newly available buffer.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
					rx_local->Fbr1NumEntries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			/* Same re-post sequence for FBR0 */
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) rx_local->pFbr0RingVa +
			    INDEX10(rx_local->local_Fbr0_full);

			pNextDesc->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
			pNextDesc->addr_lo = rx_local->Fbr[0]->PALow[bi];
			pNextDesc->word2.value = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			"NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list.
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&pMpRfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
1271