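/*
 * SysKonnect FDDI driver: hardware module (HWM).
 * Contains the OS-independent receive, transmit and interrupt
 * service code shared by the OS-specific parts of the driver.
 */
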
#define HWMTM

#ifndef FDDI
#define FDDI
#endif

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"

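/*
 * Local variables.
 */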
#ifdef COMMON_MB_POOL
static SMbuf *mb_start = 0 ;
static SMbuf *mb_free = 0 ;
static int mb_init = FALSE ;
static int call_count = 0 ;
#endif

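/*
 * External variables.
 */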
#ifdef DEBUG
#ifndef DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif

#ifdef NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif

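/*
 * Local function prototypes.
 */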
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc,
                              union s_fp_descr volatile *start, int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);

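/*
 * Functions that must be provided by the OS-specific part of the driver.
 */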
extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
                                volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
                                volatile struct s_smt_fp_rxd *rxd,
                                int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
                                volatile struct s_smt_fp_rxd *rxd,
                                int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
                              volatile struct s_smt_fp_rxd *rxd, int frag_count);

#ifdef USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif

#ifdef ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif

extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);

#ifdef NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
                         int flag);
#endif

extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
                           int la_len);

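/*
 * Functions exported by this module.
 */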
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                 int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                 int frame_status);

int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
                int frame_status);

u_int mac_drv_check_space(void);

SMbuf* smt_get_mbuf(struct s_smc *smc);

#ifdef DEBUG
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif

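/*
 * Macros.
 */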
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
#else
#define UNUSED(x)
#endif
#endif

#ifdef USE_CAN_ADDR
#define MA smc->hw.fddi_canon_addr.a
#define GROUP_ADDR_BIT 0x01
#else
#define MA smc->hw.fddi_home_addr.a
#define GROUP_ADDR_BIT 0x80
#endif

#define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
                       SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)

#ifdef MB_OUTSIDE_SMC
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
                      MAX_MBUF*sizeof(SMbuf))
#define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif

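/*
 * Define a "critical read" of a 32-bit control register: the 16-bit
 * environments (NDIS_OS2, ODI2) combine the two 16-bit halves
 * explicitly; everywhere else a plain 32-bit little-endian read is
 * sufficient.
 */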
#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var) (((var) & 0xffff0000) | ((var) & 0xffff))
#else
#define CR_READ(var) (__le32)(var)
#endif

#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
                    IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
                    IS_R1_C | IS_XA_C | IS_XS_C)

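/*
 * mac_drv_check_space -- return the size (in bytes) of the driver's
 * private non-virtual memory needs: descriptor memory plus, if the
 * mbuf pool lives outside the SMT context (MB_OUTSIDE_SMC), the pool
 * itself.  With COMMON_MB_POOL the pool is only accounted for on the
 * first call; subsequent adapters share it.
 */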
u_int mac_drv_check_space(void)
{
#ifdef MB_OUTSIDE_SMC
#ifdef COMMON_MB_POOL
        call_count++ ;
        if (call_count == 1) {
                return EXT_VIRT_MEM;
        }
        else {
                return EXT_VIRT_MEM_2;
        }
#else
        return EXT_VIRT_MEM;
#endif
#else
        return 0;
#endif
}

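/*
 * mac_drv_init -- initialize the hardware module: verify that the
 * descriptor structures are 16-byte aligned, then allocate the
 * descriptor area and (if configured outside the SMT context) the
 * SMbuf pool.  Returns 0 on success, 1 if an allocation fails.
 */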
int mac_drv_init(struct s_smc *smc)
{
        if (sizeof(struct s_smt_fp_rxd) % 16) {
                SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
        }
        if (sizeof(struct s_smt_fp_txd) % 16) {
                SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
        }

        /*
         * get the required memory for the RxDs and TxDs
         */
        if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
                mac_drv_get_desc_mem(smc,(u_int)
                (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
                return 1;
        }

        /*
         * get the memory for the SMT MBufs
         */
#ifndef MB_OUTSIDE_SMC
        smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef COMMON_MB_POOL
        if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
                MAX_MBUF*sizeof(SMbuf)))) {
                return 1;
        }
#else
        if (!mb_start) {
                if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
                        MAX_MBUF*sizeof(SMbuf)))) {
                        return 1;
                }
        }
#endif
#endif
        return 0;
}

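/*
 * init_driver_fplus -- initialize the FORMAC+ specific values in the
 * hardware context: mode register 2/3 defaults and, with USE_CAN_ADDR,
 * address bit swapping in the frame selection register.
 */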
void init_driver_fplus(struct s_smc *smc)
{
        smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef PCI
        smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
        smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef USE_CAN_ADDR
        /* enable address bit swapping */
        smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}

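/*
 * init_descr_ring -- chain 'count' descriptors starting at 'start'
 * into a circular ring: each descriptor gets the BMU_CHECK pattern and
 * the physical address of its successor.  Returns the physical address
 * of the first descriptor.
 */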
static u_long init_descr_ring(struct s_smc *smc,
                              union s_fp_descr volatile *start,
                              int count)
{
        int i ;
        union s_fp_descr volatile *d1 ;
        union s_fp_descr volatile *d2 ;
        u_long phys ;

        DB_GEN(3, "descr ring starts at = %p", start);
        for (i=count-1, d1=start; i ; i--) {
                d2 = d1 ;
                d1++ ;
                d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
                d2->r.rxd_next = &d1->r ;
                phys = mac_drv_virt2phys(smc,(void *)d1) ;
                d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
        }
        DB_GEN(3, "descr ring ends at = %p", d1);
        d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
        d1->r.rxd_next = &start->r ;
        phys = mac_drv_virt2phys(smc,(void *)start) ;
        d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

        for (i=count, d1=start; i ; i--) {
                DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
                d1++;
        }
        return phys;
}

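/*
 * init_txd_ring -- initialize the transmit descriptor rings: the async
 * ring (QUEUE_A0) follows the receive descriptors in the descriptor
 * area, the sync ring (QUEUE_S) follows the async ring.  The physical
 * ring addresses are written to the BMU descriptor address registers.
 */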
static void init_txd_ring(struct s_smc *smc)
{
        struct s_smt_fp_txd volatile *ds ;
        struct s_smt_tx_queue *queue ;
        u_long phys ;

        /*
         * initialize the transmit descriptors
         */
        ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
                SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
        queue = smc->hw.fp.tx[QUEUE_A0] ;
        DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                HWM_ASYNC_TXD_COUNT) ;
        phys = le32_to_cpu(ds->txd_ntdadr) ;
        ds++ ;
        queue->tx_curr_put = queue->tx_curr_get = ds ;
        ds-- ;
        queue->tx_free = HWM_ASYNC_TXD_COUNT ;
        queue->tx_used = 0 ;
        outpd(ADDR(B5_XA_DA),phys) ;

        ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
                HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
        queue = smc->hw.fp.tx[QUEUE_S] ;
        DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                HWM_SYNC_TXD_COUNT) ;
        phys = le32_to_cpu(ds->txd_ntdadr) ;
        ds++ ;
        queue->tx_curr_put = queue->tx_curr_get = ds ;
        queue->tx_free = HWM_SYNC_TXD_COUNT ;
        queue->tx_used = 0 ;
        outpd(ADDR(B5_XS_DA),phys) ;
}

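/*
 * init_rxd_ring -- initialize the receive descriptor ring (QUEUE_R1)
 * at the start of the descriptor area and write its physical address
 * to the BMU.
 */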
static void init_rxd_ring(struct s_smc *smc)
{
        struct s_smt_fp_rxd volatile *ds ;
        struct s_smt_rx_queue *queue ;
        u_long phys ;

        ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
        queue = smc->hw.fp.rx[QUEUE_R1] ;
        DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                SMT_R1_RXD_COUNT) ;
        phys = le32_to_cpu(ds->rxd_nrdadr) ;
        ds++ ;
        queue->rx_curr_put = queue->rx_curr_get = ds ;
        queue->rx_free = SMT_R1_RXD_COUNT ;
        queue->rx_used = 0 ;
        outpd(ADDR(B4_R1_DA),phys) ;
}

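/*
 * init_fddi_driver -- initialize the driver's hardware-dependent part:
 * set up the board and FORMAC+, build the SMbuf free list, reset the
 * receive/transmit bookkeeping, align the descriptor area to a 16-byte
 * boundary, initialize the descriptor rings and the PLC.
 */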
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
        SMbuf *mb ;
        int i ;

        init_board(smc,mac_addr) ;
        (void)init_fplus(smc) ;

        /*
         * initialize the SMbufs for the SMT
         */
#ifndef COMMON_MB_POOL
        mb = smc->os.hwm.mbuf_pool.mb_start ;
        smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
        for (i = 0; i < MAX_MBUF; i++) {
                mb->sm_use_count = 1 ;
                smt_free_mbuf(smc,mb) ;
                mb++ ;
        }
#else
        mb = mb_start ;
        if (!mb_init) {
                mb_free = 0 ;
                for (i = 0; i < MAX_MBUF; i++) {
                        mb->sm_use_count = 1 ;
                        smt_free_mbuf(smc,mb) ;
                        mb++ ;
                }
                mb_init = TRUE ;
        }
#endif

        /*
         * initialize the internal HWM queues and counters
         */
        smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
        smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
        smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
        smc->os.hwm.pass_llc_promisc = TRUE ;
        smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
        smc->os.hwm.detec_count = 0 ;
        smc->os.hwm.rx_break = 0 ;
        smc->os.hwm.rx_len_error = 0 ;
        smc->os.hwm.isr_flag = FALSE ;

        /*
         * make sure that the descriptor area is 16-byte aligned
         */
        i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
        if (i != 16) {
                DB_GEN(3, "i = %d", i);
                smc->os.hwm.descr_p = (union s_fp_descr volatile *)
                        ((char *)smc->os.hwm.descr_p+i) ;
        }
        DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

        init_txd_ring(smc) ;
        init_rxd_ring(smc) ;
        mac_drv_fill_rxd(smc) ;

        init_plc(smc) ;
}

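/*
 * smt_get_mbuf -- allocate an SMbuf from the free list.  Returns NULL
 * if the pool is exhausted.
 */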
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
        register SMbuf *mb ;

#ifndef COMMON_MB_POOL
        mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
        mb = mb_free ;
#endif
        if (mb) {
#ifndef COMMON_MB_POOL
                smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
                mb_free = mb->sm_next ;
#endif
                mb->sm_off = 8 ;
                mb->sm_use_count = 1 ;
        }
        DB_GEN(3, "get SMbuf: mb = %p", mb);
        return mb;
}

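/*
 * smt_free_mbuf -- decrement the use count of an SMbuf and return it
 * to the free list once the count reaches zero.  Panics on a NULL
 * mbuf.
 */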
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{
        if (mb) {
                mb->sm_use_count-- ;
                DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
                /*
                 * If the use count is still non-zero the mbuf is queued
                 * more than once and must not be freed yet.
                 */
                if (!mb->sm_use_count) {
                        DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef COMMON_MB_POOL
                        mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
                        smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
                        mb->sm_next = mb_free ;
                        mb_free = mb ;
#endif
                }
        }
        else
                SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}

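/*
 * mac_drv_repair_descr -- bring the transmit and receive descriptor
 * rings back into a consistent state after the BMU was stopped: clear
 * the OWN bit of partly processed descriptors, reload the BMU
 * descriptor address registers and restart the queues that still hold
 * work.  The adapter must be STOPPED when this is called.
 */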
void mac_drv_repair_descr(struct s_smc *smc)
{
        u_long phys ;

        if (smc->hw.hw_state != STOPPED) {
                SK_BREAK() ;
                SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
                return ;
        }

        /*
         * repair the transmit queues
         */
        phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
        outpd(ADDR(B5_XA_DA),phys) ;
        if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
                outpd(ADDR(B0_XA_CSR),CSR_START) ;
        }
        phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
        outpd(ADDR(B5_XS_DA),phys) ;
        if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
                outpd(ADDR(B0_XS_CSR),CSR_START) ;
        }

        /*
         * repair the receive queue
         */
        phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
        outpd(ADDR(B4_R1_DA),phys) ;
        outpd(ADDR(B0_R1_CSR),CSR_START) ;
}

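/*
 * repair_txd_ring -- walk the TxD ring of 'queue', clear the OWN bit
 * of every descriptor the BMU has not started on, and return the
 * physical address at which transmission should resume.
 */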
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
        int i ;
        int tx_used ;
        u_long phys ;
        u_long tbctrl ;
        struct s_smt_fp_txd volatile *t ;

        SK_UNUSED(smc) ;

        t = queue->tx_curr_get ;
        tx_used = queue->tx_used ;
        for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
                t = t->txd_next ;
        }
        phys = le32_to_cpu(t->txd_ntdadr) ;

        t = queue->tx_curr_get ;
        while (tx_used) {
                DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
                tbctrl = le32_to_cpu(t->txd_tbctrl) ;

                if (tbctrl & BMU_OWN) {
                        if (tbctrl & BMU_STF) {
                                break ;         /* exit the loop */
                        }
                        else {
                                /*
                                 * repair the descriptor
                                 */
                                t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        }
                }
                phys = le32_to_cpu(t->txd_ntdadr) ;
                DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                t = t->txd_next ;
                tx_used-- ;
        }
        return phys;
}

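/*
 * repair_rxd_ring -- same as repair_txd_ring, but for the RxD ring:
 * clear the OWN bit of descriptors the BMU has not started on and
 * return the physical address at which receiving should resume.
 */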
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
        int i ;
        int rx_used ;
        u_long phys ;
        u_long rbctrl ;
        struct s_smt_fp_rxd volatile *r ;

        SK_UNUSED(smc) ;

        r = queue->rx_curr_get ;
        rx_used = queue->rx_used ;
        for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
                r = r->rxd_next ;
        }
        phys = le32_to_cpu(r->rxd_nrdadr) ;

        r = queue->rx_curr_get ;
        while (rx_used) {
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

                if (rbctrl & BMU_OWN) {
                        if (rbctrl & BMU_STF) {
                                break ;         /* exit the loop */
                        }
                        else {
                                /*
                                 * repair the descriptor
                                 */
                                r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        }
                }
                phys = le32_to_cpu(r->rxd_nrdadr) ;
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
                r = r->rxd_next ;
                rx_used-- ;
        }
        return phys;
}

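/*
 * fddi_isr -- the driver's interrupt service routine.  Reads and
 * dispatches all pending interrupt sources: PLC, FORMAC and timer
 * events via the "slow" handlers, transmit-complete and
 * receive-complete events via mac_drv_clear_txd()/process_receive(),
 * then drains the queued LLC receive frames and the SMT event queue.
 */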
void fddi_isr(struct s_smc *smc)
{
        u_long is ;             /* interrupt source register */
        u_short stu, stl ;
        SMbuf *mb ;

#ifdef USE_BREAK_ISR
        int force_irq ;
#endif

#ifdef ODI2
        if (smc->os.hwm.rx_break) {
                mac_drv_fill_rxd(smc) ;
                if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
                        smc->os.hwm.rx_break = 0 ;
                        process_receive(smc) ;
                }
                else {
                        smc->os.hwm.detec_count = 0 ;
                        smt_force_irq(smc) ;
                }
        }
#endif
        smc->os.hwm.isr_flag = TRUE ;

#ifdef USE_BREAK_ISR
        force_irq = TRUE ;
        if (smc->os.hwm.leave_isr) {
                smc->os.hwm.leave_isr = FALSE ;
                process_receive(smc) ;
        }
#endif

        while ((is = GET_ISR() & ISR_MASK)) {
                NDD_TRACE("CH0B",is,0,0) ;
                DB_GEN(7, "ISA = 0x%lx", is);

                if (is & IMASK_SLOW) {
                        NDD_TRACE("CH1b",is,0,0) ;
                        if (is & IS_PLINT1) {   /* PLC1 */
                                plc1_irq(smc) ;
                        }
                        if (is & IS_PLINT2) {   /* PLC2 */
                                plc2_irq(smc) ;
                        }
                        if (is & IS_MINTR1) {   /* FORMAC+ status reg 1 */
                                stu = inpw(FM_A(FM_ST1U)) ;
                                stl = inpw(FM_A(FM_ST1L)) ;
                                DB_GEN(6, "Slow transmit complete");
                                mac1_irq(smc,stu,stl) ;
                        }
                        if (is & IS_MINTR2) {   /* FORMAC+ status reg 2 */
                                stu= inpw(FM_A(FM_ST2U)) ;
                                stl= inpw(FM_A(FM_ST2L)) ;
                                DB_GEN(6, "Slow receive complete");
                                DB_GEN(7, "stl = %x : stu = %x", stl, stu);
                                mac2_irq(smc,stu,stl) ;
                        }
                        if (is & IS_MINTR3) {   /* FORMAC+ status reg 3 */
                                stu= inpw(FM_A(FM_ST3U)) ;
                                stl= inpw(FM_A(FM_ST3L)) ;
                                DB_GEN(6, "FORMAC Mode Register 3");
                                mac3_irq(smc,stu,stl) ;
                        }
                        if (is & IS_TIMINT) {   /* timer interrupt */
                                timer_irq(smc) ;
#ifdef NDIS_OS2
                                force_irq_pending = 0 ;
#endif
                                /*
                                 * out of RxD detection
                                 */
                                if (++smc->os.hwm.detec_count > 4) {
                                        /*
                                         * check out of RxD condition
                                         */
                                        process_receive(smc) ;
                                }
                        }
                        if (is & IS_TOKEN) {    /* restricted token monitor */
                                rtm_irq(smc) ;
                        }
                        if (is & IS_R1_P) {     /* QUEUE_R1 parity error */
                                /* clear IRQ */
                                outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
                                SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
                        }
                        if (is & IS_R1_C) {     /* QUEUE_R1 check condition */
                                /* clear IRQ */
                                outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
                                SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
                        }
                        if (is & IS_XA_C) {     /* QUEUE_A0 check condition */
                                /* clear IRQ */
                                outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
                                SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
                        }
                        if (is & IS_XS_C) {     /* QUEUE_S check condition */
                                /* clear IRQ */
                                outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
                                SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
                        }
                }

                /*
                 * fast transmit complete
                 */
                if (is & (IS_XS_F|IS_XA_F)) {
                        DB_GEN(6, "Fast tx complete queue");
                        /*
                         * clear IRQ; note: no IRQ is lost because
                         * both queues are always serviced
                         */
                        outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
                        outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
                        mac_drv_clear_txd(smc) ;
                        llc_restart_tx(smc) ;
                }

                /*
                 * fast receive complete
                 */
                if (is & IS_R1_F) {
                        DB_GEN(6, "Fast receive complete");
                        /* clear IRQ */
#ifndef USE_BREAK_ISR
                        outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
                        process_receive(smc) ;
#else
                        process_receive(smc) ;
                        if (smc->os.hwm.leave_isr) {
                                force_irq = FALSE ;
                        } else {
                                outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
                                process_receive(smc) ;
                        }
#endif
                }

#ifndef NDIS_OS2
                while ((mb = get_llc_rx(smc))) {
                        smt_to_llc(smc,mb) ;
                }
#else
                if (offDepth)
                        post_proc() ;

                while (!offDepth && (mb = get_llc_rx(smc))) {
                        smt_to_llc(smc,mb) ;
                }

                if (!offDepth && smc->os.hwm.rx_break) {
                        process_receive(smc) ;
                }
#endif
                if (smc->q.ev_get != smc->q.ev_put) {
                        NDD_TRACE("CH2a",0,0,0) ;
                        ev_dispatcher(smc) ;
                }
#ifdef NDIS_OS2
                post_proc() ;
                if (offDepth) {         /* leave the ISR */
                        break ;
                }
#endif
#ifdef USE_BREAK_ISR
                if (smc->os.hwm.leave_isr) {
                        break ;         /* leave the fddi_isr */
                }
#endif
        }       /* end of interrupt source polling loop */

#ifdef USE_BREAK_ISR
        if (smc->os.hwm.leave_isr && force_irq) {
                smt_force_irq(smc) ;
        }
#endif
        smc->os.hwm.isr_flag = FALSE ;
        NDD_TRACE("CH0E",0,0,0) ;
}

#ifndef NDIS_OS2
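/*
 * mac_drv_rx_mode -- set the receive mode of the hardware module.
 * RX_ENABLE_/RX_DISABLE_PASS_SMT, _NSA and _DB control which SMT, NSA
 * and directed-beacon frames are passed to the OS-specific receive
 * path; RX_DISABLE_PASS_ALL resets all of them.  The LLC promiscuous
 * modes control whether LLC frames whose DA does not match this
 * station are passed upward.  All other mode values are forwarded to
 * mac_set_rx_mode().
 */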
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
        switch(mode) {
        case RX_ENABLE_PASS_SMT:
                smc->os.hwm.pass_SMT = TRUE ;
                break ;
        case RX_DISABLE_PASS_SMT:
                smc->os.hwm.pass_SMT = FALSE ;
                break ;
        case RX_ENABLE_PASS_NSA:
                smc->os.hwm.pass_NSA = TRUE ;
                break ;
        case RX_DISABLE_PASS_NSA:
                smc->os.hwm.pass_NSA = FALSE ;
                break ;
        case RX_ENABLE_PASS_DB:
                smc->os.hwm.pass_DB = TRUE ;
                break ;
        case RX_DISABLE_PASS_DB:
                smc->os.hwm.pass_DB = FALSE ;
                break ;
        case RX_DISABLE_PASS_ALL:
                smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
                smc->os.hwm.pass_DB = FALSE ;
                smc->os.hwm.pass_llc_promisc = TRUE ;
                mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
                break ;
        case RX_DISABLE_LLC_PROMISC:
                smc->os.hwm.pass_llc_promisc = FALSE ;
                break ;
        case RX_ENABLE_LLC_PROMISC:
                smc->os.hwm.pass_llc_promisc = TRUE ;
                break ;
        case RX_ENABLE_ALLMULTI:
        case RX_DISABLE_ALLMULTI:
        case RX_ENABLE_PROMISC:
        case RX_DISABLE_PROMISC:
        case RX_ENABLE_NSA:
        case RX_DISABLE_NSA:
        default:
                mac_set_rx_mode(smc,mode) ;
                break ;
        }
}
#endif  /* ifndef NDIS_OS2 */

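/*
 * process_receive -- walk the receive ring and hand completed frames
 * upward: LLC frames go to the OS-specific receive path via
 * mac_drv_rx_complete(); SMT, NSA and directed-beacon frames are
 * copied into SMbufs and passed to the SMT.  Erroneous frames and
 * frames we transmitted ourselves are discarded and their RxDs
 * requeued.  Also detects and recovers from out-of-RxD conditions.
 */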
void process_receive(struct s_smc *smc)
{
        int i ;
        int n ;
        int frag_count ;        /* total RxDs consumed by the frame */
        int used_frags ;        /* RxDs actually holding frame data */
        struct s_smt_rx_queue *queue ;
        struct s_smt_fp_rxd volatile *r ;       /* RxD walk pointer */
        struct s_smt_fp_rxd volatile *rxd ;     /* first RxD of the frame */
        u_long rbctrl ;         /* receive buffer control word */
        u_long rfsw ;           /* receive frame status word */
        u_short rx_used ;
        u_char far *virt ;
        char far *data ;
        SMbuf *mb ;
        u_char fc ;             /* frame control byte */
        int len ;               /* frame length */

        smc->os.hwm.detec_count = 0 ;
        queue = smc->hw.fp.rx[QUEUE_R1] ;
        NDD_TRACE("RHxB",0,0,0) ;
        for ( ; ; ) {
                r = queue->rx_curr_get ;
                rx_used = queue->rx_used ;
                frag_count = 0 ;

#ifdef USE_BREAK_ISR
                if (smc->os.hwm.leave_isr) {
                        goto rx_end ;
                }
#endif
#ifdef NDIS_OS2
                if (offDepth) {
                        smc->os.hwm.rx_break = 1 ;
                        goto rx_end ;
                }
                smc->os.hwm.rx_break = 0 ;
#endif
#ifdef ODI2
                if (smc->os.hwm.rx_break) {
                        goto rx_end ;
                }
#endif
                n = 0 ;
                do {
                        DB_RX(5, "Check RxD %p for OWN and EOF", r);
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

                        if (rbctrl & BMU_OWN) {
                                NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
                                DB_RX(4, "End of RxDs");
                                goto rx_end ;
                        }
                        /*
                         * out of RxD detection
                         */
                        if (!rx_used) {
                                SK_BREAK() ;
                                SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
                                /*
                                 * either the BMU is stuck in the R1 queue
                                 * or the ring counters are wrong: clear
                                 * the queue and restart receiving
                                 */
                                smc->hw.hw_state = STOPPED ;
                                mac_drv_clear_rx_queue(smc) ;
                                smc->hw.hw_state = STARTED ;
                                mac_drv_fill_rxd(smc) ;
                                smc->os.hwm.detec_count = 0 ;
                                goto rx_end ;
                        }
                        rfsw = le32_to_cpu(r->rxd_rfsw) ;
                        if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
                                /*
                                 * the start-of-frame bits are
                                 * inconsistent: the frame spans more
                                 * than one rx buffer and this
                                 * descriptor carries no valid frame
                                 * status word
                                 */
                                SK_BREAK() ;
                                rfsw = 0 ;
                                if (frag_count) {
                                        break ;
                                }
                        }
                        n += rbctrl & 0xffff ;
                        r = r->rxd_next ;
                        frag_count++ ;
                        rx_used-- ;
                } while (!(rbctrl & BMU_EOF)) ;
                used_frags = frag_count ;
                DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);

                /*
                 * note: the following DRV_BUF_FLUSH() calls could
                 * perhaps be skipped, since BMU_ST_BUF is not changed
                 * by the ASIC
                 */
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
                        DB_RX(5, "Check STF bit in %p", r);
                        r = r->rxd_next ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        frag_count++ ;
                        rx_used-- ;
                }
                DB_RX(5, "STF bit found");

                /*
                 * the received frame is complete: advance the queue
                 */
                rxd = queue->rx_curr_get ;
                queue->rx_curr_get = r ;
                queue->rx_free += frag_count ;
                queue->rx_used = rx_used ;

                /*
                 * ASIC Errata no. 7 (STF - Bit Bug)
                 */
                rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

                for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
                        DB_RX(5, "dma_complete for RxD %p", r);
                        dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
                }
                smc->hw.fp.err_stats.err_valid++ ;
                smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

                /* frame length (including FC) without the 4-byte CRC */
                len = (rfsw & RD_LENGTH) - 4 ;

                DB_RX(4, "frame length = %d", len);
                /*
                 * check the frame length and all error flags
                 */
                if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
                        if (rfsw & RD_S_MSRABT) {
                                DB_RX(2, "Frame aborted by the FORMAC");
                                smc->hw.fp.err_stats.err_abort++ ;
                        }
                        /*
                         * check the frame status
                         */
                        if (rfsw & RD_S_SEAC2) {
                                DB_RX(2, "E-Indicator set");
                                smc->hw.fp.err_stats.err_e_indicator++ ;
                        }
                        if (rfsw & RD_S_SFRMERR) {
                                DB_RX(2, "CRC error");
                                smc->hw.fp.err_stats.err_crc++ ;
                        }
                        if (rfsw & RX_FS_IMPL) {
                                DB_RX(2, "Implementer frame");
                                smc->hw.fp.err_stats.err_imp_frame++ ;
                        }
                        goto abort_frame ;
                }
                if (len > FDDI_RAW_MTU-4) {
                        DB_RX(2, "Frame too long error");
                        smc->hw.fp.err_stats.err_too_long++ ;
                        goto abort_frame ;
                }
                /*
                 * drop frames with length <= 4: only a status word was
                 * delivered (e.g. for an aborted frame)
                 */
                if (len <= 4) {
                        DB_RX(2, "Frame length = 0");
                        goto abort_frame ;
                }

                if (len != (n-4)) {
                        DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
                        smc->os.hwm.rx_len_error++ ;
                        goto abort_frame ;
                }

                /*
                 * drop frames we sent ourselves (SA == MA)
                 */
                virt = (u_char far *) rxd->rxd_virt ;
                DB_RX(2, "FC = %x", *virt);
                if (virt[12] == MA[5] &&
                    virt[11] == MA[4] &&
                    virt[10] == MA[3] &&
                    virt[9] == MA[2] &&
                    virt[8] == MA[1] &&
                    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
                        goto abort_frame ;
                }

                /*
                 * test if LLC frame
                 */
                if (rfsw & RX_FS_LLC) {
                        /*
                         * if pass_llc_promisc is disabled, pass only
                         * unicast frames addressed to this station and
                         * multicast/broadcast frames
                         */
                        if (!smc->os.hwm.pass_llc_promisc) {
                                if(!(virt[1] & GROUP_ADDR_BIT)) {
                                        if (virt[6] != MA[5] ||
                                            virt[5] != MA[4] ||
                                            virt[4] != MA[3] ||
                                            virt[3] != MA[2] ||
                                            virt[2] != MA[1] ||
                                            virt[1] != MA[0]) {
                                                DB_RX(2, "DA != MA and not multi- or broadcast");
                                                goto abort_frame ;
                                        }
                                }
                        }

                        /*
                         * LLC frame received
                         */
                        DB_RX(4, "LLC - receive");
                        mac_drv_rx_complete(smc,rxd,frag_count,len) ;
                }
                else {
                        if (!(mb = smt_get_mbuf(smc))) {
                                smc->hw.fp.err_stats.err_no_buf++ ;
                                DB_RX(4, "No SMbuf; receive terminated");
                                goto abort_frame ;
                        }
                        data = smtod(mb,char *) - 1 ;

                        /*
                         * copy the frame into an SMT mbuf
                         */
#ifdef USE_OS_CPY
                        hwm_cpy_rxd2mb(rxd,data,len) ;
#else
                        for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
                                n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
                                DB_RX(6, "cp SMT frame to mb: len = %d", n);
                                memcpy(data,r->rxd_virt,n) ;
                                data += n ;
                        }
                        data = smtod(mb,char *) - 1 ;
#endif
                        fc = *(char *)mb->sm_data = *data ;
                        mb->sm_len = len - 1 ;          /* len - fc */
                        data++ ;

                        /*
                         * SMT frame received
                         */
                        switch(fc) {
                        case FC_SMT_INFO :
                                smc->hw.fp.err_stats.err_smt_frame++ ;
                                DB_RX(5, "SMT frame received");

                                if (smc->os.hwm.pass_SMT) {
                                        DB_RX(5, "pass SMT frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
                                        DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }

                                smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
                                break ;
                        case FC_SMT_NSA :
                                smc->hw.fp.err_stats.err_smt_frame++ ;
                                DB_RX(5, "SMT frame received");

                                /*
                                 * pass the NSA frame if pass_NSA is
                                 * set, or if pass_SMT is set and the
                                 * A-Indicator is not set
                                 */
                                if (smc->os.hwm.pass_NSA ||
                                        (smc->os.hwm.pass_SMT &&
                                        !(rfsw & A_INDIC))) {
                                        DB_RX(5, "pass SMT frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
                                        DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }

                                smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
                                break ;
                        case FC_BEACON :
                                if (smc->os.hwm.pass_DB) {
                                        DB_RX(5, "pass DB frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
                                        DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }
                                smt_free_mbuf(smc,mb) ;
                                break ;
                        default :
                                /*
                                 * unknown FC: discard the frame
                                 */
                                DB_RX(2, "unknown FC error");
                                smt_free_mbuf(smc,mb) ;
                                DB_RX(5, "requeue RxD");
                                mac_drv_requeue_rxd(smc,rxd,frag_count) ;
                                if ((fc & 0xf0) == FC_MAC)
                                        smc->hw.fp.err_stats.err_mac_frame++ ;
                                else
                                        smc->hw.fp.err_stats.err_imp_frame++ ;
                                break ;
                        }
                }

                DB_RX(3, "next RxD is %p", queue->rx_curr_get);
                NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

                continue ;

abort_frame:
                DB_RX(5, "requeue RxD");
                mac_drv_requeue_rxd(smc,rxd,frag_count) ;

                DB_RX(3, "next RxD is %p", queue->rx_curr_get);
                NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
        }
rx_end:
#ifdef ALL_RX_COMPLETE
        mac_drv_all_receives_complete(smc) ;
#endif
        return ;
}

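/*
 * smt_to_llc -- pass a queued SMbuf to the LLC layer via
 * mac_drv_rx_init() and free it afterwards.
 */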
static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
        u_char fc ;

        DB_RX(4, "send a queued frame to the llc layer");
        smc->os.hwm.r.len = mb->sm_len ;
        smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
        fc = *smc->os.hwm.r.mb_pos ;
        (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
                smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
        smt_free_mbuf(smc,mb) ;
}

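/*
 * hwm_rx_frag -- hand one receive buffer fragment to the hardware:
 * fill the current RxD with the virtual and physical buffer address
 * and the buffer control word (OWN, CHECK, IRQ on EOF, length), then
 * start the receive BMU.
 */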
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                 int frame_status)
{
        struct s_smt_fp_rxd volatile *r ;
        __le32 rbctrl;

        NDD_TRACE("RHfB",virt,len,frame_status) ;
        DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
        r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
        r->rxd_virt = virt ;
        r->rxd_rbadr = cpu_to_le32(phys) ;
        rbctrl = cpu_to_le32( (((__u32)frame_status &
                (FIRST_FRAG|LAST_FRAG))<<26) |
                (((u_long) frame_status & FIRST_FRAG) << 21) |
                BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
        r->rxd_rbctrl = rbctrl ;

        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
        outpd(ADDR(B0_R1_CSR),CSR_START) ;
        smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
        smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
        smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
        NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}

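/*
 * mac_drv_clear_rx_queue -- remove all RxDs from the receive queue:
 * clear the OWN bit of every used descriptor and notify the
 * OS-specific module via mac_drv_clear_rxd() so it can free the
 * attached buffers.  The adapter must be STOPPED when this is called.
 */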
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
        struct s_smt_fp_rxd volatile *r ;
        struct s_smt_fp_rxd volatile *next_rxd ;
        struct s_smt_rx_queue *queue ;
        int frag_count ;
        int i ;

        if (smc->hw.hw_state != STOPPED) {
                SK_BREAK() ;
                SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
                return ;
        }

        queue = smc->hw.fp.rx[QUEUE_R1] ;
        DB_RX(5, "clear_rx_queue");

        /*
         * dma_complete and mac_drv_clear_rxd for all used RxDs
         */
        r = queue->rx_curr_get ;
        while (queue->rx_used) {
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                DB_RX(5, "switch OWN bit of RxD 0x%p", r);
                r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                frag_count = 1 ;
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
                r = r->rxd_next ;
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                while (r != queue->rx_curr_put &&
                        !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
                        DB_RX(5, "Check STF bit in %p", r);
                        r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
                        r = r->rxd_next ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        frag_count++ ;
                }
                DB_RX(5, "STF bit found");
                next_rxd = r ;

                for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
                        DB_RX(5, "dma_complete for RxD %p", r);
                        dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
                }

                DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
                        queue->rx_curr_get, frag_count);
                mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

                queue->rx_curr_get = next_rxd ;
                queue->rx_used -= frag_count ;
                queue->rx_free += frag_count ;
        }
}

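/*
 * hwm_tx_init -- set up the transmission of a frame: choose the send
 * queue from frame_status (QUEUE_A0 or QUEUE_S), derive LAN_TX/LOC_TX
 * from the frame control byte, and verify that the ring is up and that
 * enough TxDs are free.  Returns the (possibly modified) frame_status;
 * the caller must check for RING_DOWN and OUT_OF_TXD.
 */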
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
                int frame_status)
{
        NDD_TRACE("THiB",fc,frag_count,frame_len) ;
        smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
        smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
        smc->os.hwm.tx_len = frame_len ;
        DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
        if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
                frame_status |= LAN_TX ;
        }
        else {
                switch (fc) {
                case FC_SMT_INFO :
                case FC_SMT_NSA :
                        frame_status |= LAN_TX ;
                        break ;
                case FC_SMT_LOC :
                        frame_status |= LOC_TX ;
                        break ;
                case FC_SMT_LAN_LOC :
                        frame_status |= LAN_TX | LOC_TX ;
                        break ;
                default :
                        SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
                }
        }
        if (!smc->hw.mac_ring_is_up) {
                frame_status &= ~LAN_TX ;
                frame_status |= RING_DOWN ;
                DB_TX(2, "Ring is down: terminate LAN_TX");
        }
        if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef NDIS_OS2
                mac_drv_clear_txd(smc) ;
                if (frag_count > smc->os.hwm.tx_p->tx_free) {
                        DB_TX(2, "Out of TxDs, terminate LAN_TX");
                        frame_status &= ~LAN_TX ;
                        frame_status |= OUT_OF_TXD ;
                }
#else
                DB_TX(2, "Out of TxDs, terminate LAN_TX");
                frame_status &= ~LAN_TX ;
                frame_status |= OUT_OF_TXD ;
#endif
        }
        DB_TX(3, "frame_status = %x", frame_status);
        NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
        return frame_status;
}

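/*
 * hwm_tx_frag -- queue one fragment of a transmit frame.  For LAN_TX
 * the fragment is written into the current TxD and the transmit BMU
 * is started; for LOC_TX the fragment is copied into an SMbuf, and on
 * the last fragment the complete frame is passed back to the SMT via
 * smt_received_pack().
 */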
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                 int frame_status)
{
        struct s_smt_fp_txd volatile *t ;
        struct s_smt_tx_queue *queue ;
        __le32 tbctrl ;

        queue = smc->os.hwm.tx_p ;

        NDD_TRACE("THfB",virt,len,frame_status) ;
        /*
         * set up the new TxD: place the data and the controls into the
         * TxD and let the BMU know about it
         */
        t = queue->tx_curr_put ;

        DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
        if (frame_status & LAN_TX) {
                DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
                t->txd_virt = virt ;
                t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
                t->txd_tbadr = cpu_to_le32(phys) ;
                tbctrl = cpu_to_le32((((__u32)frame_status &
                        (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
                        BMU_OWN|BMU_CHECK |len) ;
                t->txd_tbctrl = tbctrl ;

#ifndef AIX
                DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
                DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                if (frame_status & QUEUE_A0) {
                        outpd(ADDR(B0_XA_CSR),CSR_START) ;
                }
                else {
                        outpd(ADDR(B0_XS_CSR),CSR_START) ;
                }
#endif
                queue->tx_free-- ;
                queue->tx_used++ ;
                queue->tx_curr_put = t->txd_next ;
                if (frame_status & LAST_FRAG) {
                        smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
                }
        }
        if (frame_status & LOC_TX) {
                DB_TX(3, "LOC_TX:");
                if (frame_status & FIRST_FRAG) {
                        if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
                                smc->hw.fp.err_stats.err_no_buf++ ;
                                DB_TX(4, "No SMbuf; transmit terminated");
                        }
                        else {
                                smc->os.hwm.tx_data =
                                        smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef USE_OS_CPY
#ifdef PASS_1ST_TXD_2_TX_COMP
                                hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
                                        smc->os.hwm.tx_len) ;
#endif
#endif
                        }
                }
                if (smc->os.hwm.tx_mb) {
#ifndef USE_OS_CPY
                        DB_TX(3, "copy fragment into MBuf");
                        memcpy(smc->os.hwm.tx_data,virt,len) ;
                        smc->os.hwm.tx_data += len ;
#endif
                        if (frame_status & LAST_FRAG) {
#ifdef USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
                                /*
                                 * hwm_cpy_txd2mb(txd,data,len) copies
                                 * 'len' bytes to 'data'; the virtual
                                 * pointer of the OS-specific tx buffer
                                 * and the length are taken from the
                                 * tx descriptor
                                 */
                                hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
                                        smc->os.hwm.tx_len) ;
#endif
#endif
                                smc->os.hwm.tx_data =
                                        smtod(smc->os.hwm.tx_mb,char *) - 1 ;
                                *(char *)smc->os.hwm.tx_mb->sm_data =
                                        *smc->os.hwm.tx_data ;
                                smc->os.hwm.tx_data++ ;
                                smc->os.hwm.tx_mb->sm_len =
                                        smc->os.hwm.tx_len - 1 ;
                                DB_TX(3, "pass LLC frame to SMT");
                                smt_received_pack(smc,smc->os.hwm.tx_mb,
                                        RD_FS_LOCAL) ;
                        }
                }
        }
        NDD_TRACE("THfE",t,queue->tx_free,0) ;
}

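/*
 * queue_llc_rx -- queue a received LLC frame (SMbuf) for delivery to
 * the LLC layer and force an interrupt if we are not already inside
 * the ISR, so that fddi_isr() will drain the queue.
 */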
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
        DB_GEN(4, "queue_llc_rx: mb = %p", mb);
        smc->os.hwm.queued_rx_frames++ ;
        mb->sm_next = (SMbuf *)NULL ;
        if (smc->os.hwm.llc_rx_pipe == NULL) {
                smc->os.hwm.llc_rx_pipe = mb ;
        }
        else {
                smc->os.hwm.llc_rx_tail->sm_next = mb ;
        }
        smc->os.hwm.llc_rx_tail = mb ;

        /*
         * force a timer IRQ to receive the data
         */
        if (!smc->os.hwm.isr_flag) {
                smt_force_irq(smc) ;
        }
}

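/*
 * get_llc_rx -- dequeue the next SMbuf from the LLC receive queue;
 * returns NULL if the queue is empty.
 */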
static SMbuf *get_llc_rx(struct s_smc *smc)
{
        SMbuf *mb ;

        if ((mb = smc->os.hwm.llc_rx_pipe)) {
                smc->os.hwm.queued_rx_frames-- ;
                smc->os.hwm.llc_rx_pipe = mb->sm_next ;
        }
        DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
        return mb;
}

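/*
 * queue_txd_mb -- queue an SMbuf that belongs to a transmit
 * descriptor; it is freed again by mac_drv_clear_txd() when the TxD
 * completes.
 */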
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
        DB_GEN(4, "queue_txd_mb: mb = %p", mb);
        smc->os.hwm.queued_txd_mb++ ;
        mb->sm_next = (SMbuf *)NULL ;
        if (smc->os.hwm.txd_tx_pipe == NULL) {
                smc->os.hwm.txd_tx_pipe = mb ;
        }
        else {
                smc->os.hwm.txd_tx_tail->sm_next = mb ;
        }
        smc->os.hwm.txd_tx_tail = mb ;
}

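/*
 * get_txd_mb -- dequeue the next SMbuf from the TxD mbuf queue;
 * returns NULL if the queue is empty.
 */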
static SMbuf *get_txd_mb(struct s_smc *smc)
{
        SMbuf *mb ;

        if ((mb = smc->os.hwm.txd_tx_pipe)) {
                smc->os.hwm.queued_txd_mb-- ;
                smc->os.hwm.txd_tx_pipe = mb->sm_next ;
        }
        DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
        return mb;
}

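/*
 * smt_send_mbuf -- send an SMT frame held in an SMbuf: prepend the
 * frame control byte, split the buffer at SMT_PAGESIZE boundaries into
 * at most three fragments, and transmit it on the LAN (async queue)
 * and/or loop it back locally, depending on the FC value and the
 * current pass_SMT/pass_NSA settings.
 */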
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
        char far *data ;
        int len ;
        int n ;
        int i ;
        int frag_count ;
        int frame_status ;
        SK_LOC_DECL(char far,*virt[3]) ;
        int frag_len[3] ;
        struct s_smt_tx_queue *queue ;
        struct s_smt_fp_txd volatile *t ;
        u_long phys ;
        __le32 tbctrl;

        NDD_TRACE("THSB",mb,fc,0) ;
        DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);

        mb->sm_off-- ;                  /* set to fc */
        mb->sm_len++ ;                  /* + fc */
        data = smtod(mb,char *) ;
        *data = fc ;
        if (fc == FC_SMT_LOC)
                *data = FC_SMT_INFO ;

        /*
         * determine the frag count and the virt addresses of the frags
         */
        frag_count = 0 ;
        len = mb->sm_len ;
        while (len) {
                n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
                if (n >= len) {
                        n = len ;
                }
                DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
                virt[frag_count] = data ;
                frag_len[frag_count] = n ;
                frag_count++ ;
                len -= n ;
                data += n ;
        }

        /*
         * determine the frame status
         */
        queue = smc->hw.fp.tx[QUEUE_A0] ;
        if (fc == FC_BEACON || fc == FC_SMT_LOC) {
                frame_status = LOC_TX ;
        }
        else {
                frame_status = LAN_TX ;
                if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
                    (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
                        frame_status |= LOC_TX ;
        }

        if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
                frame_status &= ~LAN_TX;
                if (frame_status) {
                        DB_TX(2, "Ring is down: terminate LAN_TX");
                }
                else {
                        DB_TX(2, "Ring is down: terminate transmission");
                        smt_free_mbuf(smc,mb) ;
                        return ;
                }
        }
        DB_TX(5, "frame_status = 0x%x", frame_status);

        if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
                mb->sm_use_count = 2 ;
        }

        if (frame_status & LAN_TX) {
                t = queue->tx_curr_put ;
                frame_status |= FIRST_FRAG ;
                for (i = 0; i < frag_count; i++) {
                        DB_TX(5, "init TxD = 0x%p", t);
                        if (i == frag_count-1) {
                                frame_status |= LAST_FRAG ;
                                t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
                                        (((__u32)(mb->sm_len-1)&3) << 27)) ;
                        }
                        t->txd_virt = virt[i] ;
                        phys = dma_master(smc, (void far *)virt[i],
                                frag_len[i], DMA_RD|SMT_BUF) ;
                        t->txd_tbadr = cpu_to_le32(phys) ;
                        tbctrl = cpu_to_le32((((__u32)frame_status &
                                (FIRST_FRAG|LAST_FRAG)) << 26) |
                                BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
                        t->txd_tbctrl = tbctrl ;
#ifndef AIX
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                        outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                        outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
                        frame_status &= ~FIRST_FRAG ;
                        queue->tx_curr_put = t = t->txd_next ;
                        queue->tx_free-- ;
                        queue->tx_used++ ;
                }
                smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
                queue_txd_mb(smc,mb) ;
        }

        if (frame_status & LOC_TX) {
                DB_TX(5, "pass Mbuf to LLC queue");
                queue_llc_rx(smc,mb) ;
        }

        /*
         * reap any TxDs that have already completed; this also frees
         * the queued mbufs of finished SMT transmissions
         */
        mac_drv_clear_txd(smc) ;
        NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}

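/*
 * mac_drv_clear_txd -- reap completed transmit descriptors on both
 * queues: for each fully transmitted frame, complete the DMA mapping,
 * free the queued SMbuf (SMT transmissions) or notify the OS-specific
 * module via mac_drv_tx_complete() (LLC transmissions), and return
 * the TxDs to the free pool.
 */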
static void mac_drv_clear_txd(struct s_smc *smc)
{
        struct s_smt_tx_queue *queue ;
        struct s_smt_fp_txd volatile *t1 ;
        struct s_smt_fp_txd volatile *t2 = NULL ;
        SMbuf *mb ;
        u_long tbctrl ;
        int i ;
        int frag_count ;
        int n ;

        NDD_TRACE("THcB",0,0,0) ;
        for (i = QUEUE_S; i <= QUEUE_A0; i++) {
                queue = smc->hw.fp.tx[i] ;
                t1 = queue->tx_curr_get ;
                DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);

                for ( ; ; ) {
                        frag_count = 0 ;

                        do {
                                DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
                                DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
                                tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

                                if (tbctrl & BMU_OWN || !queue->tx_used){
                                        DB_TX(4, "End of TxDs queue %d", i);
                                        goto free_next_queue ;
                                }
                                t1 = t1->txd_next ;
                                frag_count++ ;
                        } while (!(tbctrl & BMU_EOF)) ;

                        t1 = queue->tx_curr_get ;
                        for (n = frag_count; n; n--) {
                                tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
                                dma_complete(smc,
                                        (union s_fp_descr volatile *) t1,
                                        (int) (DMA_RD |
                                        ((tbctrl & BMU_SMT_TX) >> 18))) ;
                                t2 = t1 ;
                                t1 = t1->txd_next ;
                        }

                        if (tbctrl & BMU_SMT_TX) {
                                mb = get_txd_mb(smc) ;
                                smt_free_mbuf(smc,mb) ;
                        }
                        else {
#ifndef PASS_1ST_TXD_2_TX_COMP
                                DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
                                mac_drv_tx_complete(smc,t2) ;
#else
                                DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
                                        queue->tx_curr_get);
                                mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
                        }
                        queue->tx_curr_get = t1 ;
                        queue->tx_free += frag_count ;
                        queue->tx_used -= frag_count ;
                }
free_next_queue: ;
        }
        NDD_TRACE("THcE",0,0,0) ;
}

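/*
 * mac_drv_clear_tx_queue -- remove all pending transmit frames from
 * both transmit queues: clear the OWN bit of every used TxD, reap
 * them via mac_drv_clear_txd(), and reload the BMU descriptor address
 * registers.  The adapter must be STOPPED when this is called.
 */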
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
        struct s_smt_fp_txd volatile *t ;
        struct s_smt_tx_queue *queue ;
        int tx_used ;
        int i ;

        if (smc->hw.hw_state != STOPPED) {
                SK_BREAK() ;
                SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
                return ;
        }

        for (i = QUEUE_S; i <= QUEUE_A0; i++) {
                queue = smc->hw.fp.tx[i] ;
                DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);

                /*
                 * switch the OWN bit of all pending frames to the host
                 */
                t = queue->tx_curr_get ;
                tx_used = queue->tx_used ;
                while (tx_used) {
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
                        DB_TX(5, "switch OWN bit of TxD 0x%p", t);
                        t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                        t = t->txd_next ;
                        tx_used-- ;
                }
        }

        /*
         * release all TxDs of both queues
         */
        mac_drv_clear_txd(smc) ;

        for (i = QUEUE_S; i <= QUEUE_A0; i++) {
                queue = smc->hw.fp.tx[i] ;
                t = queue->tx_curr_get ;

                /*
                 * write the physical address of the NEXT descriptor
                 * into the BMU's current address descriptor pointer
                 * and set tx_curr_get and tx_curr_put to this position
                 */
                if (i == QUEUE_S) {
                        outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
                }
                else {
                        outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
                }

                queue->tx_curr_put = queue->tx_curr_get->txd_next ;
                queue->tx_curr_get = queue->tx_curr_put ;
        }
}

/*
 * Debug functions.
 */
#ifdef DEBUG
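/*
 * mac_drv_debug_lev -- set the debug level 'lev' for the debug
 * facility selected by 'flag' (DEBUG_SMT, DEBUG_ECM, DB_HWM_RX etc.);
 * flag == 0 switches all facilities off.
 */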
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
        switch(flag) {
        case (int)NULL:
                DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
                DB_P.d_cfm = 0 ;
                DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef SBA
                DB_P.d_sba = 0 ;
#endif
#ifdef ESS
                DB_P.d_ess = 0 ;
#endif
                break ;
        case DEBUG_SMTF:
                DB_P.d_smtf = lev ;
                break ;
        case DEBUG_SMT:
                DB_P.d_smt = lev ;
                break ;
        case DEBUG_ECM:
                DB_P.d_ecm = lev ;
                break ;
        case DEBUG_RMT:
                DB_P.d_rmt = lev ;
                break ;
        case DEBUG_CFM:
                DB_P.d_cfm = lev ;
                break ;
        case DEBUG_PCM:
                DB_P.d_pcm = lev ;
                break ;
        case DEBUG_SBA:
#ifdef SBA
                DB_P.d_sba = lev ;
#endif
                break ;
        case DEBUG_ESS:
#ifdef ESS
                DB_P.d_ess = lev ;
#endif
                break ;
        case DB_HWM_RX:
                DB_P.d_os.hwm_rx = lev ;
                break ;
        case DB_HWM_TX:
                DB_P.d_os.hwm_tx = lev ;
                break ;
        case DB_HWM_GEN:
                DB_P.d_os.hwm_gen = lev ;
                break ;
        default:
                break ;
        }
}
#endif