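/*
 * hwmtm.c - SMT/FDDI hardware memory module. Drives the BMU transmit
 * and receive descriptor rings of the adapter and hands completed
 * frames to the LLC and SMT layers. The function comments below are
 * summaries reconstructed from the code, not the original vendor text.
 */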

#ifndef	lint
static char const ID_sccs[] = "@(#)hwmtm.c	1.40 99/05/31 (C) SK" ;
#endif

#define	HWMTM

#ifndef	FDDI
#define	FDDI
#endif

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"
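
/*
 * One SMbuf pool shared by all adapter instances
 * (COMMON_MB_POOL build variant only).
 */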
#ifdef COMMON_MB_POOL
static SMbuf *mb_start = NULL ;
static SMbuf *mb_free = NULL ;
static int mb_init = FALSE ;
static int call_count = 0 ;
#endif

#ifdef	DEBUG
#ifndef	DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif

#ifdef	NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif
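
/*
 * Prototypes of functions local to this module.
 */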
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start, int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);
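
/*
 * Prototypes of externals that the OS-specific driver part provides.
 */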
extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
			      volatile struct s_smt_fp_rxd *rxd, int frag_count);

#ifdef	USE_OS_CPY
/*
 * Argument lists inferred from the call sites in this file; the
 * OS-specific part implements these copy helpers.
 */
extern void hwm_cpy_rxd2mb(struct s_smt_fp_rxd volatile *rxd, char far *data,
			   int len);
extern void hwm_cpy_txd2mb(struct s_smt_fp_txd volatile *txd, char far *data,
			   int len);
#endif

#ifdef	ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(struct s_smc *smc);
#endif

extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);

#ifdef	NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
			 int flag);
#endif

extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
			   int la_len);
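
/*
 * Prototypes of the public entry points of this module.
 */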
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);

int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status);

u_int mac_drv_check_space(void);

SMbuf* smt_get_mbuf(struct s_smc *smc);

#ifdef DEBUG
	void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif
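
/*
 * Macros and constants used below.
 */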
#ifndef	UNUSED
#ifdef	lint
#define UNUSED(x)	(x) = (x)
#else
#define UNUSED(x)
#endif
#endif

#ifdef	USE_CAN_ADDR
#define MA		smc->hw.fddi_canon_addr.a	/* MAC address */
#define	GROUP_ADDR_BIT	0x01
#else
#define MA		smc->hw.fddi_home_addr.a	/* MAC address */
#define	GROUP_ADDR_BIT	0x80
#endif

#define RXD_TXD_COUNT	(HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
			SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)

#ifdef	MB_OUTSIDE_SMC
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
			MAX_MBUF*sizeof(SMbuf))
#define	EXT_VIRT_MEM_2	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif

#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var)	(((var) & 0xffff0000) | ((var) & 0xffff))
#else
#define CR_READ(var)	(__le32)(var)
#endif

#define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
			 IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
			 IS_R1_C | IS_XA_C | IS_XS_C)
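
/*
 * mac_drv_check_space - report how many bytes of non-virtual memory
 * the hardware module needs for descriptors and (optionally) MBufs.
 * With a common MBuf pool only the first call accounts for the pool.
 */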
u_int mac_drv_check_space(void)
{
#ifdef	MB_OUTSIDE_SMC
#ifdef	COMMON_MB_POOL
	call_count++ ;
	if (call_count == 1) {
		return EXT_VIRT_MEM;
	}
	else {
		return EXT_VIRT_MEM_2;
	}
#else
	return EXT_VIRT_MEM;
#endif
#else
	return 0;
#endif
}
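
/*
 * mac_drv_init - check the descriptor alignment requirements and
 * allocate the descriptor memory and the SMbuf pool.
 * Returns 0 on success, 1 if an allocation failed.
 */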
int mac_drv_init(struct s_smc *smc)
{
	/*
	 * Descriptors must be 16-byte aligned, so the descriptor
	 * structure sizes must be a multiple of 16.
	 */
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get the required memory for the RxDs and TxDs (one spare
	 * descriptor for the 16-byte alignment fix-up done later)
	 */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space: the hwm module can't work */
	}

	/*
	 * get the memory for the SMT MBufs
	 */
#ifndef	MB_OUTSIDE_SMC
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space: the hwm module can't work */
	}
#else
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space: the hwm module can't work */
		}
	}
#endif
#endif
	return 0;
}
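
/*
 * init_driver_fplus - set up the FORMAC+ specific init values
 * (mode registers 2/3 and, with canonical addressing, the frame
 * select register).
 */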
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
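
/*
 * init_descr_ring - chain 'count' descriptors into a circular ring,
 * store each successor's physical address in the descriptor, and
 * return the physical address of the first descriptor.
 */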
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long	phys ;

	DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;		/* descr is owned by the host */
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}
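
/*
 * init_txd_ring - initialize the synchronous and asynchronous transmit
 * descriptor rings and load their start addresses into the BMU.
 */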
static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;

	/*
	 * initialize the synchronous transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;
}
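
/*
 * init_rxd_ring - initialize the receive descriptor ring and load its
 * start address into the BMU.
 */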
static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the receive descriptors
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;
}
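
/*
 * init_fddi_driver - initialize the board and the FORMAC+, set up the
 * SMbuf pool and all hardware-module state, align and initialize the
 * descriptor rings, and finally start the PLC.
 */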
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	mb = mb_start ;
	if (!mb_init) {
		mb_free = NULL ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the data structures
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start pointer is 16 byte aligned
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN("i = %d",i,0,3) ;
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}
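
/*
 * smt_get_mbuf - get an SMbuf from the free list (returns NULL if the
 * pool is exhausted).
 */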
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
	return mb;	/* May be NULL */
}
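
/*
 * smt_free_mbuf - decrement the use count of an SMbuf and put it back
 * on the free list when the count reaches zero.
 */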
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{

	if (mb) {
		mb->sm_use_count-- ;
		DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;
		/*
		 * If the use_count is != zero the MBuf is queued
		 * more than once and must not be freed yet.
		 */
		if (!mb->sm_use_count) {
			DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
#ifndef	COMMON_MB_POOL
			mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
			smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
			mb->sm_next = mb_free ;
			mb_free = mb ;
#endif
		}
	}
	else
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}
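
/*
 * mac_drv_repair_descr - after the hardware has been stopped (e.g. to
 * recover from a fault), walk all descriptor rings, clear the OWN bit
 * of incompletely transferred descriptors, and restart the BMUs at the
 * correct ring positions. Must only be called in the STOPPED state.
 */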
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair rx queues
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}

static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}
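
/*
 * repair_rxd_ring - like repair_txd_ring, but for the receive ring:
 * returns the physical address where the BMU must continue working.
 */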
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
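
/*
 * fddi_isr - interrupt service routine. Reads and dispatches all
 * pending interrupt sources (PLC, FORMAC, timer, BMU tx/rx complete),
 * drains completed receive frames, and forwards queued SMT frames to
 * the LLC layer. Called by the OS-specific interrupt handler.
 */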
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* ISR source */
	u_short		stu, stl ;
	SMbuf		*mb ;

#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN("ISA = 0x%x",is,0,7) ;

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ STU1(U/L) */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN("Slow transmit complete",0,0,6) ;
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ STU2(U/L) */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN("Slow receive complete",0,0,6) ;
				DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ STU3(U/L) */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN("FORMAC Mode Register 3",0,0,6) ;
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* Timer interrupt */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* Restricted Token Monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* Parity error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* Encoding error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* Encoding error async tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* Encoding error sync tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * Fast Tx complete Async/Sync Queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN("Fast tx complete queue",0,0,6) ;
			/*
			 * clear IRQ, Note: no IRQ is lost, because
			 * we always service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * Fast Rx Complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN("Fast receive complete",0,0,6) ;
			/* clear IRQ */
#ifndef USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications are not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
#endif

		/* NOTE: when the isr is left, no rx is pending */
	}	/* end of interrupt source polling loop */

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}
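
/*
 * mac_drv_rx_mode - set or clear one of the receive modes (pass SMT,
 * NSA, or directed beacon frames to the OS-specific layer, LLC
 * promiscuous), or forward all other mode values to mac_set_rx_mode().
 * Not compiled for NDIS_OS2, where the OS layer filters itself.
 */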
#ifndef NDIS_OS2
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
	switch(mode) {
	case RX_ENABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = TRUE ;
		break ;
	case RX_DISABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = FALSE ;
		break ;
	case RX_ENABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = TRUE ;
		break ;
	case RX_DISABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = FALSE ;
		break ;
	case RX_ENABLE_PASS_DB:
		smc->os.hwm.pass_DB = TRUE ;
		break ;
	case RX_DISABLE_PASS_DB:
		smc->os.hwm.pass_DB = FALSE ;
		break ;
	case RX_DISABLE_PASS_ALL:
		smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
		smc->os.hwm.pass_DB = FALSE ;
		smc->os.hwm.pass_llc_promisc = TRUE ;
		mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
		break ;
	case RX_DISABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = FALSE ;
		break ;
	case RX_ENABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = TRUE ;
		break ;
	case RX_ENABLE_ALLMULTI:
	case RX_DISABLE_ALLMULTI:
	case RX_ENABLE_PROMISC:
	case RX_DISABLE_PROMISC:
	case RX_ENABLE_NSA:
	case RX_DISABLE_NSA:
	default:
		mac_set_rx_mode(smc,mode) ;
		break ;
	}
}
#endif	/* ifndef NDIS_OS2 */
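
/*
 * process_receive - walk the receive ring, validate each completed
 * frame (length, CRC, frame status), and dispatch it: LLC frames go to
 * mac_drv_rx_complete(), SMT/NSA/beacon frames are copied into an
 * SMbuf and passed to smt_received_pack().
 */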
void process_receive(struct s_smc *smc)
{
	int i ;
	int n ;
	int frag_count ;		/* number of RxDs of the curr rx buf */
	int used_frags ;		/* number of RxDs of the curr frame */
	struct s_smt_rx_queue *queue ;	/* points to the queue ctl struct */
	struct s_smt_fp_rxd volatile *r ;	/* rxd pointer */
	struct s_smt_fp_rxd volatile *rxd ;	/* first rxd of rx frame */
	u_long rbctrl ;			/* receive buffer control word */
	u_long rfsw ;			/* receive frame status word */
	u_short rx_used ;
	u_char far *virt ;
	char far *data ;
	SMbuf *mb ;
	u_char fc ;			/* Frame control */
	int len ;			/* Frame length */

	smc->os.hwm.detec_count = 0 ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	NDD_TRACE("RHxB",0,0,0) ;
	for ( ; ; ) {
		r = queue->rx_curr_get ;
		rx_used = queue->rx_used ;
		frag_count = 0 ;

#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			goto rx_end ;
		}
#endif
#ifdef	NDIS_OS2
		if (offDepth) {
			smc->os.hwm.rx_break = 1 ;
			goto rx_end ;
		}
		smc->os.hwm.rx_break = 0 ;
#endif
#ifdef	ODI2
		if (smc->os.hwm.rx_break) {
			goto rx_end ;
		}
#endif
		n = 0 ;
		do {
			DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

			if (rbctrl & BMU_OWN) {
				NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
				DB_RX("End of RxDs",0,0,4) ;
				goto rx_end ;
			}
			/*
			 * out of RxD detection
			 */
			if (!rx_used) {
				SK_BREAK() ;
				SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
				/* Either we don't have an RxD or all
				 * RxDs are filled; in both cases the
				 * receive queue is cleaned up and refilled.
				 */
				smc->hw.hw_state = STOPPED ;
				mac_drv_clear_rx_queue(smc) ;
				smc->hw.hw_state = STARTED ;
				mac_drv_fill_rxd(smc) ;
				smc->os.hwm.detec_count = 0 ;
				goto rx_end ;
			}
			rfsw = le32_to_cpu(r->rxd_rfsw) ;
			if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
				/*
				 * The BMU_STF bit is deleted, 1 frame is
				 * placed into more than 1 rx buffer
				 *
				 * skip frame by setting the rx len to 0
				 *
				 * if fragment count == 0
				 *	The missing STF bit belongs to the
				 *	current frame, search for the
				 *	EOF bit to complete the frame
				 * else
				 *	the fragment belongs to the next frame,
				 *	exit the loop and process the frame
				 */
				SK_BREAK() ;
				rfsw = 0 ;
				if (frag_count) {
					break ;
				}
			}
			n += rbctrl & 0xffff ;
			r = r->rxd_next ;
			frag_count++ ;
			rx_used-- ;
		} while (!(rbctrl & BMU_EOF)) ;
		used_frags = frag_count ;
		DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;

		/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
		/* BMU_ST_BUF will not be changed by the ASIC */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
			rx_used-- ;
		}
		DB_RX("STF bit found",0,0,5) ;

		/*
		 * The received frame is finished for the process receive
		 */
		rxd = queue->rx_curr_get ;
		queue->rx_curr_get = r ;
		queue->rx_free += frag_count ;
		queue->rx_used = rx_used ;

		/*
		 * ASIC Errata no. 7 (STF - Bit Bug)
		 */
		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		smc->hw.fp.err_stats.err_valid++ ;
		smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

		/* the length of the data including the FC, without the CRC */
		len = (rfsw & RD_LENGTH) - 4 ;

		DB_RX("frame length = %d",len,0,4) ;
		/*
		 * check the frame length and all error flags
		 */
		if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
			if (rfsw & RD_S_MSRABT) {
				DB_RX("Frame aborted by the FORMAC",0,0,2) ;
				smc->hw.fp.err_stats.err_abort++ ;
			}
			/*
			 * check frame status
			 */
			if (rfsw & RD_S_SEAC2) {
				DB_RX("E-Indicator set",0,0,2) ;
				smc->hw.fp.err_stats.err_e_indicator++ ;
			}
			if (rfsw & RD_S_SFRMERR) {
				DB_RX("CRC error",0,0,2) ;
				smc->hw.fp.err_stats.err_crc++ ;
			}
			if (rfsw & RX_FS_IMPL) {
				DB_RX("Implementer frame",0,0,2) ;
				smc->hw.fp.err_stats.err_imp_frame++ ;
			}
			goto abort_frame ;
		}
		if (len > FDDI_RAW_MTU-4) {
			DB_RX("Frame too long error",0,0,2) ;
			smc->hw.fp.err_stats.err_too_long++ ;
			goto abort_frame ;
		}
		/*
		 * SUPERNET 3 Bug: FORMAC delivers status words
		 * of aborted frames to the BMU
		 */
		if (len <= 4) {
			DB_RX("Frame length = 0",0,0,2) ;
			goto abort_frame ;
		}

		if (len != (n-4)) {
			DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
			smc->os.hwm.rx_len_error++ ;
			goto abort_frame ;
		}

		/*
		 * Check SA == MA
		 */
		virt = (u_char far *) rxd->rxd_virt ;
		DB_RX("FC = %x",*virt,0,2) ;
		if (virt[12] == MA[5] &&
		    virt[11] == MA[4] &&
		    virt[10] == MA[3] &&
		    virt[9] == MA[2] &&
		    virt[8] == MA[1] &&
		    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
			goto abort_frame ;
		}

		/*
		 * test if LLC frame
		 */
		if (rfsw & RX_FS_LLC) {
			/*
			 * if pass_llc_promisc is disabled
			 *	if DA != Multicast or Broadcast or DA!=MA
			 *		abort the frame
			 */
			if (!smc->os.hwm.pass_llc_promisc) {
				if(!(virt[1] & GROUP_ADDR_BIT)) {
					if (virt[6] != MA[5] ||
					    virt[5] != MA[4] ||
					    virt[4] != MA[3] ||
					    virt[3] != MA[2] ||
					    virt[2] != MA[1] ||
					    virt[1] != MA[0]) {
						DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
						goto abort_frame ;
					}
				}
			}

			/*
			 * LLC frame received
			 */
			DB_RX("LLC - receive",0,0,4) ;
			mac_drv_rx_complete(smc,rxd,frag_count,len) ;
		}
		else {
			if (!(mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_RX("No SMbuf; receive terminated",0,0,4) ;
				goto abort_frame ;
			}
			data = smtod(mb,char *) - 1 ;

			/*
			 * copy the frame into a SMT_MBuf
			 */
#ifdef USE_OS_CPY
			hwm_cpy_rxd2mb(rxd,data,len) ;
#else
			for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
				n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
				DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
				memcpy(data,r->rxd_virt,n) ;
				data += n ;
			}
			data = smtod(mb,char *) - 1 ;
#endif
			fc = *(char *)mb->sm_data = *data ;
			mb->sm_len = len - 1 ;		/* len - fc */
			data++ ;

			/*
			 * SMT frame received
			 */
			switch(fc) {
			case FC_SMT_INFO :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX("SMT frame received ",0,0,5) ;

				if (smc->os.hwm.pass_SMT) {
					DB_RX("pass SMT frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_SMT_NSA :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX("SMT frame received ",0,0,5) ;

				/* if pass_NSA set, pass the NSA frame; or */
				/* if pass_SMT set and the A-Indicator */
				/* is not set, pass the NSA frame */
				if (smc->os.hwm.pass_NSA ||
					(smc->os.hwm.pass_SMT &&
					!(rfsw & A_INDIC))) {
					DB_RX("pass SMT frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_BEACON :
				if (smc->os.hwm.pass_DB) {
					DB_RX("pass DB frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}
				smt_free_mbuf(smc,mb) ;
				break ;
			default :
				/*
				 * unknown FC: discard the frame
				 */
				DB_RX("unknown FC error",0,0,2) ;
				smt_free_mbuf(smc,mb) ;
				DB_RX("requeue RxD",0,0,5) ;
				mac_drv_requeue_rxd(smc,rxd,frag_count) ;
				if ((fc & 0xf0) == FC_MAC)
					smc->hw.fp.err_stats.err_mac_frame++ ;
				else
					smc->hw.fp.err_stats.err_imp_frame++ ;

				break ;
			}
		}

		DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

		continue ;
	/*--------------------------------------------------------------*/
abort_frame:
		DB_RX("requeue RxD",0,0,5) ;
		mac_drv_requeue_rxd(smc,rxd,frag_count) ;

		DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
	}
rx_end:
#ifdef	ALL_RX_COMPLETE
	mac_drv_all_receives_complete(smc) ;
#endif
	return ;
}
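
/*
 * smt_to_llc - hand a queued SMT frame to the LLC layer via the
 * look-ahead interface (mac_drv_rx_init).
 */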
static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
	u_char	fc ;

	DB_RX("send a queued frame to the llc layer",0,0,4) ;
	smc->os.hwm.r.len = mb->sm_len ;
	smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
	fc = *smc->os.hwm.r.mb_pos ;
	(void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
		smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
	smt_free_mbuf(smc,mb) ;
}
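
/*
 * hwm_rx_frag - post one receive buffer fragment to the receive ring:
 * fill the current RxD, hand it to the BMU (OWN bit set), and start
 * the receive BMU.
 */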
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32	rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
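
/*
 * mac_drv_clear_rx_queue - remove all pending receive buffers from the
 * receive ring (OWN bits are cleared first) and return them to the
 * OS-specific part via mac_drv_clear_rxd(). Requires the STOPPED state.
 */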
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int frag_count ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}

	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX("clear_rx_queue",0,0,5) ;

	/*
	 * switch the OWN bit of all pending rx descriptors
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX("STF bit found",0,0,5) ;
		next_rxd = r ;

		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}

		DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
			(void *)queue->rx_curr_get,frag_count,5) ;
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}
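
/*
 * hwm_tx_init - prepare the transmission of one frame: select the
 * transmit queue from the frame status, classify the frame control
 * byte into LAN_TX/LOC_TX, and check that enough TxDs are free.
 * Returns the (possibly modified) frame status.
 */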
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;
	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	smc->os.hwm.tx_len = frame_len ;
	DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
		frame_status |= LAN_TX ;
	}
	else {
		switch (fc) {
		case FC_SMT_INFO :
		case FC_SMT_NSA :
			frame_status |= LAN_TX ;
			break ;
		case FC_SMT_LOC :
			frame_status |= LOC_TX ;
			break ;
		case FC_SMT_LAN_LOC :
			frame_status |= LAN_TX | LOC_TX ;
			break ;
		default :
			SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
		}
	}
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
	}
	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}
	DB_TX("frame_status = %x",frame_status,0,3) ;
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return frame_status;
}
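
/*
 * hwm_tx_frag - queue one fragment of a transmit frame. For LAN_TX the
 * fragment is placed into the current TxD and the BMU is started; for
 * LOC_TX the fragment is copied into an SMbuf that is passed to the
 * SMT when the last fragment arrives.
 */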
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/*
	 * Set t here, not only in the LAN_TX branch below: the
	 * t-pointer would otherwise be invalid when only LOC_TX
	 * is set.
	 */
	t = queue->tx_curr_put ;

	DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
	if (frame_status & LAN_TX) {
		/* '*t' is already defined */
		DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX("LOC_TX: ",0,0,3) ;
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX("No SMbuf; transmit terminated",0,0,4) ;
			}
			else {
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef	USE_OS_CPY
#ifdef	PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX("copy fragment into MBuf ",0,0,3) ;
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'txd'
				 * to 'data'. The virtual pointer of the
				 * os-specific tx buffer should be written
				 * into the LAST TxD.
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif	/* nPASS_1ST_TXD_2_TX_COMP */
#endif	/* USE_OS_CPY */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX("pass LLC frame to SMT ",0,0,3) ;
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
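
/*
 * queue_llc_rx - queue a receive SMbuf for the LLC layer and force an
 * interrupt if the queue is not serviced from inside the ISR.
 */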
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_rx_frames++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.llc_rx_pipe == NULL) {
		smc->os.hwm.llc_rx_pipe = mb ;
	}
	else {
		smc->os.hwm.llc_rx_tail->sm_next = mb ;
	}
	smc->os.hwm.llc_rx_tail = mb ;

	/*
	 * force a timer IRQ to receive the data
	 */
	if (!smc->os.hwm.isr_flag) {
		smt_force_irq(smc) ;
	}
}
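
/*
 * get_llc_rx - pop the next queued LLC receive SMbuf (or NULL).
 */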
static SMbuf *get_llc_rx(struct s_smc *smc)
{
	SMbuf	*mb ;

	if ((mb = smc->os.hwm.llc_rx_pipe)) {
		smc->os.hwm.queued_rx_frames-- ;
		smc->os.hwm.llc_rx_pipe = mb->sm_next ;
	}
	DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
	return mb;
}
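
/*
 * queue_txd_mb - queue an SMbuf whose data are still referenced by the
 * transmit descriptors; it is freed later in mac_drv_clear_txd().
 */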
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("queue_txd_mb: mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_txd_mb++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.txd_tx_pipe == NULL) {
		smc->os.hwm.txd_tx_pipe = mb ;
	}
	else {
		smc->os.hwm.txd_tx_tail->sm_next = mb ;
	}
	smc->os.hwm.txd_tx_tail = mb ;
}
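
/*
 * get_txd_mb - pop the next queued transmit SMbuf (or NULL).
 */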
static SMbuf *get_txd_mb(struct s_smc *smc)
{
	SMbuf *mb ;

	if ((mb = smc->os.hwm.txd_tx_pipe)) {
		smc->os.hwm.queued_txd_mb-- ;
		smc->os.hwm.txd_tx_pipe = mb->sm_next ;
	}
	DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
	return mb;
}
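
/*
 * smt_send_mbuf - send an SMT frame: split the MBuf into page-aligned
 * fragments, queue them on the asynchronous transmit ring (LAN_TX)
 * and/or loop the frame back to the SMT (LOC_TX).
 */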
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;

	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/*
	 * determine the frame status
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
		   (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
		}
		else {
			DB_TX("Ring is down: terminate transmission",0,0,2) ;
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX("frame_status = 0x%x ",frame_status,0,5) ;

	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX("pass Mbuf to LLC queue",0,0,5) ;
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMT_MBUFs here, because it may
	 * be that the SMT wants to send more than one frame for one
	 * down call.
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}
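
/*
 * mac_drv_clear_txd - release all completely transmitted TxDs of both
 * transmit rings: run dma_complete() for every fragment, free queued
 * SMT MBufs, and report completed LLC frames via mac_drv_tx_complete().
 */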
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		for ( ; ; ) {
			frag_count = 0 ;

			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX("End of TxDs queue %d",i,0,4) ;
					goto free_next_queue ;
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX("mac_drv_tx_comp for TxD 0x%x",
					queue->tx_curr_get,0,4) ;
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
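
/*
 * mac_drv_clear_tx_queue - give up all pending transmit frames: clear
 * the OWN bits, release the TxDs via mac_drv_clear_txd(), and reload
 * the BMU start addresses. Requires the STOPPED state.
 */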
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int tx_used ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		/*
		 * switch the OWN bit of all pending tx descriptors
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all TxDs of both queues
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}

		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}

#ifdef	DEBUG
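
/*
 * mac_drv_debug_lev - set the run-time debug level of one of the
 * debug facilities (DEBUG builds only).
 */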
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
	switch(flag) {
	case (int)NULL:
		DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
		DB_P.d_cfm = 0 ;
		DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef	SBA
		DB_P.d_sba = 0 ;
#endif
#ifdef	ESS
		DB_P.d_ess = 0 ;
#endif
		break ;
	case DEBUG_SMTF:
		DB_P.d_smtf = lev ;
		break ;
	case DEBUG_SMT:
		DB_P.d_smt = lev ;
		break ;
	case DEBUG_ECM:
		DB_P.d_ecm = lev ;
		break ;
	case DEBUG_RMT:
		DB_P.d_rmt = lev ;
		break ;
	case DEBUG_CFM:
		DB_P.d_cfm = lev ;
		break ;
	case DEBUG_PCM:
		DB_P.d_pcm = lev ;
		break ;
	case DEBUG_SBA:
#ifdef	SBA
		DB_P.d_sba = lev ;
#endif
		break ;
	case DEBUG_ESS:
#ifdef	ESS
		DB_P.d_ess = lev ;
#endif
		break ;
	case DB_HWM_RX:
		DB_P.d_os.hwm_rx = lev ;
		break ;
	case DB_HWM_TX:
		DB_P.d_os.hwm_tx = lev ;
		break ;
	case DB_HWM_GEN:
		DB_P.d_os.hwm_gen = lev ;
		break ;
	default:
		break ;
	}
}
#endif	/* DEBUG */