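/*
 * SMT hardware module (HWM) for the SysKonnect FDDI adapter family.
 * This file contains the descriptor ring management, the interrupt
 * service routine, and the receive and transmit paths that sit between
 * the generic SMT code and the OS-specific driver module.
 */
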
#ifndef lint
static char const ID_sccs[] = "@(#)hwmtm.c	1.40 99/05/31 (C) SK" ;
#endif

#define	HWMTM

#ifndef	FDDI
#define	FDDI
#endif

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"
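
/*
 * With COMMON_MB_POOL all adapter instances share one SMbuf pool;
 * call_count tracks how often mac_drv_check_space() has been called,
 * so that room for the pool is requested only for the first adapter.
 */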
#ifdef COMMON_MB_POOL
static SMbuf *mb_start = 0 ;
static SMbuf *mb_free = 0 ;
static int mb_init = FALSE ;
static int call_count = 0 ;
#endif

#ifdef DEBUG
#ifndef DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif

#ifdef NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif
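
/*
 * Local function prototypes.
 */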
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start, int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);
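
/*
 * External function prototypes: these must be provided by the
 * OS-specific driver module.
 */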
extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
			      volatile struct s_smt_fp_rxd *rxd, int frag_count);

#ifdef USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif

#ifdef ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif

extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);

#ifdef NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
			 int flag);
#endif

extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
			   int la_len);
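
/*
 * Public functions exported by this module.
 */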
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);

int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status);

u_int mac_drv_check_space(void);

SMbuf* smt_get_mbuf(struct s_smc *smc);

#ifdef DEBUG
	void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif
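
/*
 * Macro section.
 */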
#ifndef UNUSED
#ifdef lint
#define UNUSED(x)	(x) = (x)
#else
#define UNUSED(x)
#endif
#endif

#ifdef USE_CAN_ADDR
#define MA		smc->hw.fddi_canon_addr.a
#define	GROUP_ADDR_BIT	0x01
#else
#define	MA		smc->hw.fddi_home_addr.a
#define	GROUP_ADDR_BIT	0x80
#endif

#define RXD_TXD_COUNT	(HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
			SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)

#ifdef MB_OUTSIDE_SMC
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
			MAX_MBUF*sizeof(SMbuf))
#define	EXT_VIRT_MEM_2	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif

#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var)	((var) & 0xffff0000 | ((var) & 0xffff))
#else
#define CR_READ(var)	(__le32)(var)
#endif

#define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
			 IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
			 IS_R1_C | IS_XA_C | IS_XS_C)
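
/*
 * mac_drv_check_space() returns the size of the virtual memory block
 * the OS-specific module has to allocate before calling mac_drv_init().
 * With a common mbuf pool only the first call requests room for the
 * pool; later calls only request descriptor memory.
 *
 * A minimal init sequence in the OS-specific module might look like
 * this (a sketch, assuming mac_drv_get_space()/mac_drv_get_desc_mem()
 * hand out pieces of the allocated block):
 *
 *	u_int size = mac_drv_check_space() ;
 *	// allocate 'size' bytes, then:
 *	if (mac_drv_init(smc))
 *		return error ;
 */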
u_int mac_drv_check_space(void)
{
#ifdef MB_OUTSIDE_SMC
#ifdef COMMON_MB_POOL
	call_count++ ;
	if (call_count == 1) {
		return EXT_VIRT_MEM;
	}
	else {
		return EXT_VIRT_MEM_2;
	}
#else
	return EXT_VIRT_MEM;
#endif
#else
	return 0;
#endif
}
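
/*
 * mac_drv_init() allocates the descriptor memory and (if the mbuf pool
 * lives outside the SMC data structure) the SMbuf pool.  The descriptor
 * structures must be a multiple of 16 bytes in size; the code panics
 * otherwise.  Returns 0 on success and 1 if no memory was available.
 */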
int mac_drv_init(struct s_smc *smc)
{
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get the required memory for the RxDs and TxDs
	 */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space: the HWM cannot work */
	}

	/*
	 * get the memory for the SMT MBufs
	 */
#ifndef	MB_OUTSIDE_SMC
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space: the HWM cannot work */
	}
#else
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space: the HWM cannot work */
		}
	}
#endif
#endif
	return 0;
}
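
/*
 * init_driver_fplus() sets the initial values for the FORMAC+ mode
 * registers (mdr2init/mdr3init) and, with canonical addressing, the
 * frame-select register.
 */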
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
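
/*
 * init_descr_ring() links the descriptors of one ring into a circular
 * list: each descriptor gets the BMU_CHECK control bit and the virtual
 * and physical address of its successor.  It returns the physical
 * address of the first descriptor.
 */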
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long	phys ;

	DB_GEN(3, "descr ring starts at = %p", start);
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN(3, "descr ring ends at = %p", d1);
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}
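
/*
 * init_txd_ring() initializes the async and sync transmit descriptor
 * rings behind the receive ring in the descriptor area, sets up the
 * queue state (tx_curr_put/tx_curr_get, tx_free, tx_used) and writes
 * the physical ring start addresses into the BMU descriptor address
 * registers (B5_XA_DA / B5_XS_DA).
 */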
static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;

	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;
}
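
/*
 * init_rxd_ring() initializes the receive descriptor ring at the start
 * of the descriptor area and loads its physical start address into the
 * receive BMU register (B4_R1_DA).
 */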
static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the receive descriptors
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;
}
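
/*
 * init_fddi_driver() initializes the board, the FORMAC+, the SMbuf
 * pool and the HWM internal state, aligns the descriptor area to a
 * 16-byte boundary, sets up the descriptor rings and finally starts
 * the PLC.
 */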
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	mb = mb_start ;
	if (!mb_init) {
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the HWM internal queues and counters
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start address of the descriptor area is
	 * 16-byte aligned
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN(3, "i = %d", i);
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}
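
/*
 * smt_get_mbuf() takes an SMbuf from the free list, presets its data
 * offset and use count, and returns it (NULL if the pool is empty).
 */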
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN(3, "get SMbuf: mb = %p", mb);
	return mb;	/* may be NULL */
}
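
/*
 * smt_free_mbuf() decrements the use count of an SMbuf and returns it
 * to the free list when the count reaches zero; freeing a NULL mbuf
 * is a fatal error and triggers an SMT panic.
 */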
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{
	if (mb) {
		mb->sm_use_count-- ;
		DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
		/*
		 * If the use count is zero the mbuf can be freed;
		 * otherwise it is still referenced elsewhere
		 * (e.g. by a pending local transmission).
		 */
		if (!mb->sm_use_count) {
			DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef	COMMON_MB_POOL
			mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
			smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
			mb->sm_next = mb_free ;
			mb_free = mb ;
#endif
		}
	}
	else
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}
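
/*
 * mac_drv_repair_descr() is called after the BMUs have been stopped:
 * it gives descriptors that are still owned by the hardware (but do
 * not start a frame) back to the host and rewrites the descriptor
 * address registers, so transmission and reception can be restarted
 * without losing queued descriptors.  The hardware must be in the
 * STOPPED state.
 */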
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair the TxD rings and restart the transmit queues
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair the RxD ring and restart the receive queue
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}
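
/*
 * repair_txd_ring() walks the used part of a transmit descriptor ring,
 * clears the OWN bit of descriptors without a start-of-frame bit, and
 * returns the physical address at which the BMU should continue.
 */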
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor: clear the OWN bit
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}
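
/*
 * repair_rxd_ring() does the same for the receive descriptor ring:
 * descriptors that are owned by the BMU but do not carry the
 * start-of-frame bit are given back to the host.
 */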
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor: clear the OWN bit
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
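
/*
 * fddi_isr() is the interrupt service routine.  It polls and handles
 * all pending interrupt sources: PLC and FORMAC events, timer and
 * token interrupts, fatal parity/encoding errors (SMT panic), fast
 * transmit-complete and fast receive-complete interrupts.  Queued SMT
 * frames are passed to the LLC layer and pending SMT events are
 * dispatched before the routine returns.
 */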
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* interrupt source register */
	u_short		stu, stl ;
	SMbuf		*mb ;

#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN(7, "ISA = 0x%lx", is);

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC status reg 1 */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN(6, "Slow transmit complete");
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC status reg 2 */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN(6, "Slow receive complete");
				DB_GEN(7, "stl = %x : stu = %x", stl, stu);
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC status reg 3 */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN(6, "FORMAC Mode Register 3");
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* timer interrupt */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* restricted token monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* parity IRQ rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* fatal IRQ rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* fatal IRQ async tx queue */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* fatal IRQ sync tx queue */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * fast transmit complete, async/sync queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN(6, "Fast tx complete queue");
			/*
			 * clear IRQ; no IRQ is lost because we always
			 * service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * fast receive complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN(6, "Fast receive complete");
			/* clear IRQ */
#ifndef USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications are not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
#endif
	}

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}
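
/*
 *	RECEIVE FUNCTIONS
 */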
#ifndef	NDIS_OS2
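
/*
 * mac_drv_rx_mode() sets the HWM receive mode: it controls whether
 * SMT, NSA and directed-beacon frames are passed to the OS-specific
 * module and whether LLC frames are passed in promiscuous mode; all
 * other modes are forwarded to mac_set_rx_mode().
 */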
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
	switch(mode) {
	case RX_ENABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = TRUE ;
		break ;
	case RX_DISABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = FALSE ;
		break ;
	case RX_ENABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = TRUE ;
		break ;
	case RX_DISABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = FALSE ;
		break ;
	case RX_ENABLE_PASS_DB:
		smc->os.hwm.pass_DB = TRUE ;
		break ;
	case RX_DISABLE_PASS_DB:
		smc->os.hwm.pass_DB = FALSE ;
		break ;
	case RX_DISABLE_PASS_ALL:
		smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
		smc->os.hwm.pass_DB = FALSE ;
		smc->os.hwm.pass_llc_promisc = TRUE ;
		mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
		break ;
	case RX_DISABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = FALSE ;
		break ;
	case RX_ENABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = TRUE ;
		break ;
	case RX_ENABLE_ALLMULTI:
	case RX_DISABLE_ALLMULTI:
	case RX_ENABLE_PROMISC:
	case RX_DISABLE_PROMISC:
	case RX_ENABLE_NSA:
	case RX_DISABLE_NSA:
	default:
		mac_set_rx_mode(smc,mode) ;
		break ;
	}
}
#endif	/* ifndef NDIS_OS2 */
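
/*
 * process_receive() processes the received descriptors: it collects
 * the fragments of each frame, checks the receive frame status word,
 * filters out own and errored frames, passes LLC frames to the
 * OS-specific module via mac_drv_rx_complete() and copies SMT frames
 * into an SMbuf for smt_received_pack().
 */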
void process_receive(struct s_smc *smc)
{
	int i ;
	int n ;
	int frag_count ;	/* number of RxDs of the curr rx buf */
	int used_frags ;	/* number of RxDs of the curr frame */
	struct s_smt_rx_queue *queue ;	/* points to the queue ctl struct */
	struct s_smt_fp_rxd volatile *r ;	/* rxd pointer */
	struct s_smt_fp_rxd volatile *rxd ;	/* first rxd of rx frame */
	u_long rbctrl ;		/* receive buffer control word */
	u_long rfsw ;		/* receive frame status word */
	u_short rx_used ;
	u_char far *virt ;
	char far *data ;
	SMbuf *mb ;
	u_char fc ;		/* Frame control */
	int len ;		/* Frame length */

	smc->os.hwm.detec_count = 0 ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	NDD_TRACE("RHxB",0,0,0) ;
	for ( ; ; ) {
		r = queue->rx_curr_get ;
		rx_used = queue->rx_used ;
		frag_count = 0 ;

#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			goto rx_end ;
		}
#endif
#ifdef	NDIS_OS2
		if (offDepth) {
			smc->os.hwm.rx_break = 1 ;
			goto rx_end ;
		}
		smc->os.hwm.rx_break = 0 ;
#endif
#ifdef	ODI2
		if (smc->os.hwm.rx_break) {
			goto rx_end ;
		}
#endif
		n = 0 ;
		do {
			DB_RX(5, "Check RxD %p for OWN and EOF", r);
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

			if (rbctrl & BMU_OWN) {
				NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
				DB_RX(4, "End of RxDs");
				goto rx_end ;
			}
			/*
			 * out of RxD detection
			 */
			if (!rx_used) {
				SK_BREAK() ;
				SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
				/*
				 * Either we don't have an RxD or all RxDs
				 * are filled; it is therefore safe to set
				 * the STOPPED flag and flush the queue.
				 */
				smc->hw.hw_state = STOPPED ;
				mac_drv_clear_rx_queue(smc) ;
				smc->hw.hw_state = STARTED ;
				mac_drv_fill_rxd(smc) ;
				smc->os.hwm.detec_count = 0 ;
				goto rx_end ;
			}
			rfsw = le32_to_cpu(r->rxd_rfsw) ;
			if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
				/*
				 * The BMU_STF bit was lost: one frame was
				 * placed into more than one rx buffer.
				 * Skip the frame by flagging a length error.
				 *
				 * If frag_count == 0 the missing STF bit
				 * belongs to the current frame; search for
				 * the EOF bit to complete it.  Otherwise
				 * the fragment belongs to the next frame;
				 * exit the loop and process the frame.
				 */
				SK_BREAK() ;
				rfsw = 0 ;
				if (frag_count) {
					break ;
				}
			}
			n += rbctrl & 0xffff ;
			r = r->rxd_next ;
			frag_count++ ;
			rx_used-- ;
		} while (!(rbctrl & BMU_EOF)) ;
		used_frags = frag_count ;
		DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);

		/* may be the next 2 DRV_BUF_FLUSH() can be skipped, because */
		/* BMU_ST_BUF will not be changed by the ASIC */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
			rx_used-- ;
		}
		DB_RX(5, "STF bit found");

		/*
		 * the received frame is complete
		 */
		rxd = queue->rx_curr_get ;
		queue->rx_curr_get = r ;
		queue->rx_free += frag_count ;
		queue->rx_used = rx_used ;

		/*
		 * ASIC Errata no. 7 (STF - Bit Bug)
		 */
		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		smc->hw.fp.err_stats.err_valid++ ;
		smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

		/* the length of the data including the FC */
		len = (rfsw & RD_LENGTH) - 4 ;

		DB_RX(4, "frame length = %d", len);
		/*
		 * check the frame length and all error flags
		 */
		if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
			if (rfsw & RD_S_MSRABT) {
				DB_RX(2, "Frame aborted by the FORMAC");
				smc->hw.fp.err_stats.err_abort++ ;
			}
			/*
			 * check frame status
			 */
			if (rfsw & RD_S_SEAC2) {
				DB_RX(2, "E-Indicator set");
				smc->hw.fp.err_stats.err_e_indicator++ ;
			}
			if (rfsw & RD_S_SFRMERR) {
				DB_RX(2, "CRC error");
				smc->hw.fp.err_stats.err_crc++ ;
			}
			if (rfsw & RX_FS_IMPL) {
				DB_RX(2, "Implementer frame");
				smc->hw.fp.err_stats.err_imp_frame++ ;
			}
			goto abort_frame ;
		}
		if (len > FDDI_RAW_MTU-4) {
			DB_RX(2, "Frame too long error");
			smc->hw.fp.err_stats.err_too_long++ ;
			goto abort_frame ;
		}
		/*
		 * SUPERNET 3 bug: the FORMAC delivers status words
		 * of aborted frames to the BMU
		 */
		if (len <= 4) {
			DB_RX(2, "Frame length = 0");
			goto abort_frame ;
		}

		if (len != (n-4)) {
			DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
			smc->os.hwm.rx_len_error++ ;
			goto abort_frame ;
		}

		/*
		 * drop frames sent by this station (SA == MA)
		 */
		virt = (u_char far *) rxd->rxd_virt ;
		DB_RX(2, "FC = %x", *virt);
		if (virt[12] == MA[5] &&
		    virt[11] == MA[4] &&
		    virt[10] == MA[3] &&
		    virt[9] == MA[2] &&
		    virt[8] == MA[1] &&
		    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
			goto abort_frame ;
		}

		/*
		 * test if LLC frame
		 */
		if (rfsw & RX_FS_LLC) {
			/*
			 * if pass_llc_promisc is disabled,
			 * check if DA == MA or broadcast or multicast
			 */
			if (!smc->os.hwm.pass_llc_promisc) {
				if(!(virt[1] & GROUP_ADDR_BIT)) {
					if (virt[6] != MA[5] ||
					    virt[5] != MA[4] ||
					    virt[4] != MA[3] ||
					    virt[3] != MA[2] ||
					    virt[2] != MA[1] ||
					    virt[1] != MA[0]) {
						DB_RX(2, "DA != MA and not multi- or broadcast");
						goto abort_frame ;
					}
				}
			}

			/*
			 * LLC frame received
			 */
			DB_RX(4, "LLC - receive");
			mac_drv_rx_complete(smc,rxd,frag_count,len) ;
		}
		else {
			if (!(mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_RX(4, "No SMbuf; receive terminated");
				goto abort_frame ;
			}
			data = smtod(mb,char *) - 1 ;

			/*
			 * copy the frame into an SMT MBuf
			 */
#ifdef USE_OS_CPY
			hwm_cpy_rxd2mb(rxd,data,len) ;
#else
			for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
				n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
				DB_RX(6, "cp SMT frame to mb: len = %d", n);
				memcpy(data,r->rxd_virt,n) ;
				data += n ;
			}
			data = smtod(mb,char *) - 1 ;
#endif
			fc = *(char *)mb->sm_data = *data ;
			mb->sm_len = len - 1 ;		/* len - fc */
			data++ ;

			/*
			 * SMT frame: dispatch on the frame class
			 */
			switch(fc) {
			case FC_SMT_INFO :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				if (smc->os.hwm.pass_SMT) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_SMT_NSA :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				/*
				 * pass the frame if pass_NSA is set, or
				 * if pass_SMT is set and the A-Indicator
				 * is not set
				 */
				if (smc->os.hwm.pass_NSA ||
					(smc->os.hwm.pass_SMT &&
					!(rfsw & A_INDIC))) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_BEACON :
				if (smc->os.hwm.pass_DB) {
					DB_RX(5, "pass DB frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}
				smt_free_mbuf(smc,mb) ;
				break ;
			default :
				/*
				 * unknown FC: abort the frame
				 */
				DB_RX(2, "unknown FC error");
				smt_free_mbuf(smc,mb) ;
				DB_RX(5, "requeue RxD");
				mac_drv_requeue_rxd(smc,rxd,frag_count) ;
				if ((fc & 0xf0) == FC_MAC)
					smc->hw.fp.err_stats.err_mac_frame++ ;
				else
					smc->hw.fp.err_stats.err_imp_frame++ ;

				break ;
			}
		}

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

		continue ;

abort_frame:
		DB_RX(5, "requeue RxD");
		mac_drv_requeue_rxd(smc,rxd,frag_count) ;

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
	}
rx_end:
#ifdef	ALL_RX_COMPLETE
	mac_drv_all_receives_complete(smc) ;
#endif
	return ;
}
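
/*
 * smt_to_llc() hands a queued SMT frame to the LLC layer via
 * mac_drv_rx_init() and frees the SMbuf afterwards.
 */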
static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
	u_char	fc ;

	DB_RX(4, "send a queued frame to the llc layer");
	smc->os.hwm.r.len = mb->sm_len ;
	smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
	fc = *smc->os.hwm.r.mb_pos ;
	(void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
		smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
	smt_free_mbuf(smc,mb) ;
}
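
/*
 * hwm_rx_frag() inserts one receive buffer fragment into the receive
 * descriptor ring: it fills the current RxD with the virtual and
 * physical buffer address and the control word (OWN, CHECK, IRQ on
 * EOF, fragment bits, length) and starts the receive BMU.
 *
 * The OS-specific module calls this once per receive buffer from
 * mac_drv_fill_rxd(), e.g. (a sketch, assuming one buffer per frame):
 *
 *	hwm_rx_frag(smc,virt,phys,buf_len,FIRST_FRAG|LAST_FRAG) ;
 */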
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32	rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
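
/*
 * mac_drv_clear_rx_queue() flushes the receive queue after the
 * hardware has been stopped: it returns all used RxDs to the host,
 * completes the pending DMA transfers and calls mac_drv_clear_rxd()
 * for each queued receive buffer.  The hardware must be in the
 * STOPPED state.
 */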
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int frag_count ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}

	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX(5, "clear_rx_queue");

	/*
	 * dma_complete() and mac_drv_clear_rxd() for all RxDs / rx buffers
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX(5, "switch OWN bit of RxD 0x%p", r);
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX(5, "STF bit found");
		next_rxd = r ;

		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}

		DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
		      queue->rx_curr_get, frag_count);
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}
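
/*
 *	SEND FUNCTIONS
 *
 * hwm_tx_init() must be called before hwm_tx_frag() for every frame
 * the LLC layer wants to send.  It selects the transmit queue
 * (sync/async), builds the transmit descriptor template, classifies
 * the frame (LAN and/or local transmission) by its frame control and
 * checks whether the ring is up and enough TxDs are free; the
 * (possibly modified) frame status is returned to the caller.
 *
 * A typical driver transmit path would look like this (a sketch,
 * assuming a frame mapped into three DMA fragments):
 *
 *	frame_status = hwm_tx_init(smc,fc,3,frame_len,frame_status) ;
 *	if (frame_status & (LAN_TX | LOC_TX)) {
 *		hwm_tx_frag(smc,virt1,phys1,len1,frame_status | FIRST_FRAG) ;
 *		hwm_tx_frag(smc,virt2,phys2,len2,frame_status) ;
 *		hwm_tx_frag(smc,virt3,phys3,len3,frame_status | LAST_FRAG) ;
 *	}
 */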
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;
	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	smc->os.hwm.tx_len = frame_len ;
	DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
		frame_status |= LAN_TX ;
	}
	else {
		switch (fc) {
		case FC_SMT_INFO :
		case FC_SMT_NSA :
			frame_status |= LAN_TX ;
			break ;
		case FC_SMT_LOC :
			frame_status |= LOC_TX ;
			break ;
		case FC_SMT_LAN_LOC :
			frame_status |= LAN_TX | LOC_TX ;
			break ;
		default :
			SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
		}
	}
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX(2, "Ring is down: terminate LAN_TX");
	}
	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX(2, "Out of TxDs, terminate LAN_TX");
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX(2, "Out of TxDs, terminate LAN_TX");
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}
	DB_TX(3, "frame_status = %x", frame_status);
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return frame_status;
}
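
/*
 * hwm_tx_frag() queues one fragment of a transmit frame: for a LAN
 * transmission it fills the next TxD and starts the transmit BMU; for
 * a local transmission it copies the fragment into an SMbuf and, with
 * the last fragment, passes the frame to smt_received_pack().
 */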
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/*
	 * the transmit queue and the descriptor template were
	 * selected by hwm_tx_init()
	 */
	t = queue->tx_curr_put ;

	DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
	if (frame_status & LAN_TX) {
		DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX(3, "LOC_TX:");
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX(4, "No SMbuf; transmit terminated");
			}
			else {
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef USE_OS_CPY
#ifdef PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX(3, "copy fragment into MBuf");
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb() copies the frame
				 * fragments into the SMbuf data area;
				 * the first TxD of the frame is passed
				 * here
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX(3, "pass LLC frame to SMT");
				smt_received_pack(smc,smc->os.hwm.tx_mb,
						RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
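
/*
 * queue_llc_rx() appends a locally transmitted SMT frame to the LLC
 * receive queue; if we are not inside the ISR an interrupt is forced
 * so that the queue is emptied in fddi_isr().
 */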
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN(4, "queue_llc_rx: mb = %p", mb);
	smc->os.hwm.queued_rx_frames++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.llc_rx_pipe == NULL) {
		smc->os.hwm.llc_rx_pipe = mb ;
	}
	else {
		smc->os.hwm.llc_rx_tail->sm_next = mb ;
	}
	smc->os.hwm.llc_rx_tail = mb ;

	/*
	 * force an IRQ to deliver the queued frame
	 */
	if (!smc->os.hwm.isr_flag) {
		smt_force_irq(smc) ;
	}
}
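
/*
 * get_llc_rx() dequeues an SMbuf from the LLC receive queue.
 */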
static SMbuf *get_llc_rx(struct s_smc *smc)
{
	SMbuf	*mb ;

	if ((mb = smc->os.hwm.llc_rx_pipe)) {
		smc->os.hwm.queued_rx_frames-- ;
		smc->os.hwm.llc_rx_pipe = mb->sm_next ;
	}
	DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
	return mb;
}
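
/*
 * queue_txd_mb() queues an SMbuf of a transmitted SMT frame; the mbuf
 * is freed by mac_drv_clear_txd() when the corresponding descriptors
 * have been transmitted.
 */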
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN(4, "queue_txd_mb: mb = %p", mb);
	smc->os.hwm.queued_txd_mb++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.txd_tx_pipe == NULL) {
		smc->os.hwm.txd_tx_pipe = mb ;
	}
	else {
		smc->os.hwm.txd_tx_tail->sm_next = mb ;
	}
	smc->os.hwm.txd_tx_tail = mb ;
}
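
/*
 * get_txd_mb() dequeues an SMbuf from the transmit queue.
 */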
static SMbuf *get_txd_mb(struct s_smc *smc)
{
	SMbuf *mb ;

	if ((mb = smc->os.hwm.txd_tx_pipe)) {
		smc->os.hwm.queued_txd_mb-- ;
		smc->os.hwm.txd_tx_pipe = mb->sm_next ;
	}
	DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
	return mb;
}
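
/*
 * smt_send_mbuf() sends an SMT frame: it prepends the frame control
 * byte, splits the mbuf into at most three page-aligned fragments,
 * decides between LAN and local transmission, fills and starts the
 * transmit descriptors and queues the mbuf for later release.
 */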
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);

	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/*
	 * determine the frame status
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
		    (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX(2, "Ring is down: terminate LAN_TX");
		}
		else {
			DB_TX(2, "Ring is down: terminate transmission");
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX(5, "frame_status = 0x%x", frame_status);

	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX(5, "init TxD = 0x%p", t);
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX(5, "pass Mbuf to LLC queue");
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMbufs here, because it
	 * can happen that the SMT runs out of MBufs
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}
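
/*
 * mac_drv_clear_txd() releases all transmitted descriptors of both
 * transmit queues: it completes the DMA transfer for every fragment,
 * frees the SMbuf of an SMT transmission or calls
 * mac_drv_tx_complete() for an LLC frame, and updates the queue
 * counters.
 */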
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);

		for ( ; ; ) {
			frag_count = 0 ;

			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX(4, "End of TxDs queue %d", i);
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p",
				      queue->tx_curr_get);
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
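
/*
 * mac_drv_clear_tx_queue() flushes the transmit queues after the
 * hardware has been stopped: it returns all used TxDs to the host,
 * releases them via mac_drv_clear_txd() and rewrites the descriptor
 * address registers so both queues restart at a clean descriptor.
 * The hardware must be in the STOPPED state.
 */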
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int tx_used ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);

		/*
		 * switch the OWN bit of all pending frames to the host
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX(5, "switch OWN bit of TxD 0x%p", t);
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all pending TxDs
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}

		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}
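
/*
 *	DEBUG FUNCTIONS
 */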
#ifdef	DEBUG
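
/*
 * mac_drv_debug_lev() sets the debug level of one debug facility
 * (SMT, ECM, RMT, PCM, ... or the HWM rx/tx/general paths); a flag
 * of NULL resets all levels to zero.
 */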
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
	switch(flag) {
	case (int)NULL:
		DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
		DB_P.d_cfm = 0 ;
		DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef	SBA
		DB_P.d_sba = 0 ;
#endif
#ifdef	ESS
		DB_P.d_ess = 0 ;
#endif
		break ;
	case DEBUG_SMTF:
		DB_P.d_smtf = lev ;
		break ;
	case DEBUG_SMT:
		DB_P.d_smt = lev ;
		break ;
	case DEBUG_ECM:
		DB_P.d_ecm = lev ;
		break ;
	case DEBUG_RMT:
		DB_P.d_rmt = lev ;
		break ;
	case DEBUG_CFM:
		DB_P.d_cfm = lev ;
		break ;
	case DEBUG_PCM:
		DB_P.d_pcm = lev ;
		break ;
	case DEBUG_SBA:
#ifdef	SBA
		DB_P.d_sba = lev ;
#endif
		break ;
	case DEBUG_ESS:
#ifdef	ESS
		DB_P.d_ess = lev ;
#endif
		break ;
	case DB_HWM_RX:
		DB_P.d_os.hwm_rx = lev ;
		break ;
	case DB_HWM_TX:
		DB_P.d_os.hwm_tx = lev ;
		break ;
	case DB_HWM_GEN:
		DB_P.d_os.hwm_gen = lev ;
		break ;
	default:
		break ;
	}
}
#endif	/* ifdef DEBUG */