1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/net.h>
40#include <linux/skbuff.h>
41#include <linux/netdevice.h>
42#include <linux/if_arp.h>
43#include <linux/delay.h>
44#include <linux/hdlc.h>
45#include <linux/ioport.h>
46#include <linux/init.h>
47#include <linux/gfp.h>
48#include <asm/dma.h>
49#include <asm/io.h>
50#define RT_LOCK
51#define RT_UNLOCK
52#include <linux/spinlock.h>
53
54#include "z85230.h"
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72static inline int z8530_read_port(unsigned long p)
73{
74 u8 r = inb(Z8530_PORT_OF(p));
75
76 if (p & Z8530_PORT_SLEEP)
77 udelay(5);
78 return r;
79}
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96static inline void z8530_write_port(unsigned long p, u8 d)
97{
98 outb(d, Z8530_PORT_OF(p));
99 if (p & Z8530_PORT_SLEEP)
100 udelay(5);
101}
102
103static void z8530_rx_done(struct z8530_channel *c);
104static void z8530_tx_done(struct z8530_channel *c);
105
106
107
108
109
110
111
112
113
114
115
116
/* read_zsreg - Read a Z8530 channel register
 * @c: The Z8530 channel to read from
 * @reg: Register number (0 reads the control port directly)
 *
 * For a non-zero register the number is first written to the control
 * port (register pointer), then the value is read back. The pointer
 * write and the read must not be interleaved with other accesses, so
 * callers serialise via the channel lock.
 */
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if (reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}
123
124
125
126
127
128
129
130
131
132static inline u8 read_zsdata(struct z8530_channel *c)
133{
134 u8 r;
135
136 r = z8530_read_port(c->dataio);
137 return r;
138}
139
140
141
142
143
144
145
146
147
148
149
150
151
/* write_zsreg - Write to a Z8530 channel register
 * @c: The Z8530 channel
 * @reg: Register number (0 writes the control port directly)
 * @val: Value to write
 *
 * For a non-zero register the number is written to the control port
 * first (register pointer), then the value. As with read_zsreg() the
 * two-cycle sequence must be protected by the channel lock.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if (reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);
}
158
159
160
161
162
163
164
165
166
/* write_zsctrl - Write to the control register (register 0)
 * @c: The Z8530 channel
 * @val: Command byte to issue
 *
 * Single-cycle write to the control port; used for chip commands
 * such as interrupt acknowledgement and error reset.
 */
static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}
171
172
173
174
175
176
177
178
/* write_zsdata - Write to the data port of a channel
 * @c: The Z8530 channel
 * @val: Byte to send
 */
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}
183
184
185
186
/* Register loading table containing only the 255 terminator: loads
 * nothing. Intended for channels that are unused ("dead") but must
 * still be passed through z8530_channel_load().
 */
u8 z8530_dead_port[] = {
	255
};
EXPORT_SYMBOL(z8530_dead_port);
191
192
193
194
195
196
197
198
/* Register loading table: (register, value) pairs terminated by 255,
 * consumed by z8530_channel_load(). Sets the channel up for SDLC/HDLC
 * framing with NRZ encoding (Kilostream-style sync link, PIO mode).
 */
u8 z8530_hdlc_kilostream[] = {
	4, SYNC_ENAB | SDLC | X1CLK,
	2, 0,	/* No vector */
	1, 0,
	3, ENT_HM | RxCRC_ENAB | Rx8,
	5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
	9, 0,	/* Disable interrupts while loading */
	6, 0xFF,
	7, FLAG,
	10, ABUNDER | NRZ | CRCPS,
	11, TCTRxCP,
	14, DISDPLL,
	15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
	1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
	9, NV | MIE | NORESET,
	255	/* end of table */
};
EXPORT_SYMBOL(z8530_hdlc_kilostream);
217
218
219
220
/* As z8530_hdlc_kilostream, but for the enhanced Z85230: identical
 * except for the extra register-23 entry (an extended register,
 * reached via the R15 shadow bit in z8530_channel_load()).
 */
u8 z8530_hdlc_kilostream_85230[] = {
	4, SYNC_ENAB | SDLC | X1CLK,
	2, 0,	/* No vector */
	1, 0,
	3, ENT_HM | RxCRC_ENAB | Rx8,
	5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
	9, 0,	/* Disable interrupts while loading */
	6, 0xFF,
	7, FLAG,
	10, ABUNDER | NRZ | CRCPS,
	11, TCTRxCP,
	14, DISDPLL,
	15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
	1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
	9, NV | MIE | NORESET,
	23, 3,	/* Extended mode AUTO TX and EOM */

	255	/* end of table */
};
EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
241
242
243
244
245
246
247
248
249
250
251
252
253
254static void z8530_flush_fifo(struct z8530_channel *c)
255{
256 read_zsreg(c, R1);
257 read_zsreg(c, R1);
258 read_zsreg(c, R1);
259 read_zsreg(c, R1);
260 if (c->dev->type == Z85230) {
261 read_zsreg(c, R1);
262 read_zsreg(c, R1);
263 read_zsreg(c, R1);
264 read_zsreg(c, R1);
265 }
266}
267
268
269
270
271
272
273
274
275
276
277
278
279static void z8530_rtsdtr(struct z8530_channel *c, int set)
280{
281 if (set)
282 c->regs[5] |= (RTS | DTR);
283 else
284 c->regs[5] &= ~(RTS | DTR);
285 write_zsreg(c, R5, c->regs[5]);
286}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
/* z8530_rx - Handle a PIO receive event
 * @c: Z8530 channel to process
 *
 * Receive interrupt in PIO mode: drain the chip FIFO by hand one byte
 * at a time, checking per-byte status. On end-of-frame either push the
 * assembled buffer upstream (z8530_rx_done) or, on overrun/CRC error,
 * rewind and discard. Finally ack the interrupt.
 */
static void z8530_rx(struct z8530_channel *c)
{
	u8 ch, stat;

	while (1) {
		/* FIFO empty? R0 bit 0 is "Rx character available" */
		if (!(read_zsreg(c, R0) & 1))
			break;
		ch = read_zsdata(c);
		stat = read_zsreg(c, R1);

		/* Store the byte only while there is room; extra bytes
		 * of an oversized frame are silently dropped.
		 */
		if (c->count < c->max) {
			*c->dptr++ = ch;
			c->count++;
		}

		if (stat & END_FR) {
			/* Error? Then rewind the buffer and discard
			 * the frame.
			 */
			if (stat & (Rx_OVR | CRC_ERR)) {
				if (c->skb)
					c->dptr = c->skb->data;
				c->count = 0;
				if (stat & Rx_OVR) {
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if (stat & CRC_ERR) {
					c->rx_crc_err++;
				}

			} else {
				/* Good frame: shove it upstream and
				 * reset the CRC engine for the next one.
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}

	/* Clear irq */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
361
362
363
364
365
366
367
368
369
370
371
/* z8530_tx - Handle a PIO transmit event
 * @c: Z8530 channel to process
 *
 * Feed the transmitter by hand: push bytes while the chip reports the
 * Tx buffer empty and we have data left. When the last byte goes out,
 * arm end-of-message handling; once the frame is fully fed, complete
 * it and ack the interrupt.
 */
static void z8530_tx(struct z8530_channel *c)
{
	while (c->txcount) {
		/* FIFO full? R0 bit 2 is "Tx buffer empty" */
		if (!(read_zsreg(c, R0) & 4))
			return;
		c->txcount--;
		/* Shovel out the next byte */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* Last byte queued: we are about to underflow */
		if (c->txcount == 0) {
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
		}
	}

	/* End of frame TX - complete it and fire up the next one */
	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}
398
399
400
401
402
403
404
405
406
407
408
/* z8530_status - Handle a PIO status exception
 * @chan: Z8530 channel to process
 *
 * A status event occurred in PIO synchronous mode: either a transmit
 * underrun (the frame on the wire is now broken) or a DCD transition
 * (carrier up/down). Only bits that changed since the last event are
 * acted on for DCD.
 */
static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;	/* bits that changed since last event */

	chan->status = status;

	if (status & TxEOM) {
		/* Tx underrun: count it and complete the broken frame */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}
	/* Ack the interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
442
/* Interrupt vectors for a channel running synchronous PIO
 * (installed by z8530_sync_open()).
 */
struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};
EXPORT_SYMBOL(z8530_sync);
449
450
451
452
453
454
455
456
457
458
459
/* z8530_dma_rx - Handle a receive event while in DMA mode
 * @chan: Channel to process
 *
 * With RX DMA running the data bytes never pass through us, so an RX
 * interrupt here means a special condition: read and discard the
 * status registers, and on end-of-frame hand off to z8530_rx_done().
 * If DMA is currently off (between flips) fall back to PIO draining.
 */
static void z8530_dma_rx(struct z8530_channel *chan)
{
	if (chan->rxdma_on) {
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status = read_zsreg(chan, R1);

		if (status & END_FR)
			z8530_rx_done(chan);	/* Fire up the next one */

		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	} else {
		/* DMA is off right now; drain the FIFO the slow way */
		z8530_rx(chan);
	}
}
481
482
483
484
485
486
487
488
489static void z8530_dma_tx(struct z8530_channel *chan)
490{
491 if (!chan->dma_tx) {
492 pr_warn("Hey who turned the DMA off?\n");
493 z8530_tx(chan);
494 return;
495 }
496
497 pr_err("DMA tx - bogus event!\n");
498 z8530_tx(chan);
499}
500
501
502
503
504
505
506
507
508
509
/* z8530_dma_status - Handle a status exception while in DMA mode
 * @chan: Z8530 channel to process
 *
 * In TX DMA mode a TxEOM event marks the end of the DMA'd frame: stop
 * the DMA channel and complete the packet. DCD transitions are handled
 * the same way as in PIO mode (z8530_status).
 */
static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;	/* bits that changed since last event */

	chan->status = status;

	if (chan->dma_tx) {
		if (status & TxEOM) {
			unsigned long flags;

			/* Transmit DMA is complete: shut the channel down */
			flags = claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on = 0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck) {
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	/* Ack the interrupt */
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
550
/* Interrupt vectors for full DMA operation (RX and TX via DMA). */
static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/* Interrupt vectors for the split mode: PIO receive, DMA transmit. */
static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
562
563
564
565
566
567
568
569
570
571
/* z8530_rx_clear - Receive handler for a closed channel
 * @c: Z8530 channel to shut up
 *
 * Installed (via z8530_nop) while a channel is down: read and discard
 * the data and status bytes, then ack so the IRQ line is released.
 */
static void z8530_rx_clear(struct z8530_channel *c)
{
	/* Data and status bytes */
	u8 stat;

	read_zsdata(c);
	stat = read_zsreg(c, R1);

	if (stat & END_FR)
		write_zsctrl(c, RES_Rx_CRC);

	/* Clear irq */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}
588
589
590
591
592
593
594
595
596
597
/* z8530_tx_clear - Transmit handler for a closed channel
 * @c: Z8530 channel to shut up
 *
 * Acknowledge and discard transmit interrupts on a down channel.
 */
static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}
603
604
605
606
607
608
609
610
611
612
/* z8530_status_clear - Status handler for a closed channel
 * @chan: Z8530 channel to shut up
 *
 * Acknowledge and discard status interrupts on a down channel.
 */
static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status = read_zsreg(chan, R0);

	if (status & TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}
622
/* Interrupt vectors installed while a channel is closed: every event
 * is acknowledged and discarded so the IRQ line stays quiet.
 */
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};
EXPORT_SYMBOL(z8530_nop);
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
/* z8530_interrupt - Handle an interrupt from a Z8530
 * @irq: Interrupt number
 * @dev_id: The Z8530 device that is interrupting
 *
 * Scan both channels of the chip for pending events (the combined
 * interrupt status is read via channel A's R3) and dispatch to the
 * per-channel callback vectors; the two channels may be in different
 * modes, hence the indirection. Bounded to 5000 iterations in case
 * the chip jams the IRQ line.
 */
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev = dev_id;
	u8 intr;
	/* NOTE(review): this re-entry guard is a plain non-atomic flag;
	 * two CPUs could both observe 0 and proceed. The spinlock below
	 * already serialises the real work — confirm whether the flag
	 * can be dropped or should use test_and_set_bit().
	 */
	static volatile int locker=0;
	int work = 0;
	struct z8530_irqhandler *irqs;

	if (locker) {
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker = 1;

	spin_lock(&dev->lock);

	while (++work < 5000) {
		intr = read_zsreg(&dev->chanA, R3);
		if (!(intr &
		    (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
			break;

		/* Dispatch channel A events through its current vector
		 * table (mode-dependent: PIO, DMA or nop).
		 */
		irqs = dev->chanA.irqs;

		if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
			if (intr & CHARxIP)
				irqs->rx(&dev->chanA);
			if (intr & CHATxIP)
				irqs->tx(&dev->chanA);
			if (intr & CHAEXT)
				irqs->status(&dev->chanA);
		}

		/* Same for channel B */
		irqs = dev->chanB.irqs;

		if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
			if (intr & CHBRxIP)
				irqs->rx(&dev->chanB);
			if (intr & CHBTxIP)
				irqs->tx(&dev->chanB);
			if (intr & CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if (work == 5000)
		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
		       dev->name, intr);
	/* Ok all done */
	locker = 0;
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(z8530_interrupt);
707
/* Default register image copied into each channel's regs[] cache by
 * do_z8530_init(); all zero except index 12 (0x55), the low byte of
 * the baud-rate generator time constant.
 */
static const u8 reg_init[16] = {
	0, 0, 0, 0,
	0, 0, 0, 0,
	0, 0, 0, 0,
	0x55, 0, 0, 0
};
714
715
716
717
718
719
720
721
722
/* z8530_sync_open - Open a Z8530 channel for PIO
 * @dev: The network interface we are using
 * @c: The Z8530 channel to open in synchronous PIO mode
 *
 * Switch a Z8530 into synchronous mode without DMA assist: install the
 * PIO interrupt vectors, prime the receive buffers, raise RTS/DTR and
 * enable the receiver and transmit interrupts. Always returns 0.
 */
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu + 64;	/* extra slack over the interface MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up (two calls prime skb and skb2) */
	z8530_rx_done(c);
	z8530_rx_done(c);
	z8530_rtsdtr(c, 1);
	c->dma_tx = 0;
	c->regs[R1] |= TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_sync_open);
749
750
751
752
753
754
755
756
757
758int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
759{
760 u8 chk;
761 unsigned long flags;
762
763 spin_lock_irqsave(c->lock, flags);
764 c->irqs = &z8530_nop;
765 c->max = 0;
766 c->sync = 0;
767
768 chk = read_zsreg(c, R0);
769 write_zsreg(c, R3, c->regs[R3]);
770 z8530_rtsdtr(c, 0);
771
772 spin_unlock_irqrestore(c->lock, flags);
773 return 0;
774}
775EXPORT_SYMBOL(z8530_sync_close);
776
777
778
779
780
781
782
783
784
785
/* z8530_sync_dma_open - Open a Z8530 for DMA I/O in both directions
 * @dev: The network device to attach
 * @c: The Z8530 channel to configure
 *
 * Set up a Z85x30 device for synchronous DMA. Two ISA DMA channels
 * (c->rxdma, c->txdma) must already be assigned. DMA-safe flip
 * buffers are carved out of GFP_DMA pages, two halves per page.
 * Returns 0, -EMSGSIZE if the MTU exceeds half a page, or -ENOBUFS
 * on allocation failure.
 */
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu + 64;	/* extra slack over the interface MTU */
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/* Load the DMA interfaces up */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/* Allocate the DMA flip buffers. Limit by page size: each
	 * direction gets two half-page buffers out of one page.
	 */
	if (c->mtu > PAGE_SIZE / 2)
		return -EMSGSIZE;

	c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!c->rx_buf[0])
		return -ENOBUFS;
	c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;

	c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!c->tx_dma_buf[0]) {
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0] = NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;

	c->tx_dma_used = 0;
	c->dma_tx = 1;
	c->dma_num = 0;
	c->dma_ready = 1;

	/* Enable DMA control mode */
	spin_lock_irqsave(c->lock, cflags);

	/* TX DMA via DIR/REQ; TX interrupts off, the DMA request line
	 * drives transmission instead.
	 */
	c->regs[R14] |= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/* RX DMA via W/REQ; error interrupts stay on so special
	 * conditions still reach z8530_dma_rx().
	 */
	c->regs[R1] |= WT_FN_RDYFN;
	c->regs[R1] |= WT_RDY_RT;
	c->regs[R1] |= INT_ERR_Rx;
	c->regs[R1] &= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1] |= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/* Set up the ISA DMA controller: RX armed immediately,
	 * TX left disabled until a packet is queued.
	 */
	dflags = claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);	/* 0x10: auto-init */
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/* Select the DMA interrupt handlers */
	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c, 1);
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}
EXPORT_SYMBOL(z8530_sync_dma_open);
890
891
892
893
894
895
896
897
898
899int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
900{
901 u8 chk;
902 unsigned long flags;
903
904 c->irqs = &z8530_nop;
905 c->max = 0;
906 c->sync = 0;
907
908
909
910
911 flags = claim_dma_lock();
912 disable_dma(c->rxdma);
913 clear_dma_ff(c->rxdma);
914
915 c->rxdma_on = 0;
916
917 disable_dma(c->txdma);
918 clear_dma_ff(c->txdma);
919 release_dma_lock(flags);
920
921 c->txdma_on = 0;
922 c->tx_dma_used = 0;
923
924 spin_lock_irqsave(c->lock, flags);
925
926
927
928
929 c->regs[R1] &= ~WT_RDY_ENAB;
930 write_zsreg(c, R1, c->regs[R1]);
931 c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
932 c->regs[R1] |= INT_ALL_Rx;
933 write_zsreg(c, R1, c->regs[R1]);
934 c->regs[R14] &= ~DTRREQ;
935 write_zsreg(c, R14, c->regs[R14]);
936
937 if (c->rx_buf[0]) {
938 free_page((unsigned long)c->rx_buf[0]);
939 c->rx_buf[0] = NULL;
940 }
941 if (c->tx_dma_buf[0]) {
942 free_page((unsigned long)c->tx_dma_buf[0]);
943 c->tx_dma_buf[0] = NULL;
944 }
945 chk = read_zsreg(c, R0);
946 write_zsreg(c, R3, c->regs[R3]);
947 z8530_rtsdtr(c, 0);
948
949 spin_unlock_irqrestore(c->lock, flags);
950
951 return 0;
952}
953EXPORT_SYMBOL(z8530_sync_dma_close);
954
955
956
957
958
959
960
961
962
963
964
965int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
966{
967 unsigned long cflags, dflags;
968
969 printk("Opening sync interface for TX-DMA\n");
970 c->sync = 1;
971 c->mtu = dev->mtu + 64;
972 c->count = 0;
973 c->skb = NULL;
974 c->skb2 = NULL;
975
976
977
978
979
980
981 if (c->mtu > PAGE_SIZE / 2)
982 return -EMSGSIZE;
983
984 c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
985 if (!c->tx_dma_buf[0])
986 return -ENOBUFS;
987
988 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
989
990 spin_lock_irqsave(c->lock, cflags);
991
992
993
994
995 z8530_rx_done(c);
996 z8530_rx_done(c);
997
998
999
1000
1001 c->rxdma_on = 0;
1002 c->txdma_on = 0;
1003
1004 c->tx_dma_used = 0;
1005 c->dma_num = 0;
1006 c->dma_ready = 1;
1007 c->dma_tx = 1;
1008
1009
1010
1011
1012
1013
1014 c->regs[R14] |= DTRREQ;
1015 write_zsreg(c, R14, c->regs[R14]);
1016
1017 c->regs[R1] &= ~TxINT_ENAB;
1018 write_zsreg(c, R1, c->regs[R1]);
1019
1020
1021
1022
1023 dflags = claim_dma_lock();
1024
1025 disable_dma(c->txdma);
1026 clear_dma_ff(c->txdma);
1027 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1028 disable_dma(c->txdma);
1029
1030 release_dma_lock(dflags);
1031
1032
1033
1034
1035 c->rxdma_on = 0;
1036 c->txdma_on = 1;
1037 c->tx_dma_used = 1;
1038
1039 c->irqs = &z8530_txdma_sync;
1040 z8530_rtsdtr(c, 1);
1041 write_zsreg(c, R3, c->regs[R3] | RxENABLE);
1042 spin_unlock_irqrestore(c->lock, cflags);
1043
1044 return 0;
1045}
1046EXPORT_SYMBOL(z8530_sync_txdma_open);
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1058{
1059 unsigned long dflags, cflags;
1060 u8 chk;
1061
1062 spin_lock_irqsave(c->lock, cflags);
1063
1064 c->irqs = &z8530_nop;
1065 c->max = 0;
1066 c->sync = 0;
1067
1068
1069
1070
1071 dflags = claim_dma_lock();
1072
1073 disable_dma(c->txdma);
1074 clear_dma_ff(c->txdma);
1075 c->txdma_on = 0;
1076 c->tx_dma_used = 0;
1077
1078 release_dma_lock(dflags);
1079
1080
1081
1082
1083 c->regs[R1] &= ~WT_RDY_ENAB;
1084 write_zsreg(c, R1, c->regs[R1]);
1085 c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
1086 c->regs[R1] |= INT_ALL_Rx;
1087 write_zsreg(c, R1, c->regs[R1]);
1088 c->regs[R14] &= ~DTRREQ;
1089 write_zsreg(c, R14, c->regs[R14]);
1090
1091 if (c->tx_dma_buf[0]) {
1092 free_page((unsigned long)c->tx_dma_buf[0]);
1093 c->tx_dma_buf[0] = NULL;
1094 }
1095 chk = read_zsreg(c, R0);
1096 write_zsreg(c, R3, c->regs[R3]);
1097 z8530_rtsdtr(c, 0);
1098
1099 spin_unlock_irqrestore(c->lock, cflags);
1100 return 0;
1101}
1102EXPORT_SYMBOL(z8530_sync_txdma_close);
1103
1104
1105
1106
/* Printable chip names, indexed by the device type field set in
 * do_z8530_init() (Z8530 / Z85C30 / Z85230).
 */
static const char * const z8530_type_name[] = {
	"Z8530",
	"Z85C30",
	"Z85230"
};
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
/* z8530_describe - Uniformly describe a Z8530 port
 * @dev: Z8530 device to describe
 * @mapping: string holding the mapping type (eg "I/O" or "Mem")
 * @io: the port value in question
 *
 * Log the chip variant, mapping, base address and IRQ in a standard
 * format for the card-specific drivers.
 */
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}
EXPORT_SYMBOL(z8530_describe);
1134
1135
1136
/* do_z8530_init - Reset and probe one Z8530-family device
 * @dev: Device to probe
 *
 * Reset the chip, verify a Z8530-family part is present via an R12
 * read-back test, then distinguish the exact variant through the R15
 * extended-mode bit and the Tx buffer behaviour. Returns 0 on success
 * or -ENODEV when the register test fails. Caller holds the device
 * lock (see z8530_init()).
 */
static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first in case we get a floating
	 * IRQ transition when we reset the chip.
	 */
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;
	dev->chanA.dcdcheck = DCD;
	dev->chanB.dcdcheck = DCD;

	/* Force hardware reset and let it settle */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check it is valid: R12 must read back what we wrote */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if (read_zsreg(&dev->chanA, R12) != 0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if (read_zsreg(&dev->chanA, R12) != 0x55)
		return -ENODEV;

	dev->type = Z8530;

	/* Try to set the low bit of R15 (extended read enable) */
	write_zsreg(&dev->chanA, R15, 0x01);

	/* If the bit sticks the chip is one of the enhanced parts */
	if (read_zsreg(&dev->chanA, R15) == 0x01) {
		/* Put a char in the fifo: if the chip still reports
		 * the Tx buffer empty it has a deep FIFO - presumably
		 * how the 85230 is distinguished from the 85C30.
		 */
		write_zsreg(&dev->chanA, R8, 0);
		if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
			dev->type = Z85230;
		else
			dev->type = Z85C30;
	}

	/* The rest of the driver assumes extended mode is off until
	 * explicitly enabled, so toggle it back off now.
	 */
	write_zsreg(&dev->chanA, R15, 0);

	/* Load the default register images into both channel caches */
	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init, 16);

	return 0;
}
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
/* z8530_init - Initialise a Z8530 device
 * @dev: Z8530 device to initialise
 *
 * Set up the locking (both channels share the one device lock), then
 * run the reset/probe sequence under that lock. Returns 0 or the
 * negative errno from do_z8530_init().
 */
int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock, shared by both channels */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
EXPORT_SYMBOL(z8530_init);
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
/* z8530_shutdown - Shutdown a Z8530 device
 * @dev: The Z8530 chip to shutdown
 *
 * Point both channels at the discard handlers, force a chip reset and
 * wait 100us for it to settle before the caller frees resources.
 * Takes the device lock itself. Always returns 0.
 */
int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);	/* force hardware reset */
	/* Let the reset settle */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_shutdown);
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
/* z8530_channel_load - Load channel data
 * @c: Z8530 channel to configure
 * @rtable: table of (register, value) pairs, terminated by 255
 *
 * Load a Z8530 channel from a register table. Entries above 0x0F are
 * extended registers, reached by setting the R15 shadow bit around
 * the write. Afterwards the channel state is reset to an idle default
 * with the receiver enabled. Always returns 0.
 *
 * NOTE(review): c->regs[reg] is indexed with values up to 23 by the
 * 85230 table — assumes regs[] in z85230.h is sized for the extended
 * registers; confirm against the header.
 */
int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while (*rtable != 255) {
		int reg = *rtable++;

		if (reg > 0x0F)	/* extended register: open the shadow bank */
			write_zsreg(c, R15, c->regs[15] | 1);
		write_zsreg(c, reg & 0x0F, *rtable);
		if (reg > 0x0F)
			write_zsreg(c, R15, c->regs[15] & ~1);
		c->regs[reg] = *rtable++;	/* keep the cached copy in sync */
	}
	c->rx_function = z8530_null_rx;	/* discard frames until a user hooks in */
	c->skb = NULL;
	c->tx_skb = NULL;
	c->tx_next_skb = NULL;
	c->mtu = 1500;
	c->max = 0;
	c->count = 0;
	c->status = read_zsreg(c, R0);	/* remember the current line state */
	c->sync = 1;
	write_zsreg(c, R3, c->regs[R3] | RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(z8530_channel_load);
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
/* z8530_tx_begin - Begin packet transmission
 * @c: The Z8530 channel to kick
 *
 * If no buffer is currently being transmitted, promote the queued one
 * (tx_next_skb) and start it, either by arming the TX DMA channel or
 * by pre-filling the chip buffer in PIO mode. If nothing is queued,
 * idle the transmitter. Runs in the interrupt path — keep it fast.
 * Called with the channel lock held.
 */
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;

	if (c->tx_skb)	/* a transmission is already in progress */
		return;

	c->tx_skb = c->tx_next_skb;
	c->tx_next_skb = NULL;
	c->tx_ptr = c->tx_next_ptr;

	if (!c->tx_skb) {
		/* Nothing queued: idle. In DMA mode a non-zero residue
		 * means the previous frame did not fully go out.
		 */
		if (c->dma_tx) {
			flags = claim_dma_lock();
			disable_dma(c->txdma);

			if (get_dma_residue(c->txdma)) {
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount = 0;
	} else {
		c->txcount = c->tx_skb->len;

		if (c->dma_tx) {
			/* Arm the TX DMA for the new frame. The CRC and
			 * EOM resets are issued before starting on the
			 * pre-85230 parts only (see the type check).
			 */
			flags = claim_dma_lock();
			disable_dma(c->txdma);

			if (c->dev->type != Z85230) {
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5] | TxENAB);
		} else {
			/* PIO: restore ABUNDER, reset the CRC engine and
			 * pre-fill the chip buffer; the TX interrupt
			 * handler (z8530_tx) feeds the rest.
			 */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}
		}
	}
	/* Since we emptied tx_next_skb we can ask for more */
	netif_wake_queue(c->netdevice);
}
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
/* z8530_tx_done - TX complete callback
 * @c: The channel that completed a frame
 *
 * Called when a packet send completes: start the next packet going
 * first (timing sensitive), then account for and free the finished
 * buffer. Called with the channel lock held from the IRQ path.
 */
static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Guard: completion events can arrive with nothing outstanding */
	if (!c->tx_skb)
		return;

	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);	/* kick the next frame before accounting */
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	dev_consume_skb_irq(skb);
}
1408
1409
1410
1411
1412
1413
1414
1415
1416
/* z8530_null_rx - Discard a received packet
 * @c: The channel the packet arrived on (unused)
 * @skb: The buffer to free
 *
 * Default rx_function installed by z8530_channel_load(): frames are
 * simply thrown away until a real consumer hooks in.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(z8530_null_rx);
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
/* z8530_rx_done - Receive completion callback
 * @c: The channel that completed a receive
 *
 * A packet has arrived. In DMA mode flip the receive DMA to the other
 * half of the buffer pair and restart it; in PIO mode swap skb and
 * skb2. Either way the completed data is handed to c->rx_function().
 */
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/* Is our receive engine in DMA mode? */
	if (c->rxdma_on) {
		/* Save the ready state and the buffer currently being
		 * used as the DMA target.
		 */
		int ready = c->dma_ready;
		unsigned char *rxb = c->rx_buf[c->dma_num];
		unsigned long flags;

		/* Complete this DMA - needed to find the frame length */
		flags = claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on = 0;
		ct = c->mtu - get_dma_residue(c->rxdma);
		if (ct < 0)
			ct = 2;	/* paranoia clamp */
		c->dma_ready = 0;

		/* Normal case: the other slot is free; restart the DMA
		 * into it immediately so we lose as little as possible.
		 */
		if (ready) {
			c->dma_num ^= 1;
			set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames we missed the head of from
			 * passing.
			 */
			write_zsreg(c, R0, RES_Rx_CRC);
		} else {
			/* Should not occur while the flip/IRQ discipline
			 * is followed - flag it loudly if it does.
			 */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");
		}

		release_dma_lock(flags);

		/* Copy the old buffer into a fresh sk_buff; we cannot
		 * DMA straight into an skb because it may sit above the
		 * ISA 16MB limit (see z8530_queue_xmit for the TX-side
		 * equivalent).
		 */
		skb = dev_alloc_skb(ct);
		if (!skb) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/* Non-DMA: swap in the spare buffer (skb2) as quickly as
		 * possible so the next frame has somewhere to land.
		 */
		ct = c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			/* No spare buffer: receive nothing until we get one */
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		/* Refill the spare buffer for the frame after next */
		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2)
			skb_put(c->skb2, c->mtu);

		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}

	/* If we received a frame we must now process it */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		netdev_err(c->netdevice, "Lost a frame\n");
	}
}
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559static inline int spans_boundary(struct sk_buff *skb)
1560{
1561 unsigned long a = (unsigned long)skb->data;
1562
1563 a ^= (a + skb->len);
1564 if (a & 0x00010000)
1565 return 1;
1566 return 0;
1567}
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
/* z8530_queue_xmit - Queue a packet for transmission
 * @c: The channel to use
 * @skb: The packet to kick down the channel
 *
 * Queue a packet. Because interrupt latencies are hard to meet, any
 * needed copy into a DMA-safe bounce buffer happens here rather than
 * in the IRQ path. Returns NETDEV_TX_BUSY while a packet is already
 * pending, otherwise NETDEV_TX_OK. Called from the network stack;
 * the channel lock is not held on entry.
 */
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if (c->tx_next_skb)	/* only one packet may be pending */
		return NETDEV_TX_BUSY;

	/* PC DMA limits: a buffer above the ISA 16MB line or crossing
	 * a 64K boundary must be bounced through one of the
	 * preallocated DMA-safe flip buffers.
	 */
	if (c->dma_tx &&
	    ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
	    16 * 1024 * 1024 || spans_boundary(skb))) {
		/* Use the next flip buffer and flip the selector; only
		 * one packet is in flight at a time so alternating is
		 * sufficient to avoid reuse.
		 */
		c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used ^= 1;
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	} else {
		c->tx_next_ptr = skb->data;	/* transmit in place */
	}
	RT_LOCK;
	c->tx_next_skb = skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);	/* start it now if the channel is idle */
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL(z8530_queue_xmit);
1621
1622
1623
/* Module boilerplate: announce the driver core on load. There is
 * nothing to undo on unload - the channels are owned and torn down by
 * the card-specific drivers.
 */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	printk(banner);	/* the log level is embedded in the banner string */
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);
1638
1639MODULE_AUTHOR("Red Hat Inc.");
1640MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1641MODULE_LICENSE("GPL");
1642