/*
 * dscc4.c: driver for the Siemens PEB20534 PCI controller (DSCC4).
 *
 * Maintainer: Francois Romieu <romieu@cogenit.fr>
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>

static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
static int debug;
static int quartz;

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME	"dscc4"

#undef DSCC4_POLLING

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");
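
/*
 * Example (hypothetical values): load the driver with extra debug
 * output and a 14.7456 MHz on-board oscillator:
 *
 *	modprobe dscc4 debug=1 quartz=14745600
 */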

struct thingie {
	int define;
	u32 bits;
};

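/*
 * Tx/Rx frame descriptors. The controller follows them as linked
 * lists in host memory: 'next' holds the bus address of the next
 * descriptor and 'data' the bus address of the frame buffer.
 */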
struct TxFD {
	__le32 state;
	__le32 next;
	__le32 data;
	__le32 complete;
	u32 jiffies;
};

struct RxFD {
	__le32 state1;
	__le32 next;
	__le32 data;
	__le32 state2;
	__le32 end;
};

#define DUMMY_SKB_SIZE		64
#define TX_LOW			8
#define TX_RING_SIZE		32
#define RX_RING_SIZE		32
#define TX_TOTAL_SIZE		(TX_RING_SIZE*sizeof(struct TxFD))
#define RX_TOTAL_SIZE		(RX_RING_SIZE*sizeof(struct RxFD))
#define IRQ_RING_SIZE		64
#define TX_TIMEOUT		(HZ/10)
#define DSCC4_HZ_MAX		33000000
#define BRR_DIVIDER_MAX		(64*0x00004000)
#define dev_per_card		4
#define SCC_REGISTERS_MAX	23

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)

#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5)
#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)

struct dscc4_pci_priv {
	__le32 *iqcfg;
	int cfg_cur;
	spinlock_t lock;
	struct pci_dev *pdev;

	struct dscc4_dev_priv *root;
	dma_addr_t iqcfg_dma;
	u32 xtal_hz;
};

struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	struct RxFD *rx_fd;
	struct TxFD *tx_fd;
	__le32 *iqrx;
	__le32 *iqtx;

	volatile u32 tx_current;
	u32 rx_current;
	u32 iqtx_current;
	u32 iqrx_current;

	volatile u32 tx_dirty;
	volatile u32 ltda;
	u32 rx_dirty;
	u32 lrda;

	dma_addr_t tx_fd_dma;
	dma_addr_t rx_fd_dma;
	dma_addr_t iqtx_dma;
	dma_addr_t iqrx_dma;

	u32 scc_regs[SCC_REGISTERS_MAX];

	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;
	spinlock_t lock;

	int dev_id;
	volatile u32 flags;
	u32 timer_help;

	unsigned short encoding;
	unsigned short parity;
	struct net_device *dev;
	sync_serial_settings settings;
	void __iomem *base_addr;
	u32 __pad __attribute__ ((aligned (4)));
};

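/* Global (DMA core) register offsets */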
#define GCMDR	0x00
#define GSTAR	0x04
#define GMODE	0x08
#define IQLENR0	0x0C
#define IQLENR1	0x10
#define IQRX0	0x14
#define IQTX0	0x24
#define IQCFG	0x3c
#define FIFOCR1	0x44
#define FIFOCR2	0x48
#define FIFOCR3	0x4c
#define FIFOCR4	0x34
#define CH0CFG	0x50
#define CH0BRDA	0x54
#define CH0BTDA	0x58
#define CH0FRDA	0x98
#define CH0FTDA	0xb0
#define CH0LRDA	0xc8
#define CH0LTDA	0xe0

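/*
 * Per-SCC register offsets (relative to SCC_REG_START(dpriv));
 * GPDIR/GPDATA/GPIM are the global GPIO block.
 */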
#define SCC_START	0x0100
#define SCC_OFFSET	0x80
#define CMDR	0x00
#define STAR	0x04
#define CCR0	0x08
#define CCR1	0x0c
#define CCR2	0x10
#define BRR	0x2C
#define RLCR	0x40
#define IMR	0x54
#define ISR	0x58

#define GPDIR	0x0400
#define GPDATA	0x0404
#define GPIM	0x0408

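/* Bit masks */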
#define EncodingMask	0x00700000
#define CrcMask		0x00000003

#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1fff
#define RxSizeMax	0x1ffc

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define IsrMask		0x000fffff
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define Hold		cpu_to_le32(0x40000000)
#define SccBusy		0x10000000
#define PowerUp		0x80000000
#define Vis		0x00001000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameRab	0x10
#define FrameAborted	cpu_to_le32(0x00000200)
#define FrameEnd	cpu_to_le32(0x80000000)
#define DataComplete	cpu_to_le32(0x40000000)
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		cpu_to_le32(0x20000000)

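/* SCC event bits */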
#define RxEvt		0xf0000000
#define TxEvt		0x0f000000
#define Alls		0x00040000
#define Xdu		0x00010000
#define Cts		0x00004000
#define Xmr		0x00002000
#define Xpr		0x00001000
#define Rdo		0x00000080
#define Rfs		0x00000040
#define Cd		0x00000004
#define Rfo		0x00000002
#define Flex		0x00000001

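/* DMA core event bits */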
#define Cfg		0x00200000
#define Hi		0x00040000
#define Fi		0x00020000
#define Err		0x00010000
#define Arf		0x00000002
#define ArAck		0x00000001

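/* Driver private flags (dpriv->flags) */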
#define Ready		0x00000000
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004
#define FakeReset	0x00000008

#ifdef DSCC4_POLLING
#define EventsMask	0xfffeef7f
#else
#define EventsMask	0xfffa8f7a
#endif

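/* Function prototypes */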
static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
	return p->dev;
}

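/*
 * SCC register access. Writes go both to a shadow copy in
 * dpriv->scc_regs and to the chip; scc_readl() is served from the
 * shadow. STAR is the exception: it is always read from the chip
 * (scc_readl_star).
 */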
static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	u32 state;

	state = dpriv->scc_regs[offset >> 2];
	state &= ~mask;
	state |= value;
	dpriv->scc_regs[offset >> 2] = state;
	writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	dpriv->scc_regs[offset >> 2] = bits;
	writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
	return dpriv->scc_regs[offset >> 2];
}

static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
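	/* Read the register twice and discard the first result -
	 * apparently a workaround for a hardware quirk. */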
	readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
	return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}

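/*
 * Kick the transmitter: point the channel's last Tx descriptor
 * address (LTDA) register at the most recently queued descriptor.
 */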
static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	dpriv->ltda = dpriv->tx_fd_dma +
		      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
	writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
	readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}

static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
				   struct net_device *dev)
{
	dpriv->lrda = dpriv->rx_fd_dma +
		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
	writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}

static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
	return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
					      struct net_device *dev)
{
	return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}

static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, const char *msg)
{
	int ret = 0;

	if (debug > 1) {
		if (SOURCE_ID(state) != dpriv->dev_id) {
			printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
			       dev->name, msg, SOURCE_ID(state), state);
			ret = -1;
		}
		if (state & 0x0df80c00) {
			printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
			       dev->name, msg, state);
			ret = -1;
		}
	}
	return ret;
}

static void dscc4_tx_print(struct net_device *dev,
			   struct dscc4_dev_priv *dpriv,
			   char *msg)
{
	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd = dpriv->tx_fd;
	struct RxFD *rx_fd = dpriv->rx_fd;
	struct sk_buff **skbuff;
	int i;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

	skbuff = dpriv->tx_skbuff;
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
					 (*skbuff)->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		tx_fd++;
	}

	skbuff = dpriv->rx_skbuff;
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
					 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		rx_fd++;
	}
}

static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
	const int len = RX_MAX(HDLC_MAX_MRU);
	struct sk_buff *skb;
	int ret = 0;

	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (skb) {
		skb->protocol = hdlc_type_trans(skb, dev);
		rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, len, PCI_DMA_FROMDEVICE));
	} else {
		rx_fd->data = 0;
		ret = -1;
	}
	return ret;
}

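/*
 * Wait for the SCC to acknowledge a command: poll STAR until the
 * SccBusy bit clears, sleeping between attempts.
 */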
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
			      struct net_device *dev, char *msg)
{
	s8 i = 0;

	do {
		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
			       msg, i);
			goto done;
		}
		schedule_timeout_uninterruptible(10);
		rmb();
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
	s16 i = 0;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	do {
		u32 state = readl(ioaddr);

		if (state & ArAck) {
			printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
			writel(ArAck, ioaddr);
			goto done;
		} else if (state & Arf) {
			printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
			writel(Arf, ioaddr);
			i = -1;
			goto done;
		}
		rmb();
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	s8 i = 0;

	do {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
			break;
		smp_rmb();
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);

	return (i >= 0) ? i : -EAGAIN;
}

#if 0
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
	writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
	writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	writel(Action, dpriv->base_addr + GCMDR);
	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
#endif

#if 0
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	u16 i = 0;

	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	scc_writel(0x00050000, dpriv, dev, CCR2);
	while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
		udelay(1);
		wmb();
	}

	writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	if (dscc4_do_action(dev, "Rdt") < 0)
		printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
}
#endif

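/* Hand a received frame to the stack and refill the Rx ring. */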
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
				struct net_device *dev)
{
	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
	if (!skb) {
		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
		goto refill;
	}
	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
		skb_put(skb, pkt_len);
		if (netif_running(dev))
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_rx(skb);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dev->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			dev->stats.rx_crc_errors++;
		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
			 (FrameVfr | FrameRab))
			dev->stats.rx_length_errors++;
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
	}
refill:
	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
		if (try_get_rx_skb(dpriv, dev) < 0)
			break;
		dpriv->rx_dirty++;
	}
	dscc4_rx_update(dpriv, dev);
	rx_fd->state2 = 0x00000000;
	rx_fd->end = cpu_to_le32(0xbabeface);
}

static void dscc4_free1(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	for (i = 0; i < dev_per_card; i++)
		unregister_hdlc_device(dscc4_to_dev(root + i));

	pci_set_drvdata(pdev, NULL);

	for (i = 0; i < dev_per_card; i++)
		free_netdev(root[i].dev);
	kfree(root);
	kfree(ppriv);
}

static int __devinit dscc4_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	void __iomem *ioaddr;
	int i, rc;

	printk(KERN_DEBUG "%s", version);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_region(pdev, 0, "registers");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
		       DRV_NAME);
		goto err_disable_0;
	}
	rc = pci_request_region(pdev, 1, "LBI interface");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
		       DRV_NAME);
		goto err_free_mmio_region_1;
	}

	ioaddr = pci_ioremap_bar(pdev, 0);
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
		       DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
		       (unsigned long long)pci_resource_start(pdev, 0));
		rc = -EIO;
		goto err_free_mmio_regions_2;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
	       (unsigned long long)pci_resource_start(pdev, 0),
	       (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	rc = dscc4_found1(pdev, ioaddr);
	if (rc < 0)
		goto err_iounmap_3;

	priv = pci_get_drvdata(pdev);

	rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
	if (rc < 0) {
		printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
		goto err_release_4;
	}

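	/*
	 * Global setup: GMODE, then the interrupt queue geometry
	 * (IQLENR0/1) and the per-channel Tx/Rx and configuration
	 * interrupt queues (IQTX0/IQRX0/IQCFG).
	 */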
	writel(0x00000001, ioaddr + GMODE);

	{
		u32 bits;

		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);

	rc = -ENOMEM;

	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_free_irq_5;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_free_iqtx_6;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_free_iqrx_7;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

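	/* DMA FIFO configuration (opaque tuning values). */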
	writel(0x42104000, ioaddr + FIFOCR1);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	writel(0x18181818, ioaddr + FIFOCR4);
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	rc = 0;
out:
	return rc;

err_free_iqrx_7:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_free_iqtx_6:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_free_irq_5:
	free_irq(pdev->irq, priv->root);
err_release_4:
	dscc4_free1(pdev);
err_iounmap_3:
	iounmap(ioaddr);
err_free_mmio_regions_2:
	pci_release_region(pdev, 1);
err_free_mmio_region_1:
	pci_release_region(pdev, 0);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}

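/*
 * Program conservative SCC defaults: clock mode (CCR0), Rx length
 * check (RLCR), HDLC parameters (CCR1) and CCR2 with the receiver
 * left inactive until the interface is opened.
 */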
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	scc_writel(0x00000000, dpriv, dev, CCR0);
	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);
	scc_writel(0x02408000, dpriv, dev, CCR1);
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;
	else
		dpriv->pci_priv->xtal_hz = hz;

	return ret;
}

static const struct net_device_ops dscc4_ops = {
	.ndo_open	= dscc4_open,
	.ndo_stop	= dscc4_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= dscc4_ioctl,
	.ndo_tx_timeout	= dscc4_tx_timeout,
};

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
	if (!root) {
		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
		goto err_out;
	}

	for (i = 0; i < dev_per_card; i++) {
		root[i].dev = alloc_hdlcdev(root + i);
		if (!root[i].dev)
			goto err_free_dev;
	}

	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
		goto err_free_dev;
	}

	ppriv->root = root;
	spin_lock_init(&ppriv->lock);

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		struct net_device *d = dscc4_to_dev(dpriv);
		hdlc_device *hdlc = dev_to_hdlc(d);

		d->base_addr = (unsigned long)ioaddr;
		d->irq = pdev->irq;
		d->netdev_ops = &dscc4_ops;
		d->watchdog_timeo = TX_TIMEOUT;
		SET_NETDEV_DEV(d, &pdev->dev);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		dpriv->base_addr = ioaddr;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;

		ret = dscc4_init_ring(d);
		if (ret < 0)
			goto err_unregister;

		ret = register_hdlc_device(d);
		if (ret < 0) {
			printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
			dscc4_release_ring(dpriv);
			goto err_unregister;
		}
	}

	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;

	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (i-- > 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(dscc4_to_dev(root + i));
	}
	kfree(ppriv);
	i = dev_per_card;
err_free_dev:
	while (i-- > 0)
		free_netdev(root[i].dev);
	kfree(root);
err_out:
	return ret;
}

static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

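	/* No watchdog logic implemented yet: simply re-arm the timer. */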
	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
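	/* FIXME: the Tx timeout is not handled yet. */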
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
	sync_serial_settings *settings = &dpriv->settings;

	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
		struct net_device *dev = dscc4_to_dev(dpriv);

		printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
		return -1;
	}
	return 0;
}

#ifdef CONFIG_DSCC4_PCI_RST

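/*
 * Soft reset through the chip's GPIO block: save the PCI config
 * space, pulse every GPIO line, wait, then restore the config space.
 */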
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
	int i;

	mutex_lock(&dscc4_mutex);
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	writel(0x001c0000, ioaddr + GMODE);
	writel(0x0000ffff, ioaddr + GPDIR);
	writel(0x0000ffff, ioaddr + GPIM);

	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	readl(ioaddr + GSTAR);

	schedule_timeout_uninterruptible(10);

	for (i = 0; i < 16; i++)
		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
	mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev,ioaddr)	do {} while (0)
#endif

static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv;
	int ret = -EAGAIN;

	if (dscc4_loopback_check(dpriv) < 0)
		goto err;

	if ((ret = hdlc_open(dev)))
		goto err;

	ppriv = dpriv->pci_priv;

	if (dpriv->flags & FakeReset) {
		dpriv->flags &= ~FakeReset;
		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
		scc_writel(EventsMask, dpriv, dev, IMR);
		printk(KERN_INFO "%s: up again.\n", dev->name);
		goto done;
	}

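	/*
	 * Fresh start: both descriptor chains must be initialized by the
	 * chip (IDR/IDT commands, completed on the Xpr interrupt).
	 */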
	dpriv->flags = NeedIDR | NeedIDT;

	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

	if (scc_readl_star(dpriv, dev) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		ret = -EAGAIN;
		goto err_out;
	} else
		printk(KERN_INFO "%s: available. Good\n", dev->name);

	scc_writel(EventsMask, dpriv, dev, IMR);

	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_disable_scc_events;

	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
		goto err_disable_scc_events;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

done:
	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_disable_scc_events:
	scc_writel(0xffffffff, dpriv, dev, IMR);
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
	hdlc_close(dev);
err:
	return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
}
#endif

static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
						 PCI_DMA_TODEVICE));
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;
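	/* Make the descriptor visible to the device before kicking DMA. */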
	mb();

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");

	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return NETDEV_TX_OK;
}

static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
	scc_writel(0xffffffff, dpriv, dev, IMR);

	dpriv->flags |= FakeReset;

	hdlc_close(dev);

	return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
	int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
	if (port >= 2)
		ret = -1;
#endif
	return ret;
}

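/*
 * Baud rate generation, as reconstructed from the code below: the BRR
 * divider is N << M (N in [0..63], M in [0..15]), so bps ~ xtal/(N << M),
 * with an extra 1/16 factor in some clock modes (CCR0 bit 0 clear).
 */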
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	int ret = -1;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) {
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036;
		} else
			*state |= 0x00000037;
		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001))
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}

static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size;
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dpriv->flags & FakeReset) {
			printk(KERN_INFO "%s: please reset the device"
			       " before this command\n", dev->name);
			return -EPERM;
		}
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}

static int dscc4_match(const struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++) {
		if (value == p[i].define)
			break;
	}
	if (p[i].define == -1)
		return -1;
	else
		return i;
}

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) {
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		if (settings->clock_rate != bps) {
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, settings->clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else {
		state |= PowerUp | Vis;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	static const struct thingie encoding[] = {
		{ ENCODING_NRZ,		0x00000000 },
		{ ENCODING_NRZI,	0x00200000 },
		{ ENCODING_FM_MARK,	0x00400000 },
		{ ENCODING_FM_SPACE,	0x00500000 },
		{ ENCODING_MANCHESTER,	0x00600000 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(encoding, dpriv->encoding);
	if (i >= 0)
		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	u32 state;

	state = scc_readl(dpriv, CCR1);
	if (settings->loopback) {
		printk(KERN_DEBUG "%s: loopback\n", dev->name);
		state |= 0x00000100;
	} else {
		printk(KERN_DEBUG "%s: normal\n", dev->name);
		state &= ~0x00000100;
	}
	scc_writel(state, dpriv, dev, CCR1);
	return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
			     struct net_device *dev)
{
	static const struct thingie crc[] = {
		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
		{ PARITY_CRC32_PR1_CCITT,	0x00000001 },
		{ -1, 0}	/* Sentinel required by dscc4_match(). */
	};
	int i, ret = 0;

	i = dscc4_match(crc, dpriv->parity);
	if (i >= 0)
		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
	else
		ret = -EOPNOTSUPP;
	return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct {
		int (*action)(struct dscc4_dev_priv *, struct net_device *);
	} *p, do_setting[] = {
		{ dscc4_encoding_setting },
		{ dscc4_clock_setting },
		{ dscc4_loopback_setting },
		{ dscc4_crc_setting },
		{ NULL }
	};
	int ret = 0;

	for (p = do_setting; p->action; p++) {
		if ((ret = p->action(dpriv, dev)) < 0)
			break;
	}
	return ret;
}

static irqreturn_t dscc4_irq(int irq, void *token)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	void __iomem *ioaddr;
	u32 state;
	unsigned long flags;
	int i, handled = 1;

	priv = root->pci_priv;
	dev = dscc4_to_dev(root);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = root->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state) {
		handled = 0;
		goto out;
	}
	if (debug > 3)
		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
		       dev->name);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_RETVAL(handled);
}

static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqtx[cur]);
	if (!state) {
		if (debug > 4)
			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
			       state);
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct sk_buff *skb;
			struct TxFD *tx_fd;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {
				pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
						 skb->len, PCI_DMA_TODEVICE);
				if (tx_fd->state & FrameEnd) {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += skb->len;
				}
				dev_kfree_skb_irq(skb);
				dpriv->tx_skbuff[cur] = NULL;
				++dpriv->tx_dirty;
			} else {
				if (debug > 1)
					printk(KERN_ERR "%s Tx: NULL skb %d\n",
					       dev->name, cur);
			}
			tx_fd->data = tx_fd->next;
			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
			tx_fd->complete = 0x00000000;
			tx_fd->jiffies = 0;

			if (!(state &= ~Alls))
				goto try;
		}
		if (state & Xdu) {
			printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
			dpriv->flags = NeedIDT;
			writel(MTFi | Rdt,
			       dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(Action, dpriv->base_addr + GCMDR);
			return;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts))
				goto try;
		}
		if (state & Xmr) {
			printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
			if (!(state &= ~Xmr))
				goto try;
		}
		if (state & Xpr) {
			void __iomem *scc_addr;
			unsigned long ring;
			int i;

			for (i = 1; i; i <<= 1) {
				if (!(scc_readl_star(dpriv, dev) & SccBusy))
					break;
			}
			if (!i)
				printk(KERN_INFO "%s busy in irq\n", dev->name);

			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
			if (dpriv->flags & NeedIDT) {
				if (debug > 2)
					dscc4_tx_print(dev, dpriv, "Xpr");
				ring = dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD);
				writel(ring, scc_addr + CH0BTDA);
				dscc4_do_tx(dpriv, dev);
				writel(MTFi | Idt, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDT") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
			}
			if (dpriv->flags & NeedIDR) {
				ring = dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD);
				writel(ring, scc_addr + CH0BRDA);
				dscc4_rx_update(dpriv, dev);
				writel(MTFi | Idr, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDR") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				smp_wmb();
				scc_writel(0x08050008, dpriv, dev, CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
		if (state & Cd) {
			if (debug > 0)
				printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd))
				goto try;
		}
	} else {
		if (state & Hi) {
#ifdef DSCC4_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			printk(KERN_INFO "%s: Tx Hi\n", dev->name);
			state &= ~Hi;
		}
		if (state & Err) {
			printk(KERN_INFO "%s: Tx ERR\n", dev->name);
			dev->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}

static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur;

try:
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqrx[cur]);
	if (!state)
		return;
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

	if (state_check(state, dpriv, dev, "Rx") < 0)
		return;

	if (!(state & SccEvt)) {
		struct RxFD *rx_fd;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
			       state);
		state &= 0x00ffffff;
		if (state & Err) {
			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;

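			/*
			 * Walk the ring up to the first descriptor the chip
			 * still holds (Hold bit set): reception resumes there.
			 */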
			while (!(rx_fd->state1 & Hold)) {
				rx_fd++;
				cur++;
				if (!(cur = cur%RX_RING_SIZE))
					rx_fd = dpriv->rx_fd;
			}
			try_get_rx_skb(dpriv, dev);
			if (!rx_fd->data)
				goto try;
			rx_fd->state1 &= ~Hold;
			rx_fd->state2 = 0x00000000;
			rx_fd->end = cpu_to_le32(0xbabeface);
			goto try;
		}
		if (state & Fi) {
			dscc4_rx_skb(dpriv, dev);
			goto try;
		}
		if (state & Hi) {
			printk(KERN_INFO "%s: Rx Hi\n", dev->name);
			state &= ~Hi;
			goto try;
		}
	} else {
		if (debug > 1) {
			static struct {
				u32 mask;
				const char *irq_name;
			} evts[] = {
				{ 0x00008000, "TIN"},
				{ 0x00000020, "RSC"},
				{ 0x00000010, "PCE"},
				{ 0x00000008, "PLLA"},
				{ 0, NULL}
			}, *evt;

			for (evt = evts; evt->irq_name; evt++) {
				if (state & evt->mask) {
					printk(KERN_DEBUG "%s: %s\n",
					       dev->name, evt->irq_name);
					if (!(state &= ~evt->mask))
						goto try;
				}
			}
		} else {
			if (!(state &= ~0x0000c03c))
				goto try;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts))
				goto try;
		}
		if (state & Rdo) {
			struct RxFD *rx_fd;
			void __iomem *scc_addr;
			int cur;

			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
			scc_writel(RxSccRes, dpriv, dev, CMDR);
			dpriv->flags |= RdoSet;

			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					dev->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = cpu_to_le32(0xbabeface);
				} else
					dscc4_rx_skb(dpriv, dev);
			} while (1);

			if (debug > 0) {
				if (dpriv->flags & RdoSet)
					printk(KERN_DEBUG
					       "%s: no RDO in Rx data\n", DRV_NAME);
			}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
#warning "FIXME: CH0BRDA"
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "RDR");
				goto rdo_end;
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "IDR");
				goto rdo_end;
			}
		rdo_end:
#endif
			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
			goto try;
		}
		if (state & Cd) {
			printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd))
				goto try;
		}
		if (state & Flex) {
			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
			if (!(state &= ~Flex))
				goto try;
		}
	}
}

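/*
 * Prime the last Tx slot with a harmless dummy frame (the version
 * string) so the transmitter always has valid data to fetch.
 */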
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
	if (skb) {
		int last = dpriv->tx_dirty%TX_RING_SIZE;
		struct TxFD *tx_fd = dpriv->tx_fd + last;

		skb->len = DUMMY_SKB_SIZE;
		skb_copy_to_linear_data(skb, version,
					strlen(version) % DUMMY_SKB_SIZE);
		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					  skb->data, DUMMY_SKB_SIZE,
					  PCI_DMA_TODEVICE));
		dpriv->tx_skbuff[last] = skb;
	}
	return skb;
}

static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	void *ring;
	int i;

	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
	if (!ring)
		goto err_out;
	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
	if (!ring)
		goto err_free_dma_rx;
	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
	dpriv->tx_dirty = 0xffffffff;
	i = dpriv->tx_current = 0;
	do {
		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
		tx_fd->complete = 0x00000000;
		tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
					      (++i%TX_RING_SIZE)*sizeof(*tx_fd));
	} while (i < TX_RING_SIZE);

	if (!dscc4_init_dummy_skb(dpriv))
		goto err_free_dma_tx;

	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
	i = dpriv->rx_dirty = dpriv->rx_current = 0;
	do {
		rx_fd->state1 = HiDesc;
		rx_fd->state2 = 0x00000000;
		rx_fd->end = cpu_to_le32(0xbabeface);
		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
		if (try_get_rx_skb(dpriv, dev) >= 0)
			dpriv->rx_dirty++;
		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
					      (++i%RX_RING_SIZE)*sizeof(*rx_fd));
	} while (i < RX_RING_SIZE);

	return 0;

err_free_dma_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
	return -ENOMEM;
}

static void __devexit dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	void __iomem *ioaddr;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	ioaddr = root->base_addr;

	dscc4_pci_reset(pdev, ioaddr);

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;

		dscc4_release_ring(dpriv);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}

	dscc4_free1(pdev);

	iounmap(ioaddr);

	pci_release_region(pdev, 1);
	pci_release_region(pdev, 0);

	pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI &&
	    encoding != ENCODING_FM_MARK &&
	    encoding != ENCODING_FM_SPACE &&
	    encoding != ENCODING_MANCHESTER)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT &&
	    parity != PARITY_CRC32_PR0_CCITT &&
	    parity != PARITY_CRC32_PR1_CCITT)
		return -EINVAL;

	dpriv->encoding = encoding;
	dpriv->parity = parity;
	return 0;
}

#ifndef MODULE
static int __init dscc4_setup(char *str)
{
	int *args[] = { &debug, &quartz, NULL }, **p = args;

	while (*p && (get_option(&str, *p) == 2))
		p++;
	return 1;
}

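/*
 * e.g. on the kernel command line: dscc4.setup=1,14745600
 * (first value: debug, second: quartz frequency in Hz).
 */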
__setup("dscc4.setup=", dscc4_setup);
#endif

static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
	.name		= DRV_NAME,
	.id_table	= dscc4_pci_tbl,
	.probe		= dscc4_init_one,
	.remove		= __devexit_p(dscc4_remove_one),
};

static int __init dscc4_init_module(void)
{
	return pci_register_driver(&dscc4_driver);
}

static void __exit dscc4_cleanup_module(void)
{
	pci_unregister_driver(&dscc4_driver);
}

module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);