#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>

static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
static int debug;
static int quartz;

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME "dscc4"

#undef DSCC4_POLLING

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");

struct thingie {
        int define;
        u32 bits;
};

struct TxFD {
        __le32 state;
        __le32 next;
        __le32 data;
        __le32 complete;
        u32 jiffies;
};

struct RxFD {
        __le32 state1;
        __le32 next;
        __le32 data;
        __le32 state2;
        __le32 end;
};
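
/*
 * Tx/Rx frame descriptors as the DSCC4 DMA engine walks them: each ring
 * entry links to the next descriptor and carries the DMA address of its
 * data buffer plus a status/state word.  Every field the chip touches is
 * little-endian (__le32); TxFD.jiffies is plain u32 and is presumably
 * host-side bookkeeping only, never read by the hardware.
 */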

#define DUMMY_SKB_SIZE 64
#define TX_LOW 8
#define TX_RING_SIZE 32
#define RX_RING_SIZE 32
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
#define IRQ_RING_SIZE 64
#define TX_TIMEOUT (HZ/10)
#define DSCC4_HZ_MAX 33000000
#define BRR_DIVIDER_MAX 64*0x00004000
#define dev_per_card 4
#define SCC_REGISTERS_MAX 23

#define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
#define TO_SIZE(state) (((state) >> 16) & 0x1fff)

#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len) ((((len) >> 5) + 1) << 5)
#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
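
/*
 * Descriptor state packing, assuming the layout implied by the macros
 * above: the transfer size lives in the 13 bits starting at bit 16 of
 * the state word, which is what TO_STATE_TX()/TO_STATE_RX() build and
 * TO_SIZE() extracts.  RX_MAX() rounds a buffer length up to the next
 * 32-byte multiple, apparently so receive buffers always come in
 * 32-byte-granular sizes.
 */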

struct dscc4_pci_priv {
        __le32 *iqcfg;
        int cfg_cur;
        spinlock_t lock;
        struct pci_dev *pdev;

        struct dscc4_dev_priv *root;
        dma_addr_t iqcfg_dma;
        u32 xtal_hz;
};

struct dscc4_dev_priv {
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        struct sk_buff *tx_skbuff[TX_RING_SIZE];

        struct RxFD *rx_fd;
        struct TxFD *tx_fd;
        __le32 *iqrx;
        __le32 *iqtx;

        volatile u32 tx_current;
        u32 rx_current;
        u32 iqtx_current;
        u32 iqrx_current;

        volatile u32 tx_dirty;
        volatile u32 ltda;
        u32 rx_dirty;
        u32 lrda;

        dma_addr_t tx_fd_dma;
        dma_addr_t rx_fd_dma;
        dma_addr_t iqtx_dma;
        dma_addr_t iqrx_dma;

        u32 scc_regs[SCC_REGISTERS_MAX];

        struct timer_list timer;

        struct dscc4_pci_priv *pci_priv;
        spinlock_t lock;

        int dev_id;
        volatile u32 flags;
        u32 timer_help;

        unsigned short encoding;
        unsigned short parity;
        struct net_device *dev;
        sync_serial_settings settings;
        void __iomem *base_addr;
        u32 __pad __attribute__ ((aligned (4)));
};
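
/*
 * One dscc4_dev_priv per SCC channel: the PEB20534 exposes four channels
 * per PCI function (dev_per_card), each with its own descriptor rings,
 * interrupt queues and shadow copy of its SCC register file.
 */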

#define GCMDR 0x00
#define GSTAR 0x04
#define GMODE 0x08
#define IQLENR0 0x0C
#define IQLENR1 0x10
#define IQRX0 0x14
#define IQTX0 0x24
#define IQCFG 0x3c
#define FIFOCR1 0x44
#define FIFOCR2 0x48
#define FIFOCR3 0x4c
#define FIFOCR4 0x34
#define CH0CFG 0x50
#define CH0BRDA 0x54
#define CH0BTDA 0x58
#define CH0FRDA 0x98
#define CH0FTDA 0xb0
#define CH0LRDA 0xc8
#define CH0LTDA 0xe0

#define SCC_START 0x0100
#define SCC_OFFSET 0x80
#define CMDR 0x00
#define STAR 0x04
#define CCR0 0x08
#define CCR1 0x0c
#define CCR2 0x10
#define BRR 0x2C
#define RLCR 0x40
#define IMR 0x54
#define ISR 0x58

#define GPDIR 0x0400
#define GPDATA 0x0404
#define GPIM 0x0408

#define EncodingMask 0x00700000
#define CrcMask 0x00000003

#define IntRxScc0 0x10000000
#define IntTxScc0 0x01000000

#define TxPollCmd 0x00000400
#define RxActivate 0x08000000
#define MTFi 0x04000000
#define Rdr 0x00400000
#define Rdt 0x00200000
#define Idr 0x00100000
#define Idt 0x00080000
#define TxSccRes 0x01000000
#define RxSccRes 0x00010000
#define TxSizeMax 0x1fff
#define RxSizeMax 0x1ffc

#define Ccr0ClockMask 0x0000003f
#define Ccr1LoopMask 0x00000200
#define IsrMask 0x000fffff
#define BrrExpMask 0x00000f00
#define BrrMultMask 0x0000003f
#define EncodingMask 0x00700000
#define Hold cpu_to_le32(0x40000000)
#define SccBusy 0x10000000
#define PowerUp 0x80000000
#define Vis 0x00001000
#define FrameOk (FrameVfr | FrameCrc)
#define FrameVfr 0x80
#define FrameRdo 0x40
#define FrameCrc 0x20
#define FrameRab 0x10
#define FrameAborted cpu_to_le32(0x00000200)
#define FrameEnd cpu_to_le32(0x80000000)
#define DataComplete cpu_to_le32(0x40000000)
#define LengthCheck 0x00008000
#define SccEvt 0x02000000
#define NoAck 0x00000200
#define Action 0x00000001
#define HiDesc cpu_to_le32(0x20000000)

#define RxEvt 0xf0000000
#define TxEvt 0x0f000000
#define Alls 0x00040000
#define Xdu 0x00010000
#define Cts 0x00004000
#define Xmr 0x00002000
#define Xpr 0x00001000
#define Rdo 0x00000080
#define Rfs 0x00000040
#define Cd 0x00000004
#define Rfo 0x00000002
#define Flex 0x00000001

#define Cfg 0x00200000
#define Hi 0x00040000
#define Fi 0x00020000
#define Err 0x00010000
#define Arf 0x00000002
#define ArAck 0x00000001

#define Ready 0x00000000
#define NeedIDR 0x00000001
#define NeedIDT 0x00000002
#define RdoSet 0x00000004
#define FakeReset 0x00000008

#ifdef DSCC4_POLLING
#define EventsMask 0xfffeef7f
#else
#define EventsMask 0xfffa8f7a
#endif
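
/*
 * EventsMask is the value written to IMR when a channel is opened.  A set
 * bit appears to mask the corresponding SCC interrupt source (the error
 * paths write 0xffffffff to silence everything), so the polling build
 * simply masks a few more Tx events that dscc4_tx_poll() would otherwise
 * handle.
 */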

static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *,
                                    struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
        return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
        return p->dev;
}
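
/*
 * SCC register access helpers: the driver keeps a shadow copy of every
 * SCC register it writes (dpriv->scc_regs[]), patches that copy and then
 * pushes the value through MMIO.  scc_readl() serves reads from the
 * shadow, presumably because most SCC registers cannot be read back
 * reliably; only STAR is always fetched from the chip (scc_readl_star()).
 */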

static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, int offset)
{
        u32 state;

        state = dpriv->scc_regs[offset >> 2];
        state &= ~mask;
        state |= value;
        dpriv->scc_regs[offset >> 2] = state;
        writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, int offset)
{
        dpriv->scc_regs[offset >> 2] = bits;
        writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
        return dpriv->scc_regs[offset >> 2];
}

static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
        return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}
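
/*
 * Ring "kick" helpers: dscc4_do_tx() and dscc4_rx_update() publish the
 * address of the last usable descriptor to the per-channel LTDA/LRDA
 * registers so the DMA engine knows how far it may walk each ring.  The
 * extra readl() after the Tx update looks like a posted-write flush.
 */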

static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
                               struct net_device *dev)
{
        dpriv->ltda = dpriv->tx_fd_dma +
                      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
        writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);

        readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}

static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
                                   struct net_device *dev)
{
        dpriv->lrda = dpriv->rx_fd_dma +
                      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
        writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}

static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
        return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
                                              struct net_device *dev)
{
        return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}

static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, const char *msg)
{
        int ret = 0;

        if (debug > 1) {
                if (SOURCE_ID(state) != dpriv->dev_id) {
                        printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
                               dev->name, msg, SOURCE_ID(state), state);
                        ret = -1;
                }
                if (state & 0x0df80c00) {
                        printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
                               dev->name, msg, state);
                        ret = -1;
                }
        }
        return ret;
}

static void dscc4_tx_print(struct net_device *dev,
                           struct dscc4_dev_priv *dpriv,
                           char *msg)
{
        printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
               dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
        struct pci_dev *pdev = dpriv->pci_priv->pdev;
        struct TxFD *tx_fd = dpriv->tx_fd;
        struct RxFD *rx_fd = dpriv->rx_fd;
        struct sk_buff **skbuff;
        int i;

        pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

        skbuff = dpriv->tx_skbuff;
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (*skbuff) {
                        pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
                                         (*skbuff)->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb(*skbuff);
                }
                skbuff++;
                tx_fd++;
        }

        skbuff = dpriv->rx_skbuff;
        for (i = 0; i < RX_RING_SIZE; i++) {
                if (*skbuff) {
                        pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
                                         RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(*skbuff);
                }
                skbuff++;
                rx_fd++;
        }
}

static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
                                 struct net_device *dev)
{
        unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
        struct RxFD *rx_fd = dpriv->rx_fd + dirty;
        const int len = RX_MAX(HDLC_MAX_MRU);
        struct sk_buff *skb;
        int ret = 0;

        skb = dev_alloc_skb(len);
        dpriv->rx_skbuff[dirty] = skb;
        if (skb) {
                skb->protocol = hdlc_type_trans(skb, dev);
                rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
                                          skb->data, len, PCI_DMA_FROMDEVICE));
        } else {
                rx_fd->data = 0;
                ret = -1;
        }
        return ret;
}

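/*
 * Command acknowledge helper: CMDR writes are only accepted while the
 * busy flag (SccBusy in STAR, apparently the chip's "command executing"
 * indication) is clear, so callers loop here, sleeping between polls,
 * until the previous command has been accepted or the retry counter
 * wraps.
 */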
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
                              struct net_device *dev, char *msg)
{
        s8 i = 0;

        do {
                if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
                        printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
                               msg, i);
                        goto done;
                }
                schedule_timeout_uninterruptible(10);
                rmb();
        } while (++i > 0);
        printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
        return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
        void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
        s16 i = 0;

        writel(Action, ioaddr + GCMDR);
        ioaddr += GSTAR;
        do {
                u32 state = readl(ioaddr);

                if (state & ArAck) {
                        printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
                        writel(ArAck, ioaddr);
                        goto done;
                } else if (state & Arf) {
                        printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
                        writel(Arf, ioaddr);
                        i = -1;
                        goto done;
                }
                rmb();
        } while (++i > 0);
        printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
        return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
        int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
        s8 i = 0;

        do {
                if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
                    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
                        break;
                smp_rmb();
                schedule_timeout_uninterruptible(10);
        } while (++i > 0);

        return (i >= 0) ? i : -EAGAIN;
}

#if 0
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dpriv->pci_priv->lock, flags);

        writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
        scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
        readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
        writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
        writel(Action, dpriv->base_addr + GCMDR);
        spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
#endif

#if 0
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        u16 i = 0;

        scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
        scc_writel(0x00050000, dpriv, dev, CCR2);

        while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
                udelay(1);
                wmb();
        }

        writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
        if (dscc4_do_action(dev, "Rdt") < 0)
                printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
}
#endif

static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
                                struct net_device *dev)
{
        struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
        struct pci_dev *pdev = dpriv->pci_priv->pdev;
        struct sk_buff *skb;
        int pkt_len;

        skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
        if (!skb) {
                printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
                goto refill;
        }
        pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
        pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
                         RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
        if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
                skb_put(skb, pkt_len);
                if (netif_running(dev))
                        skb->protocol = hdlc_type_trans(skb, dev);
                netif_rx(skb);
        } else {
                if (skb->data[pkt_len] & FrameRdo)
                        dev->stats.rx_fifo_errors++;
                else if (!(skb->data[pkt_len] & FrameCrc))
                        dev->stats.rx_crc_errors++;
                else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
                         (FrameVfr | FrameRab))
                        dev->stats.rx_length_errors++;
                dev->stats.rx_errors++;
                dev_kfree_skb_irq(skb);
        }
refill:
        while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
                if (try_get_rx_skb(dpriv, dev) < 0)
                        break;
                dpriv->rx_dirty++;
        }
        dscc4_rx_update(dpriv, dev);
        rx_fd->state2 = 0x00000000;
        rx_fd->end = cpu_to_le32(0xbabeface);
}

static void dscc4_free1(struct pci_dev *pdev)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        int i;

        ppriv = pci_get_drvdata(pdev);
        root = ppriv->root;

        for (i = 0; i < dev_per_card; i++)
                unregister_hdlc_device(dscc4_to_dev(root + i));

        pci_set_drvdata(pdev, NULL);

        for (i = 0; i < dev_per_card; i++)
                free_netdev(root[i].dev);
        kfree(root);
        kfree(ppriv);
}

static int __devinit dscc4_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct dscc4_pci_priv *priv;
        struct dscc4_dev_priv *dpriv;
        void __iomem *ioaddr;
        int i, rc;

        printk(KERN_DEBUG "%s", version);

        rc = pci_enable_device(pdev);
        if (rc < 0)
                goto out;

        rc = pci_request_region(pdev, 0, "registers");
        if (rc < 0) {
                printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
                       DRV_NAME);
                goto err_disable_0;
        }
        rc = pci_request_region(pdev, 1, "LBI interface");
        if (rc < 0) {
                printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
                       DRV_NAME);
                goto err_free_mmio_region_1;
        }

        ioaddr = pci_ioremap_bar(pdev, 0);
        if (!ioaddr) {
                printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
                       DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
                       (unsigned long long)pci_resource_start(pdev, 0));
                rc = -EIO;
                goto err_free_mmio_regions_2;
        }
        printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
               (unsigned long long)pci_resource_start(pdev, 0),
               (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
        pci_set_master(pdev);

        rc = dscc4_found1(pdev, ioaddr);
        if (rc < 0)
                goto err_iounmap_3;

        priv = pci_get_drvdata(pdev);

        rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
        if (rc < 0) {
                printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
                goto err_release_4;
        }

        writel(0x00000001, ioaddr + GMODE);

        {
                u32 bits;

                bits = (IRQ_RING_SIZE >> 5) - 1;
                bits |= bits << 4;
                bits |= bits << 8;
                bits |= bits << 16;
                writel(bits, ioaddr + IQLENR0);
        }

        writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
        priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
                IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
        if (!priv->iqcfg)
                goto err_free_irq_5;
        writel(priv->iqcfg_dma, ioaddr + IQCFG);

        rc = -ENOMEM;

        for (i = 0; i < dev_per_card; i++) {
                dpriv = priv->root + i;
                dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
                        IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
                if (!dpriv->iqtx)
                        goto err_free_iqtx_6;
                writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
        }
        for (i = 0; i < dev_per_card; i++) {
                dpriv = priv->root + i;
                dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
                        IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
                if (!dpriv->iqrx)
                        goto err_free_iqrx_7;
                writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
        }

        writel(0x42104000, ioaddr + FIFOCR1);

        writel(0xdef6d800, ioaddr + FIFOCR2);

        writel(0x18181818, ioaddr + FIFOCR4);

        writel(0x0000000e, ioaddr + FIFOCR3);

        writel(0xff200001, ioaddr + GCMDR);

        rc = 0;
out:
        return rc;

err_free_iqrx_7:
        while (--i >= 0) {
                dpriv = priv->root + i;
                pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
                                    dpriv->iqrx, dpriv->iqrx_dma);
        }
        i = dev_per_card;
err_free_iqtx_6:
        while (--i >= 0) {
                dpriv = priv->root + i;
                pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
                                    dpriv->iqtx, dpriv->iqtx_dma);
        }
        pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
                            priv->iqcfg_dma);
err_free_irq_5:
        free_irq(pdev->irq, priv->root);
err_release_4:
        dscc4_free1(pdev);
err_iounmap_3:
        iounmap(ioaddr);
err_free_mmio_regions_2:
        pci_release_region(pdev, 1);
err_free_mmio_region_1:
        pci_release_region(pdev, 0);
err_disable_0:
        pci_disable_device(pdev);
        goto out;
}

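/*
 * Per-channel register defaults, assuming the usual Siemens SCC register
 * split: clock/encoding mode in CCR0, frame and address handling in CCR1,
 * FIFO/DMA behaviour in CCR2, and RLCR limiting received frames to
 * HDLC_MAX_MRU (expressed in 32-byte units) with LengthCheck enabled.
 * The magic CCR1/CCR2 values below are taken as-is from the driver.
 */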
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
                                 struct net_device *dev)
{
        scc_writel(0x00000000, dpriv, dev, CCR0);

        scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

        scc_writel(0x02408000, dpriv, dev, CCR1);

        scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
        int ret = 0;

        if ((hz < 0) || (hz > DSCC4_HZ_MAX))
                ret = -EOPNOTSUPP;
        else
                dpriv->pci_priv->xtal_hz = hz;

        return ret;
}

static const struct net_device_ops dscc4_ops = {
        .ndo_open       = dscc4_open,
        .ndo_stop       = dscc4_close,
        .ndo_change_mtu = hdlc_change_mtu,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_do_ioctl   = dscc4_ioctl,
        .ndo_tx_timeout = dscc4_tx_timeout,
};

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        int i, ret = -ENOMEM;

        root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
        if (!root) {
                printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
                goto err_out;
        }

        for (i = 0; i < dev_per_card; i++) {
                root[i].dev = alloc_hdlcdev(root + i);
                if (!root[i].dev)
                        goto err_free_dev;
        }

        ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
        if (!ppriv) {
                printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
                goto err_free_dev;
        }

        ppriv->root = root;
        spin_lock_init(&ppriv->lock);

        for (i = 0; i < dev_per_card; i++) {
                struct dscc4_dev_priv *dpriv = root + i;
                struct net_device *d = dscc4_to_dev(dpriv);
                hdlc_device *hdlc = dev_to_hdlc(d);

                d->base_addr = (unsigned long)ioaddr;
                d->irq = pdev->irq;
                d->netdev_ops = &dscc4_ops;
                d->watchdog_timeo = TX_TIMEOUT;
                SET_NETDEV_DEV(d, &pdev->dev);

                dpriv->dev_id = i;
                dpriv->pci_priv = ppriv;
                dpriv->base_addr = ioaddr;
                spin_lock_init(&dpriv->lock);

                hdlc->xmit = dscc4_start_xmit;
                hdlc->attach = dscc4_hdlc_attach;

                dscc4_init_registers(dpriv, d);
                dpriv->parity = PARITY_CRC16_PR0_CCITT;
                dpriv->encoding = ENCODING_NRZ;

                ret = dscc4_init_ring(d);
                if (ret < 0)
                        goto err_unregister;

                ret = register_hdlc_device(d);
                if (ret < 0) {
                        printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
                        dscc4_release_ring(dpriv);
                        goto err_unregister;
                }
        }

        ret = dscc4_set_quartz(root, quartz);
        if (ret < 0)
                goto err_unregister;

        pci_set_drvdata(pdev, ppriv);
        return ret;

err_unregister:
        while (i-- > 0) {
                dscc4_release_ring(root + i);
                unregister_hdlc_device(dscc4_to_dev(root + i));
        }
        kfree(ppriv);
        i = dev_per_card;
err_free_dev:
        while (i-- > 0)
                free_netdev(root[i].dev);
        kfree(root);
err_out:
        return ret;
}

static void dscc4_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

        goto done;
done:
        dpriv->timer.expires = jiffies + TX_TIMEOUT;
        add_timer(&dpriv->timer);
}

static void dscc4_tx_timeout(struct net_device *dev)
{
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
        sync_serial_settings *settings = &dpriv->settings;

        if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
                struct net_device *dev = dscc4_to_dev(dpriv);

                printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
                return -1;
        }
        return 0;
}

#ifdef CONFIG_DSCC4_PCI_RST

static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
        int i;

        mutex_lock(&dscc4_mutex);
        for (i = 0; i < 16; i++)
                pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

        writel(0x001c0000, ioaddr + GMODE);

        writel(0x0000ffff, ioaddr + GPDIR);

        writel(0x0000ffff, ioaddr + GPIM);

        writel(0x0000ffff, ioaddr + GPDATA);
        writel(0x00000000, ioaddr + GPDATA);

        readl(ioaddr + GSTAR);

        schedule_timeout_uninterruptible(10);

        for (i = 0; i < 16; i++)
                pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
        mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
#endif

static int dscc4_open(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        struct dscc4_pci_priv *ppriv;
        int ret = -EAGAIN;

        if ((dscc4_loopback_check(dpriv) < 0))
                goto err;

        if ((ret = hdlc_open(dev)))
                goto err;

        ppriv = dpriv->pci_priv;

        if (dpriv->flags & FakeReset) {
                dpriv->flags &= ~FakeReset;
                scc_patchl(0, PowerUp, dpriv, dev, CCR0);
                scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
                scc_writel(EventsMask, dpriv, dev, IMR);
                printk(KERN_INFO "%s: up again.\n", dev->name);
                goto done;
        }

        dpriv->flags = NeedIDR | NeedIDT;

        scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

        if (scc_readl_star(dpriv, dev) & SccBusy) {
                printk(KERN_ERR "%s busy. Try later\n", dev->name);
                ret = -EAGAIN;
                goto err_out;
        } else
                printk(KERN_INFO "%s: available. Good\n", dev->name);

        scc_writel(EventsMask, dpriv, dev, IMR);

        scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

        if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
                goto err_disable_scc_events;

        if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
                printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
                goto err_disable_scc_events;
        }

        if (debug > 2)
                dscc4_tx_print(dev, dpriv, "Open");

done:
        netif_start_queue(dev);

        init_timer(&dpriv->timer);
        dpriv->timer.expires = jiffies + 10*HZ;
        dpriv->timer.data = (unsigned long)dev;
        dpriv->timer.function = &dscc4_timer;
        add_timer(&dpriv->timer);
        netif_carrier_on(dev);

        return 0;

err_disable_scc_events:
        scc_writel(0xffffffff, dpriv, dev, IMR);
        scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
        hdlc_close(dev);
err:
        return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
}
#endif

static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
        struct TxFD *tx_fd;
        int next;

        next = dpriv->tx_current%TX_RING_SIZE;
        dpriv->tx_skbuff[next] = skb;
        tx_fd = dpriv->tx_fd + next;
        tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
        tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
                                                 PCI_DMA_TODEVICE));
        tx_fd->complete = 0x00000000;
        tx_fd->jiffies = jiffies;
        mb();

#ifdef DSCC4_POLLING
        spin_lock(&dpriv->lock);
        while (dscc4_tx_poll(dpriv, dev));
        spin_unlock(&dpriv->lock);
#endif

        dev->trans_start = jiffies;

        if (debug > 2)
                dscc4_tx_print(dev, dpriv, "Xmit");

        if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
                netif_stop_queue(dev);

        if (dscc4_tx_quiescent(dpriv, dev))
                dscc4_do_tx(dpriv, dev);

        return NETDEV_TX_OK;
}

static int dscc4_close(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

        del_timer_sync(&dpriv->timer);
        netif_stop_queue(dev);

        scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
        scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
        scc_writel(0xffffffff, dpriv, dev, IMR);

        dpriv->flags |= FakeReset;

        hdlc_close(dev);

        return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
        int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
        if (port >= 2)
                ret = -1;
#endif
        return ret;
}

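/*
 * Baud rate generator sketch, assuming BRR splits the divider into a
 * 6-bit mantissa N (BrrMultMask) and a 4-bit exponent M (BrrExpMask):
 * the effective divider is N << M and the achieved rate is
 * xtal / (N << M), with the code additionally multiplying the divider
 * by 16 when CCR0 bit 0 ends up clear (the large-divider case); requests
 * beyond the 22-bit range are clamped to N = 63, M = 15.  Illustrative
 * example with a hypothetical 9.8304 MHz quartz and 9600 bps:
 * 9830400 / 9600 = 1024, so N = 32 and M = 5 (32 << 5 = 1024) and the
 * requested rate is hit exactly; otherwise *bps is rewritten with the
 * rate actually achieved so the caller can report the adjustment.
 */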
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        int ret = -1;
        u32 brr;

        *state &= ~Ccr0ClockMask;
        if (*bps) {
                u32 n = 0, m = 0, divider;
                int xtal;

                xtal = dpriv->pci_priv->xtal_hz;
                if (!xtal)
                        goto done;
                if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
                        goto done;
                divider = xtal / *bps;
                if (divider > BRR_DIVIDER_MAX) {
                        divider >>= 4;
                        *state |= 0x00000036;
                } else
                        *state |= 0x00000037;
                if (divider >> 22) {
                        n = 63;
                        m = 15;
                } else if (divider) {
                        m = 0;
                        while (0xffffffc0 & divider) {
                                m++;
                                divider >>= 1;
                        }
                        n = divider;
                }
                brr = (m << 8) | n;
                divider = n << m;
                if (!(*state & 0x00000001))
                        divider <<= 4;
                *bps = xtal / divider;
        } else {
                brr = 0;
        }
        scc_writel(brr, dpriv, dev, BRR);
        ret = 0;
done:
        return ret;
}

static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        const size_t size = sizeof(dpriv->settings);
        int ret = 0;

        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (cmd != SIOCWANDEV)
                return -EOPNOTSUPP;

        switch (ifr->ifr_settings.type) {
        case IF_GET_IFACE:
                ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size;
                        return -ENOBUFS;
                }
                if (copy_to_user(line, &dpriv->settings, size))
                        return -EFAULT;
                break;

        case IF_IFACE_SYNC_SERIAL:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dpriv->flags & FakeReset) {
                        printk(KERN_INFO "%s: please reset the device"
                               " before this command\n", dev->name);
                        return -EPERM;
                }
                if (copy_from_user(&dpriv->settings, line, size))
                        return -EFAULT;
                ret = dscc4_set_iface(dpriv, dev);
                break;

        default:
                ret = hdlc_ioctl(dev, ifr, cmd);
                break;
        }

        return ret;
}

static int dscc4_match(struct thingie *p, int value)
{
        int i;

        for (i = 0; p[i].define != -1; i++) {
                if (value == p[i].define)
                        break;
        }
        if (p[i].define == -1)
                return -1;
        else
                return i;
}

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
                               struct net_device *dev)
{
        sync_serial_settings *settings = &dpriv->settings;
        int ret = -EOPNOTSUPP;
        u32 bps, state;

        bps = settings->clock_rate;
        state = scc_readl(dpriv, CCR0);
        if (dscc4_set_clock(dev, &bps, &state) < 0)
                goto done;
        if (bps) {
                printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
                if (settings->clock_rate != bps) {
                        printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
                               dev->name, settings->clock_rate, bps);
                        settings->clock_rate = bps;
                }
        } else {
                state |= PowerUp | Vis;
                printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
        }
        scc_writel(state, dpriv, dev, CCR0);
        ret = 0;
done:
        return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
                                  struct net_device *dev)
{
        struct thingie encoding[] = {
                { ENCODING_NRZ,        0x00000000 },
                { ENCODING_NRZI,       0x00200000 },
                { ENCODING_FM_MARK,    0x00400000 },
                { ENCODING_FM_SPACE,   0x00500000 },
                { ENCODING_MANCHESTER, 0x00600000 },
                { -1, 0}
        };
        int i, ret = 0;

        i = dscc4_match(encoding, dpriv->encoding);
        if (i >= 0)
                scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
        else
                ret = -EOPNOTSUPP;
        return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
                                  struct net_device *dev)
{
        sync_serial_settings *settings = &dpriv->settings;
        u32 state;

        state = scc_readl(dpriv, CCR1);
        if (settings->loopback) {
                printk(KERN_DEBUG "%s: loopback\n", dev->name);
                state |= 0x00000100;
        } else {
                printk(KERN_DEBUG "%s: normal\n", dev->name);
                state &= ~0x00000100;
        }
        scc_writel(state, dpriv, dev, CCR1);
        return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
                             struct net_device *dev)
{
        struct thingie crc[] = {
                { PARITY_CRC16_PR0_CCITT, 0x00000010 },
                { PARITY_CRC16_PR1_CCITT, 0x00000000 },
                { PARITY_CRC32_PR0_CCITT, 0x00000011 },
                { PARITY_CRC32_PR1_CCITT, 0x00000001 }
        };
        int i, ret = 0;

        i = dscc4_match(crc, dpriv->parity);
        if (i >= 0)
                scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
        else
                ret = -EOPNOTSUPP;
        return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        struct {
                int (*action)(struct dscc4_dev_priv *, struct net_device *);
        } *p, do_setting[] = {
                { dscc4_encoding_setting },
                { dscc4_clock_setting },
                { dscc4_loopback_setting },
                { dscc4_crc_setting },
                { NULL }
        };
        int ret = 0;

        for (p = do_setting; p->action; p++) {
                if ((ret = p->action(dpriv, dev)) < 0)
                        break;
        }
        return ret;
}

static irqreturn_t dscc4_irq(int irq, void *token)
{
        struct dscc4_dev_priv *root = token;
        struct dscc4_pci_priv *priv;
        struct net_device *dev;
        void __iomem *ioaddr;
        u32 state;
        unsigned long flags;
        int i, handled = 1;

        priv = root->pci_priv;
        dev = dscc4_to_dev(root);

        spin_lock_irqsave(&priv->lock, flags);

        ioaddr = root->base_addr;

        state = readl(ioaddr + GSTAR);
        if (!state) {
                handled = 0;
                goto out;
        }
        if (debug > 3)
                printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
        writel(state, ioaddr + GSTAR);

        if (state & Arf) {
                printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
                       dev->name);
                goto out;
        }
        state &= ~ArAck;
        if (state & Cfg) {
                if (debug > 0)
                        printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
                if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
                        printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
                if (!(state &= ~Cfg))
                        goto out;
        }
        if (state & RxEvt) {
                i = dev_per_card - 1;
                do {
                        dscc4_rx_irq(priv, root + i);
                } while (--i >= 0);
                state &= ~RxEvt;
        }
        if (state & TxEvt) {
                i = dev_per_card - 1;
                do {
                        dscc4_tx_irq(priv, root + i);
                } while (--i >= 0);
                state &= ~TxEvt;
        }
out:
        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_RETVAL(handled);
}

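/*
 * Interrupt queue processing: the chip appends one 32-bit status vector
 * per event to the per-channel iqtx/iqrx rings in host memory.  A zero
 * entry marks the end of pending events; the handlers below consume an
 * entry, clear it and advance iq*_current, looping until they hit zero
 * again.
 */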
static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
                         struct dscc4_dev_priv *dpriv)
{
        struct net_device *dev = dscc4_to_dev(dpriv);
        u32 state;
        int cur, loop = 0;

try:
        cur = dpriv->iqtx_current%IRQ_RING_SIZE;
        state = le32_to_cpu(dpriv->iqtx[cur]);
        if (!state) {
                if (debug > 4)
                        printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
                               state);
                if ((debug > 1) && (loop > 1))
                        printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
                if (loop && netif_queue_stopped(dev))
                        if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
                                netif_wake_queue(dev);

                if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
                    !dscc4_tx_done(dpriv))
                        dscc4_do_tx(dpriv, dev);
                return;
        }
        loop++;
        dpriv->iqtx[cur] = 0;
        dpriv->iqtx_current++;

        if (state_check(state, dpriv, dev, "Tx") < 0)
                return;

        if (state & SccEvt) {
                if (state & Alls) {
                        struct sk_buff *skb;
                        struct TxFD *tx_fd;

                        if (debug > 2)
                                dscc4_tx_print(dev, dpriv, "Alls");

                        cur = dpriv->tx_dirty%TX_RING_SIZE;
                        tx_fd = dpriv->tx_fd + cur;
                        skb = dpriv->tx_skbuff[cur];
                        if (skb) {
                                pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
                                                 skb->len, PCI_DMA_TODEVICE);
                                if (tx_fd->state & FrameEnd) {
                                        dev->stats.tx_packets++;
                                        dev->stats.tx_bytes += skb->len;
                                }
                                dev_kfree_skb_irq(skb);
                                dpriv->tx_skbuff[cur] = NULL;
                                ++dpriv->tx_dirty;
                        } else {
                                if (debug > 1)
                                        printk(KERN_ERR "%s Tx: NULL skb %d\n",
                                               dev->name, cur);
                        }

                        tx_fd->data = tx_fd->next;
                        tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
                        tx_fd->complete = 0x00000000;
                        tx_fd->jiffies = 0;

                        if (!(state &= ~Alls))
                                goto try;
                }

                if (state & Xdu) {
                        printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
                        dpriv->flags = NeedIDT;

                        writel(MTFi | Rdt,
                               dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
                        writel(Action, dpriv->base_addr + GCMDR);
                        return;
                }
                if (state & Cts) {
                        printk(KERN_INFO "%s: CTS transition\n", dev->name);
                        if (!(state &= ~Cts))
                                goto try;
                }
                if (state & Xmr) {
                        printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
                        if (!(state &= ~Xmr))
                                goto try;
                }
                if (state & Xpr) {
                        void __iomem *scc_addr;
                        unsigned long ring;
                        int i;

                        for (i = 1; i; i <<= 1) {
                                if (!(scc_readl_star(dpriv, dev) & SccBusy))
                                        break;
                        }
                        if (!i)
                                printk(KERN_INFO "%s busy in irq\n", dev->name);

                        scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

                        if (dpriv->flags & NeedIDT) {
                                if (debug > 2)
                                        dscc4_tx_print(dev, dpriv, "Xpr");
                                ring = dpriv->tx_fd_dma +
                                       (dpriv->tx_dirty%TX_RING_SIZE)*
                                       sizeof(struct TxFD);
                                writel(ring, scc_addr + CH0BTDA);
                                dscc4_do_tx(dpriv, dev);
                                writel(MTFi | Idt, scc_addr + CH0CFG);
                                if (dscc4_do_action(dev, "IDT") < 0)
                                        goto err_xpr;
                                dpriv->flags &= ~NeedIDT;
                        }
                        if (dpriv->flags & NeedIDR) {
                                ring = dpriv->rx_fd_dma +
                                       (dpriv->rx_current%RX_RING_SIZE)*
                                       sizeof(struct RxFD);
                                writel(ring, scc_addr + CH0BRDA);
                                dscc4_rx_update(dpriv, dev);
                                writel(MTFi | Idr, scc_addr + CH0CFG);
                                if (dscc4_do_action(dev, "IDR") < 0)
                                        goto err_xpr;
                                dpriv->flags &= ~NeedIDR;
                                smp_wmb();

                                scc_writel(0x08050008, dpriv, dev, CCR2);
                        }
                err_xpr:
                        if (!(state &= ~Xpr))
                                goto try;
                }
                if (state & Cd) {
                        if (debug > 0)
                                printk(KERN_INFO "%s: CD transition\n", dev->name);
                        if (!(state &= ~Cd))
                                goto try;
                }
        } else {
                if (state & Hi) {
#ifdef DSCC4_POLLING
                        while (!dscc4_tx_poll(dpriv, dev));
#endif
                        printk(KERN_INFO "%s: Tx Hi\n", dev->name);
                        state &= ~Hi;
                }
                if (state & Err) {
                        printk(KERN_INFO "%s: Tx ERR\n", dev->name);
                        dev->stats.tx_errors++;
                        state &= ~Err;
                }
        }
        goto try;
}

static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
                         struct dscc4_dev_priv *dpriv)
{
        struct net_device *dev = dscc4_to_dev(dpriv);
        u32 state;
        int cur;

try:
        cur = dpriv->iqrx_current%IRQ_RING_SIZE;
        state = le32_to_cpu(dpriv->iqrx[cur]);
        if (!state)
                return;
        dpriv->iqrx[cur] = 0;
        dpriv->iqrx_current++;

        if (state_check(state, dpriv, dev, "Rx") < 0)
                return;

        if (!(state & SccEvt)) {
                struct RxFD *rx_fd;

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
                               state);
                state &= 0x00ffffff;
                if (state & Err) {
                        printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
                        cur = dpriv->rx_current%RX_RING_SIZE;
                        rx_fd = dpriv->rx_fd + cur;

                        while (!(rx_fd->state1 & Hold)) {
                                rx_fd++;
                                cur++;
                                if (!(cur = cur%RX_RING_SIZE))
                                        rx_fd = dpriv->rx_fd;
                        }

                        try_get_rx_skb(dpriv, dev);
                        if (!rx_fd->data)
                                goto try;
                        rx_fd->state1 &= ~Hold;
                        rx_fd->state2 = 0x00000000;
                        rx_fd->end = cpu_to_le32(0xbabeface);

                        goto try;
                }
                if (state & Fi) {
                        dscc4_rx_skb(dpriv, dev);
                        goto try;
                }
                if (state & Hi) {
                        printk(KERN_INFO "%s: Rx Hi\n", dev->name);
                        state &= ~Hi;
                        goto try;
                }
        } else {
                if (debug > 1) {
                        static struct {
                                u32 mask;
                                const char *irq_name;
                        } evts[] = {
                                { 0x00008000, "TIN"},
                                { 0x00000020, "RSC"},
                                { 0x00000010, "PCE"},
                                { 0x00000008, "PLLA"},
                                { 0, NULL}
                        }, *evt;

                        for (evt = evts; evt->irq_name; evt++) {
                                if (state & evt->mask) {
                                        printk(KERN_DEBUG "%s: %s\n",
                                               dev->name, evt->irq_name);
                                        if (!(state &= ~evt->mask))
                                                goto try;
                                }
                        }
                } else {
                        if (!(state &= ~0x0000c03c))
                                goto try;
                }
                if (state & Cts) {
                        printk(KERN_INFO "%s: CTS transition\n", dev->name);
                        if (!(state &= ~Cts))
                                goto try;
                }

                if (state & Rdo) {
                        struct RxFD *rx_fd;
                        void __iomem *scc_addr;
                        int cur;

                        scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

                        scc_patchl(RxActivate, 0, dpriv, dev, CCR2);

                        scc_writel(RxSccRes, dpriv, dev, CMDR);
                        dpriv->flags |= RdoSet;

                        do {
                                cur = dpriv->rx_current++%RX_RING_SIZE;
                                rx_fd = dpriv->rx_fd + cur;
                                if (!(rx_fd->state2 & DataComplete))
                                        break;
                                if (rx_fd->state2 & FrameAborted) {
                                        dev->stats.rx_over_errors++;
                                        rx_fd->state1 |= Hold;
                                        rx_fd->state2 = 0x00000000;
                                        rx_fd->end = cpu_to_le32(0xbabeface);
                                } else
                                        dscc4_rx_skb(dpriv, dev);
                        } while (1);

                        if (debug > 0) {
                                if (dpriv->flags & RdoSet)
                                        printk(KERN_DEBUG
                                               "%s: no RDO in Rx data\n", DRV_NAME);
                        }
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY

#warning "FIXME: CH0BRDA"
                        writel(dpriv->rx_fd_dma +
                               (dpriv->rx_current%RX_RING_SIZE)*
                               sizeof(struct RxFD), scc_addr + CH0BRDA);
                        writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
                        if (dscc4_do_action(dev, "RDR") < 0) {
                                printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
                                       dev->name, "RDR");
                                goto rdo_end;
                        }
                        writel(MTFi|Idr, scc_addr + CH0CFG);
                        if (dscc4_do_action(dev, "IDR") < 0) {
                                printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
                                       dev->name, "IDR");
                                goto rdo_end;
                        }
                rdo_end:
#endif
                        scc_patchl(0, RxActivate, dpriv, dev, CCR2);
                        goto try;
                }
                if (state & Cd) {
                        printk(KERN_INFO "%s: CD transition\n", dev->name);
                        if (!(state &= ~Cd))
                                goto try;
                }
                if (state & Flex) {
                        printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
                        if (!(state &= ~Flex))
                                goto try;
                }
        }
}

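/*
 * The Tx ring is primed with one "dummy" frame whose payload is just the
 * driver version string; presumably it exists only so the last Tx
 * descriptor handed to the chip always references a valid DMA buffer
 * rather than anything meant to reach the wire.
 */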
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(DUMMY_SKB_SIZE);
        if (skb) {
                int last = dpriv->tx_dirty%TX_RING_SIZE;
                struct TxFD *tx_fd = dpriv->tx_fd + last;

                skb->len = DUMMY_SKB_SIZE;
                skb_copy_to_linear_data(skb, version,
                                        strlen(version) % DUMMY_SKB_SIZE);
                tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
                tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
                                          skb->data, DUMMY_SKB_SIZE,
                                          PCI_DMA_TODEVICE));
                dpriv->tx_skbuff[last] = skb;
        }
        return skb;
}

static int dscc4_init_ring(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        struct pci_dev *pdev = dpriv->pci_priv->pdev;
        struct TxFD *tx_fd;
        struct RxFD *rx_fd;
        void *ring;
        int i;

        ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
        if (!ring)
                goto err_out;
        dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

        ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
        if (!ring)
                goto err_free_dma_rx;
        dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

        memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
        dpriv->tx_dirty = 0xffffffff;
        i = dpriv->tx_current = 0;
        do {
                tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
                tx_fd->complete = 0x00000000;

                tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
                (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
                                              (++i%TX_RING_SIZE)*sizeof(*tx_fd));
        } while (i < TX_RING_SIZE);

        if (!dscc4_init_dummy_skb(dpriv))
                goto err_free_dma_tx;

        memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
        i = dpriv->rx_dirty = dpriv->rx_current = 0;
        do {
                rx_fd->state1 = HiDesc;
                rx_fd->state2 = 0x00000000;
                rx_fd->end = cpu_to_le32(0xbabeface);
                rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);

                if (try_get_rx_skb(dpriv, dev) >= 0)
                        dpriv->rx_dirty++;
                (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
                                              (++i%RX_RING_SIZE)*sizeof(*rx_fd));
        } while (i < RX_RING_SIZE);

        return 0;

err_free_dma_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
        return -ENOMEM;
}

static void __devexit dscc4_remove_one(struct pci_dev *pdev)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        void __iomem *ioaddr;
        int i;

        ppriv = pci_get_drvdata(pdev);
        root = ppriv->root;

        ioaddr = root->base_addr;

        dscc4_pci_reset(pdev, ioaddr);

        free_irq(pdev->irq, root);
        pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
                            ppriv->iqcfg_dma);
        for (i = 0; i < dev_per_card; i++) {
                struct dscc4_dev_priv *dpriv = root + i;

                dscc4_release_ring(dpriv);
                pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
                                    dpriv->iqrx, dpriv->iqrx_dma);
                pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
                                    dpriv->iqtx, dpriv->iqtx_dma);
        }

        dscc4_free1(pdev);

        iounmap(ioaddr);

        pci_release_region(pdev, 1);
        pci_release_region(pdev, 0);

        pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
                             unsigned short parity)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI &&
            encoding != ENCODING_FM_MARK &&
            encoding != ENCODING_FM_SPACE &&
            encoding != ENCODING_MANCHESTER)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC16_PR0_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT &&
            parity != PARITY_CRC32_PR0_CCITT &&
            parity != PARITY_CRC32_PR1_CCITT)
                return -EINVAL;

        dpriv->encoding = encoding;
        dpriv->parity = parity;
        return 0;
}

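/*
 * Built-in (non-modular) configuration: the integers on the command line
 * are consumed in the order of the args[] table below, so a hypothetical
 * "dscc4.setup=1,14745600" would set debug=1 and quartz=14745600 Hz.
 */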
#ifndef MODULE
static int __init dscc4_setup(char *str)
{
        int *args[] = { &debug, &quartz, NULL }, **p = args;

        while (*p && (get_option(&str, *p) == 2))
                p++;
        return 1;
}

__setup("dscc4.setup=", dscc4_setup);
#endif

static struct pci_device_id dscc4_pci_tbl[] = {
        { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
          PCI_ANY_ID, PCI_ANY_ID, },
        { 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
        .name     = DRV_NAME,
        .id_table = dscc4_pci_tbl,
        .probe    = dscc4_init_one,
        .remove   = __devexit_p(dscc4_remove_one),
};

static int __init dscc4_init_module(void)
{
        return pci_register_driver(&dscc4_driver);
}

static void __exit dscc4_cleanup_module(void)
{
        pci_unregister_driver(&dscc4_driver);
}

module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);