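/*
 * dscc4.c: driver for the Siemens PEB20534 "DSCC4" four-channel
 * synchronous serial PCI controller, built on generic HDLC support.
 */
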
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/cache.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>

static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
static int debug;
static int quartz;

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME	"dscc4"

#undef DSCC4_POLLING

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");

struct thingie {
        int define;
        u32 bits;
};

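/*
 * Tx and Rx DMA buffer descriptors, laid out as the DSCC4 DMA engine
 * expects them: little-endian fields, chained through 'next'.
 */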
struct TxFD {
        __le32 state;
        __le32 next;
        __le32 data;
        __le32 complete;
        u32 jiffies;	/* host bookkeeping; keeps sizeof(TxFD) == sizeof(RxFD) */
};

struct RxFD {
        __le32 state1;
        __le32 next;
        __le32 data;
        __le32 state2;
        __le32 end;
};

#define DUMMY_SKB_SIZE		64
#define TX_LOW			8
#define TX_RING_SIZE		32
#define RX_RING_SIZE		32
#define TX_TOTAL_SIZE		(TX_RING_SIZE*sizeof(struct TxFD))
#define RX_TOTAL_SIZE		(RX_RING_SIZE*sizeof(struct RxFD))
#define IRQ_RING_SIZE		64
#define TX_TIMEOUT		(HZ/10)
#define DSCC4_HZ_MAX		33000000
#define BRR_DIVIDER_MAX		(64*0x00004000)
#define dev_per_card		4
#define SCC_REGISTERS_MAX	23

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)

#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5)	/* next 32-byte multiple above len */
#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)

struct dscc4_pci_priv {
        __le32 *iqcfg;
        int cfg_cur;
        spinlock_t lock;
        struct pci_dev *pdev;

        struct dscc4_dev_priv *root;
        dma_addr_t iqcfg_dma;
        u32 xtal_hz;
};

struct dscc4_dev_priv {
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        struct sk_buff *tx_skbuff[TX_RING_SIZE];

        struct RxFD *rx_fd;
        struct TxFD *tx_fd;
        __le32 *iqrx;
        __le32 *iqtx;

        volatile u32 tx_current;
        u32 rx_current;
        u32 iqtx_current;
        u32 iqrx_current;

        volatile u32 tx_dirty;
        volatile u32 ltda;
        u32 rx_dirty;
        u32 lrda;

        dma_addr_t tx_fd_dma;
        dma_addr_t rx_fd_dma;
        dma_addr_t iqtx_dma;
        dma_addr_t iqrx_dma;

        u32 scc_regs[SCC_REGISTERS_MAX]; /* software copy; see scc_readl() */

        struct dscc4_pci_priv *pci_priv;
        spinlock_t lock;

        int dev_id;
        volatile u32 flags;
        u32 timer_help;

        unsigned short encoding;
        unsigned short parity;
        struct net_device *dev;
        sync_serial_settings settings;
        void __iomem *base_addr;
        u32 __pad __attribute__ ((aligned (4)));
};

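/* Global registers, as offsets from the BAR 0 MMIO base */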
#define GCMDR	0x00
#define GSTAR	0x04
#define GMODE	0x08
#define IQLENR0	0x0C
#define IQLENR1	0x10
#define IQRX0	0x14
#define IQTX0	0x24
#define IQCFG	0x3c
#define FIFOCR1	0x44
#define FIFOCR2	0x48
#define FIFOCR3	0x4c
#define FIFOCR4	0x34
#define CH0CFG	0x50
#define CH0BRDA	0x54
#define CH0BTDA	0x58
#define CH0FRDA	0x98
#define CH0FTDA	0xb0
#define CH0LRDA	0xc8
#define CH0LTDA	0xe0

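/* SCC registers, one 0x80-byte window per channel starting at SCC_START */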
#define SCC_START	0x0100
#define SCC_OFFSET	0x80
#define CMDR	0x00
#define STAR	0x04
#define CCR0	0x08
#define CCR1	0x0c
#define CCR2	0x10
#define BRR	0x2C
#define RLCR	0x40
#define IMR	0x54
#define ISR	0x58

#define GPDIR	0x0400
#define GPDATA	0x0404
#define GPIM	0x0408

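/* Bit masks */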
#define EncodingMask	0x00700000
#define CrcMask		0x00000003

#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1fff
#define RxSizeMax	0x1ffc

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define IsrMask		0x000fffff
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define Hold		cpu_to_le32(0x40000000)
#define SccBusy		0x10000000
#define PowerUp		0x80000000
#define Vis		0x00001000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameRab	0x10
#define FrameAborted	cpu_to_le32(0x00000200)
#define FrameEnd	cpu_to_le32(0x80000000)
#define DataComplete	cpu_to_le32(0x40000000)
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		cpu_to_le32(0x20000000)

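/* SCC events */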
#define RxEvt	0xf0000000
#define TxEvt	0x0f000000
#define Alls	0x00040000
#define Xdu	0x00010000
#define Cts	0x00004000
#define Xmr	0x00002000
#define Xpr	0x00001000
#define Rdo	0x00000080
#define Rfs	0x00000040
#define Cd	0x00000004
#define Rfo	0x00000002
#define Flex	0x00000001

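/* DMA core events */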
#define Cfg	0x00200000
#define Hi	0x00040000
#define Fi	0x00020000
#define Err	0x00010000
#define Arf	0x00000002
#define ArAck	0x00000001

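/* State flags (dpriv->flags) */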
#define Ready		0x00000000
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004
#define FakeReset	0x00000008

#ifdef DSCC4_POLLING
#define EventsMask	0xfffeef7f
#else
#define EventsMask	0xfffa8f7a
#endif

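/* Functions' prototypes */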
static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif

static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
        return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
        return p->dev;
}

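/*
 * SCC register accessors. Every write is mirrored in dpriv->scc_regs[]
 * so that scc_readl() can return the last value written without an MMIO
 * read; scc_patchl() performs a read-modify-write on the shadow copy.
 */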
static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, int offset)
{
        u32 state;

        state = dpriv->scc_regs[offset >> 2];
        state &= ~mask;
        state |= value;
        dpriv->scc_regs[offset >> 2] = state;
        writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, int offset)
{
        dpriv->scc_regs[offset >> 2] = bits;
        writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}

static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
        return dpriv->scc_regs[offset >> 2];
}

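/* STAR is deliberately read twice; the first value is thrown away */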
static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
        return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}

static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
                               struct net_device *dev)
{
        dpriv->ltda = dpriv->tx_fd_dma +
                      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
        writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
        /* the read back flushes the posted write */
        readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}

static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
                                   struct net_device *dev)
{
        dpriv->lrda = dpriv->rx_fd_dma +
                      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
        writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}

static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
        return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
                                              struct net_device *dev)
{
        return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}

static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
                       struct net_device *dev, const char *msg)
{
        int ret = 0;

        if (debug > 1) {
                if (SOURCE_ID(state) != dpriv->dev_id) {
                        printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
                               dev->name, msg, SOURCE_ID(state), state);
                        ret = -1;
                }
                if (state & 0x0df80c00) {
                        printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
                               dev->name, msg, state);
                        ret = -1;
                }
        }
        return ret;
}

static void dscc4_tx_print(struct net_device *dev,
                           struct dscc4_dev_priv *dpriv,
                           char *msg)
{
        printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
               dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}

static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
        struct device *d = &dpriv->pci_priv->pdev->dev;
        struct TxFD *tx_fd = dpriv->tx_fd;
        struct RxFD *rx_fd = dpriv->rx_fd;
        struct sk_buff **skbuff;
        int i;

        skbuff = dpriv->tx_skbuff;
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (*skbuff) {
                        dma_unmap_single(d, le32_to_cpu(tx_fd->data),
                                         (*skbuff)->len, DMA_TO_DEVICE);
                        dev_kfree_skb(*skbuff);
                }
                skbuff++;
                tx_fd++;
        }

        skbuff = dpriv->rx_skbuff;
        for (i = 0; i < RX_RING_SIZE; i++) {
                if (*skbuff) {
                        dma_unmap_single(d, le32_to_cpu(rx_fd->data),
                                         RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE);
                        dev_kfree_skb(*skbuff);
                }
                skbuff++;
                rx_fd++;
        }

        /* Free the descriptor rings only after their 'data' fields
         * have been read back for unmapping. */
        dma_free_coherent(d, TX_TOTAL_SIZE, dpriv->tx_fd, dpriv->tx_fd_dma);
        dma_free_coherent(d, RX_TOTAL_SIZE, dpriv->rx_fd, dpriv->rx_fd_dma);
}

static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
                                 struct net_device *dev)
{
        unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
        struct device *d = &dpriv->pci_priv->pdev->dev;
        struct RxFD *rx_fd = dpriv->rx_fd + dirty;
        const int len = RX_MAX(HDLC_MAX_MRU);
        struct sk_buff *skb;
        dma_addr_t addr;

        skb = dev_alloc_skb(len);
        if (!skb)
                goto err_out;

        skb->protocol = hdlc_type_trans(skb, dev);
        addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(d, addr))
                goto err_free_skb;

        dpriv->rx_skbuff[dirty] = skb;
        rx_fd->data = cpu_to_le32(addr);
        return 0;

err_free_skb:
        dev_kfree_skb_any(skb);
err_out:
        rx_fd->data = 0;
        return -1;
}

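/* Poll until the SCC command-executing flag (SccBusy in STAR) clears */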
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
                              struct net_device *dev, char *msg)
{
        s8 i = 0;

        do {
                if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
                        printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
                               msg, i);
                        goto done;
                }
                schedule_timeout_uninterruptible(msecs_to_jiffies(100));
                rmb();
        } while (++i > 0);
        netdev_err(dev, "%s timeout\n", msg);
done:
        return (i >= 0) ? i : -EAGAIN;
}

static int dscc4_do_action(struct net_device *dev, char *msg)
{
        void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
        s16 i = 0;

        writel(Action, ioaddr + GCMDR);
        ioaddr += GSTAR;
        do {
                u32 state = readl(ioaddr);

                if (state & ArAck) {
                        netdev_dbg(dev, "%s ack\n", msg);
                        writel(ArAck, ioaddr);
                        goto done;
                } else if (state & Arf) {
                        netdev_err(dev, "%s failed\n", msg);
                        writel(Arf, ioaddr);
                        i = -1;
                        goto done;
                }
                rmb();
        } while (++i > 0);
        netdev_err(dev, "%s timeout\n", msg);
done:
        return i;
}

static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
        int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
        s8 i = 0;

        do {
                if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
                    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
                        break;
                smp_rmb();
                schedule_timeout_uninterruptible(msecs_to_jiffies(100));
        } while (++i > 0);

        return (i >= 0) ? i : -EAGAIN;
}

#if 0
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
        writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
        scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
        readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
        writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
        writel(Action, dpriv->base_addr + GCMDR);
        spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
#endif

#if 0
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        u16 i = 0;

        scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
        scc_writel(0x00050000, dpriv, dev, CCR2);

        while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
                udelay(1);
                wmb();
        }

        writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
        if (dscc4_do_action(dev, "Rdt") < 0)
                netdev_err(dev, "Tx reset failed\n");
}
#endif

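/* Pass a received frame up the stack and refill the Rx ring */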
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
                                struct net_device *dev)
{
        struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
        struct device *d = &dpriv->pci_priv->pdev->dev;
        struct sk_buff *skb;
        int pkt_len;

        skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
        if (!skb) {
                printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
                goto refill;
        }
        /* the last byte of the frame holds the receive status */
        pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
        dma_unmap_single(d, le32_to_cpu(rx_fd->data),
                         RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE);
        if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
                skb_put(skb, pkt_len);
                if (netif_running(dev))
                        skb->protocol = hdlc_type_trans(skb, dev);
                netif_rx(skb);
        } else {
                if (skb->data[pkt_len] & FrameRdo)
                        dev->stats.rx_fifo_errors++;
                else if (!(skb->data[pkt_len] & FrameCrc))
                        dev->stats.rx_crc_errors++;
                else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
                         (FrameVfr | FrameRab))
                        dev->stats.rx_length_errors++;
                dev->stats.rx_errors++;
                dev_kfree_skb_irq(skb);
        }
refill:
        while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
                if (try_get_rx_skb(dpriv, dev) < 0)
                        break;
                dpriv->rx_dirty++;
        }
        dscc4_rx_update(dpriv, dev);
        rx_fd->state2 = 0x00000000;
        rx_fd->end = cpu_to_le32(0xbabeface);
}

static void dscc4_free1(struct pci_dev *pdev)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        int i;

        ppriv = pci_get_drvdata(pdev);
        root = ppriv->root;

        for (i = 0; i < dev_per_card; i++)
                unregister_hdlc_device(dscc4_to_dev(root + i));

        for (i = 0; i < dev_per_card; i++)
                free_netdev(root[i].dev);
        kfree(root);
        kfree(ppriv);
}

static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct dscc4_pci_priv *priv;
        struct dscc4_dev_priv *dpriv;
        void __iomem *ioaddr;
        int i, rc;

        printk(KERN_DEBUG "%s", version);

        rc = pci_enable_device(pdev);
        if (rc < 0)
                goto out;

        rc = pci_request_region(pdev, 0, "registers");
        if (rc < 0) {
                pr_err("can't reserve MMIO region (regs)\n");
                goto err_disable_0;
        }
        rc = pci_request_region(pdev, 1, "LBI interface");
        if (rc < 0) {
                pr_err("can't reserve MMIO region (lbi)\n");
                goto err_free_mmio_region_1;
        }

        ioaddr = pci_ioremap_bar(pdev, 0);
        if (!ioaddr) {
                pr_err("cannot remap MMIO region %llx @ %llx\n",
                       (unsigned long long)pci_resource_len(pdev, 0),
                       (unsigned long long)pci_resource_start(pdev, 0));
                rc = -EIO;
                goto err_free_mmio_regions_2;
        }
        printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
               (unsigned long long)pci_resource_start(pdev, 0),
               (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
        pci_set_master(pdev);

        rc = dscc4_found1(pdev, ioaddr);
        if (rc < 0)
                goto err_iounmap_3;

        priv = pci_get_drvdata(pdev);

        rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
        if (rc < 0) {
                pr_warn("IRQ %d busy\n", pdev->irq);
                goto err_release_4;
        }

        writel(0x00000001, ioaddr + GMODE);

        /* per-SCC Rx/Tx interrupt queue lengths, one nibble per queue,
         * each encoded as size/32 - 1 */
        {
                u32 bits;

                bits = (IRQ_RING_SIZE >> 5) - 1;
                bits |= bits << 4;
                bits |= bits << 8;
                bits |= bits << 16;
                writel(bits, ioaddr + IQLENR0);
        }
        /* global configuration interrupt queue length */
        writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);

        rc = -ENOMEM;

        priv->iqcfg = (__le32 *)dma_alloc_coherent(&pdev->dev,
                IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma, GFP_KERNEL);
        if (!priv->iqcfg)
                goto err_free_irq_5;
        writel(priv->iqcfg_dma, ioaddr + IQCFG);

        /* SCC 0-3 private Rx/Tx interrupt queues */
        for (i = 0; i < dev_per_card; i++) {
                dpriv = priv->root + i;
                dpriv->iqtx = (__le32 *)dma_alloc_coherent(&pdev->dev,
                        IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma,
                        GFP_KERNEL);
                if (!dpriv->iqtx)
                        goto err_free_iqtx_6;
                writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
        }
        for (i = 0; i < dev_per_card; i++) {
                dpriv = priv->root + i;
                dpriv->iqrx = (__le32 *)dma_alloc_coherent(&pdev->dev,
                        IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma,
                        GFP_KERNEL);
                if (!dpriv->iqrx)
                        goto err_free_iqrx_7;
                writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
        }

        /* Tx/Rx FIFO configuration */
        writel(0x42104000, ioaddr + FIFOCR1);
        writel(0xdef6d800, ioaddr + FIFOCR2);
        writel(0x18181818, ioaddr + FIFOCR4);
        writel(0x0000000e, ioaddr + FIFOCR3);

        writel(0xff200001, ioaddr + GCMDR);

        rc = 0;
out:
        return rc;

err_free_iqrx_7:
        while (--i >= 0) {
                dpriv = priv->root + i;
                dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32),
                                  dpriv->iqrx, dpriv->iqrx_dma);
        }
        i = dev_per_card;
err_free_iqtx_6:
        while (--i >= 0) {
                dpriv = priv->root + i;
                dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32),
                                  dpriv->iqtx, dpriv->iqtx_dma);
        }
        dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
                          priv->iqcfg_dma);
err_free_irq_5:
        free_irq(pdev->irq, priv->root);
err_release_4:
        dscc4_free1(pdev);
err_iounmap_3:
        iounmap(ioaddr);
err_free_mmio_regions_2:
        pci_release_region(pdev, 1);
err_free_mmio_region_1:
        pci_release_region(pdev, 0);
err_disable_0:
        pci_disable_device(pdev);
        goto out;
}

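/* Program a quiescent default configuration into a channel's SCC registers */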
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
                                 struct net_device *dev)
{
        /* SCC core held powered down (PowerUp bit clear) */
        scc_writel(0x00000000, dpriv, dev, CCR0);

        /* receive length check limit, in units of 32 bytes */
        scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

        scc_writel(0x02408000, dpriv, dev, CCR1);

        /* Rx is deliberately left inactive until dscc4_open() */
        scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
}

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
        int ret = 0;

        if ((hz < 0) || (hz > DSCC4_HZ_MAX))
                ret = -EOPNOTSUPP;
        else
                dpriv->pci_priv->xtal_hz = hz;

        return ret;
}

static const struct net_device_ops dscc4_ops = {
        .ndo_open       = dscc4_open,
        .ndo_stop       = dscc4_close,
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_do_ioctl   = dscc4_ioctl,
        .ndo_tx_timeout = dscc4_tx_timeout,
};

static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        int i, ret = -ENOMEM;

        root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
        if (!root)
                goto err_out;

        for (i = 0; i < dev_per_card; i++) {
                root[i].dev = alloc_hdlcdev(root + i);
                if (!root[i].dev)
                        goto err_free_dev;
        }

        ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
        if (!ppriv)
                goto err_free_dev;

        ppriv->root = root;
        spin_lock_init(&ppriv->lock);

        for (i = 0; i < dev_per_card; i++) {
                struct dscc4_dev_priv *dpriv = root + i;
                struct net_device *d = dscc4_to_dev(dpriv);
                hdlc_device *hdlc = dev_to_hdlc(d);

                d->base_addr = (unsigned long)ioaddr;
                d->irq = pdev->irq;
                d->netdev_ops = &dscc4_ops;
                d->watchdog_timeo = TX_TIMEOUT;
                SET_NETDEV_DEV(d, &pdev->dev);

                dpriv->dev_id = i;
                dpriv->pci_priv = ppriv;
                dpriv->base_addr = ioaddr;
                spin_lock_init(&dpriv->lock);

                hdlc->xmit = dscc4_start_xmit;
                hdlc->attach = dscc4_hdlc_attach;

                dscc4_init_registers(dpriv, d);
                dpriv->parity = PARITY_CRC16_PR0_CCITT;
                dpriv->encoding = ENCODING_NRZ;

                ret = dscc4_init_ring(d);
                if (ret < 0)
                        goto err_unregister;

                ret = register_hdlc_device(d);
                if (ret < 0) {
                        pr_err("unable to register\n");
                        dscc4_release_ring(dpriv);
                        goto err_unregister;
                }
        }

        ret = dscc4_set_quartz(root, quartz);
        if (ret < 0)
                goto err_unregister;

        pci_set_drvdata(pdev, ppriv);
        return ret;

err_unregister:
        while (i-- > 0) {
                dscc4_release_ring(root + i);
                unregister_hdlc_device(dscc4_to_dev(root + i));
        }
        kfree(ppriv);
        i = dev_per_card;
err_free_dev:
        while (i-- > 0)
                free_netdev(root[i].dev);
        kfree(root);
err_out:
        return ret;
}

static void dscc4_tx_timeout(struct net_device *dev)
{
        /* FIXME: something is missing there */
}

static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
        sync_serial_settings *settings = &dpriv->settings;

        if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
                struct net_device *dev = dscc4_to_dev(dpriv);

                netdev_info(dev, "loopback requires clock\n");
                return -1;
        }
        return 0;
}

#ifdef CONFIG_DSCC4_PCI_RST
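/*
 * The chip's general-purpose I/O pins are pulsed high then low while the
 * PCI configuration space is saved and restored around the pulse. On
 * boards which wire those pins to a reset line, this amounts to a
 * board-level reset.
 */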
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
        int i;

        mutex_lock(&dscc4_mutex);
        /* save the PCI config space */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

        writel(0x001c0000, ioaddr + GMODE);
        /* all GPIO pins as outputs */
        writel(0x0000ffff, ioaddr + GPDIR);
        /* mask GPIO interrupts */
        writel(0x0000ffff, ioaddr + GPIM);
        /* pulse the GPIO lines high, then low */
        writel(0x0000ffff, ioaddr + GPDATA);
        writel(0x00000000, ioaddr + GPDATA);

        /* flush posted writes */
        readl(ioaddr + GSTAR);

        schedule_timeout_uninterruptible(msecs_to_jiffies(100));

        for (i = 0; i < 16; i++)
                pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
        mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev, ioaddr)	do {} while (0)
#endif

static int dscc4_open(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        int ret = -EAGAIN;

        if (dscc4_loopback_check(dpriv) < 0)
                goto err;

        if ((ret = hdlc_open(dev)))
                goto err;

        /*
         * A port closed by dscc4_close() is merely quiesced, not fully
         * reset, so reopening it only needs to power the SCC back up
         * and unmask its events.
         */
        if (dpriv->flags & FakeReset) {
                dpriv->flags &= ~FakeReset;
                scc_patchl(0, PowerUp, dpriv, dev, CCR0);
                scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
                scc_writel(EventsMask, dpriv, dev, IMR);
                netdev_info(dev, "up again\n");
                goto done;
        }

        /* Rx/Tx descriptor lists are (re)loaded at the next XPR event */
        dpriv->flags = NeedIDR | NeedIDT;

        scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

        if (scc_readl_star(dpriv, dev) & SccBusy) {
                netdev_err(dev, "busy - try later\n");
                ret = -EAGAIN;
                goto err_out;
        } else
                netdev_info(dev, "available - good\n");

        scc_writel(EventsMask, dpriv, dev, IMR);

        /* reset the Tx and Rx SCC cores */
        scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

        if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
                goto err_disable_scc_events;

        /* wait for the XPR interrupt which follows the SCC reset */
        if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
                pr_err("XPR timeout\n");
                goto err_disable_scc_events;
        }

        if (debug > 2)
                dscc4_tx_print(dev, dpriv, "Open");

done:
        netif_start_queue(dev);

        netif_carrier_on(dev);

        return 0;

err_disable_scc_events:
        scc_writel(0xffffffff, dpriv, dev, IMR);
        scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
        hdlc_close(dev);
err:
        return ret;
}

#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        /* FIXME: polling-mode Tx processing is not implemented */
}
#endif

static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        struct device *d = &dpriv->pci_priv->pdev->dev;
        struct TxFD *tx_fd;
        dma_addr_t addr;
        int next;

        addr = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(d, addr)) {
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        next = dpriv->tx_current%TX_RING_SIZE;
        dpriv->tx_skbuff[next] = skb;
        tx_fd = dpriv->tx_fd + next;
        tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
        tx_fd->data = cpu_to_le32(addr);
        tx_fd->complete = 0x00000000;
        tx_fd->jiffies = jiffies;
        mb();

#ifdef DSCC4_POLLING
        spin_lock(&dpriv->lock);
        while (dscc4_tx_poll(dpriv, dev));
        spin_unlock(&dpriv->lock);
#endif

        if (debug > 2)
                dscc4_tx_print(dev, dpriv, "Xmit");
        /* stop the queue once the ring is full */
        if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
                netif_stop_queue(dev);

        if (dscc4_tx_quiescent(dpriv, dev))
                dscc4_do_tx(dpriv, dev);

        return NETDEV_TX_OK;
}

static int dscc4_close(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

        netif_stop_queue(dev);

        scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
        scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
        scc_writel(0xffffffff, dpriv, dev, IMR);

        dpriv->flags |= FakeReset;

        hdlc_close(dev);

        return 0;
}

static inline int dscc4_check_clock_ability(int port)
{
        int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
        if (port >= 2)
                ret = -1;
#endif
        return ret;
}

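/*
 * dscc4_set_clock() programs the baud rate generator for the requested
 * bit rate: divider = xtal/bps is encoded into BRR as a 6-bit mantissa
 * 'n' and a 4-bit exponent 'm' (divider = n * 2^m), while the low bits
 * of CCR0 (*state) select the clock mode. *bps is updated with the rate
 * actually achieved. A null bps requests an external clock (DTE).
 */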
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        int ret = -1;
        u32 brr;

        *state &= ~Ccr0ClockMask;
        if (*bps) {
                /* clock generated - DCE */
                u32 n = 0, m = 0, divider;
                int xtal;

                xtal = dpriv->pci_priv->xtal_hz;
                if (!xtal)
                        goto done;
                if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
                        goto done;
                divider = xtal / *bps;
                if (divider > BRR_DIVIDER_MAX) {
                        divider >>= 4;
                        *state |= 0x00000036;
                } else
                        *state |= 0x00000037;
                if (divider >> 22) {
                        n = 63;
                        m = 15;
                } else if (divider) {
                        /* extract the six highest weighted bits */
                        m = 0;
                        while (0xffffffc0 & divider) {
                                m++;
                                divider >>= 1;
                        }
                        n = divider;
                }
                brr = (m << 8) | n;
                divider = n << m;
                if (!(*state & 0x00000001))
                        divider <<= 4;
                *bps = xtal / divider;
        } else {
                /* external clock - DTE */
                brr = 0;
        }
        scc_writel(brr, dpriv, dev, BRR);
        ret = 0;
done:
        return ret;
}

static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        const size_t size = sizeof(dpriv->settings);
        int ret = 0;

        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (cmd != SIOCWANDEV)
                return -EOPNOTSUPP;

        switch (ifr->ifr_settings.type) {
        case IF_GET_IFACE:
                ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size;
                        return -ENOBUFS;
                }
                if (copy_to_user(line, &dpriv->settings, size))
                        return -EFAULT;
                break;

        case IF_IFACE_SYNC_SERIAL:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dpriv->flags & FakeReset) {
                        netdev_info(dev, "please reset the device before this command\n");
                        return -EPERM;
                }
                if (copy_from_user(&dpriv->settings, line, size))
                        return -EFAULT;
                ret = dscc4_set_iface(dpriv, dev);
                break;

        default:
                ret = hdlc_ioctl(dev, ifr, cmd);
                break;
        }

        return ret;
}

static int dscc4_match(const struct thingie *p, int value)
{
        int i;

        for (i = 0; p[i].define != -1; i++) {
                if (value == p[i].define)
                        break;
        }
        if (p[i].define == -1)
                return -1;
        else
                return i;
}

static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
                               struct net_device *dev)
{
        sync_serial_settings *settings = &dpriv->settings;
        int ret = -EOPNOTSUPP;
        u32 bps, state;

        bps = settings->clock_rate;
        state = scc_readl(dpriv, CCR0);
        if (dscc4_set_clock(dev, &bps, &state) < 0)
                goto done;
        if (bps) {
                printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
                if (settings->clock_rate != bps) {
                        printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
                               dev->name, settings->clock_rate, bps);
                        settings->clock_rate = bps;
                }
        } else {
                state |= PowerUp | Vis;
                printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
        }
        scc_writel(state, dpriv, dev, CCR0);
        ret = 0;
done:
        return ret;
}

static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
                                  struct net_device *dev)
{
        static const struct thingie encoding[] = {
                { ENCODING_NRZ,		0x00000000 },
                { ENCODING_NRZI,	0x00200000 },
                { ENCODING_FM_MARK,	0x00400000 },
                { ENCODING_FM_SPACE,	0x00500000 },
                { ENCODING_MANCHESTER,	0x00600000 },
                { -1, 0}
        };
        int i, ret = 0;

        i = dscc4_match(encoding, dpriv->encoding);
        if (i >= 0)
                scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
        else
                ret = -EOPNOTSUPP;
        return ret;
}

static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
                                  struct net_device *dev)
{
        sync_serial_settings *settings = &dpriv->settings;
        u32 state;

        state = scc_readl(dpriv, CCR1);
        if (settings->loopback) {
                printk(KERN_DEBUG "%s: loopback\n", dev->name);
                state |= 0x00000100;
        } else {
                printk(KERN_DEBUG "%s: normal\n", dev->name);
                state &= ~0x00000100;
        }
        scc_writel(state, dpriv, dev, CCR1);
        return 0;
}

static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
                             struct net_device *dev)
{
        static const struct thingie crc[] = {
                { PARITY_CRC16_PR0_CCITT,	0x00000010 },
                { PARITY_CRC16_PR1_CCITT,	0x00000000 },
                { PARITY_CRC32_PR0_CCITT,	0x00000011 },
                { PARITY_CRC32_PR1_CCITT,	0x00000001 },
                { -1, 0}	/* terminator for dscc4_match() */
        };
        int i, ret = 0;

        i = dscc4_match(crc, dpriv->parity);
        if (i >= 0)
                scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
        else
                ret = -EOPNOTSUPP;
        return ret;
}

static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
        struct {
                int (*action)(struct dscc4_dev_priv *, struct net_device *);
        } *p, do_setting[] = {
                { dscc4_encoding_setting },
                { dscc4_clock_setting },
                { dscc4_loopback_setting },
                { dscc4_crc_setting },
                { NULL }
        };
        int ret = 0;

        for (p = do_setting; p->action; p++) {
                if ((ret = p->action(dpriv, dev)) < 0)
                        break;
        }
        return ret;
}

static irqreturn_t dscc4_irq(int irq, void *token)
{
        struct dscc4_dev_priv *root = token;
        struct dscc4_pci_priv *priv;
        struct net_device *dev;
        void __iomem *ioaddr;
        u32 state;
        unsigned long flags;
        int i, handled = 1;

        priv = root->pci_priv;
        dev = dscc4_to_dev(root);

        spin_lock_irqsave(&priv->lock, flags);

        ioaddr = root->base_addr;

        state = readl(ioaddr + GSTAR);
        if (!state) {
                handled = 0;
                goto out;
        }
        if (debug > 3)
                printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
        writel(state, ioaddr + GSTAR);

        if (state & Arf) {
                netdev_err(dev, "failure (Arf). Harass the maintainer\n");
                goto out;
        }
        state &= ~ArAck;
        if (state & Cfg) {
                if (debug > 0)
                        printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
                if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
                        netdev_err(dev, "CFG failed\n");
                if (!(state &= ~Cfg))
                        goto out;
        }
        if (state & RxEvt) {
                i = dev_per_card - 1;
                do {
                        dscc4_rx_irq(priv, root + i);
                } while (--i >= 0);
                state &= ~RxEvt;
        }
        if (state & TxEvt) {
                i = dev_per_card - 1;
                do {
                        dscc4_tx_irq(priv, root + i);
                } while (--i >= 0);
                state &= ~TxEvt;
        }
out:
        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_RETVAL(handled);
}

static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
                         struct dscc4_dev_priv *dpriv)
{
        struct net_device *dev = dscc4_to_dev(dpriv);
        u32 state;
        int cur, loop = 0;

try:
        cur = dpriv->iqtx_current%IRQ_RING_SIZE;
        state = le32_to_cpu(dpriv->iqtx[cur]);
        if (!state) {
                if (debug > 4)
                        printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
                               state);
                if ((debug > 1) && (loop > 1))
                        printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
                if (loop && netif_queue_stopped(dev))
                        if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
                                netif_wake_queue(dev);

                if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
                    !dscc4_tx_done(dpriv))
                        dscc4_do_tx(dpriv, dev);
                return;
        }
        loop++;
        dpriv->iqtx[cur] = 0;
        dpriv->iqtx_current++;

        if (state_check(state, dpriv, dev, "Tx") < 0)
                return;

        if (state & SccEvt) {
                if (state & Alls) {
                        struct sk_buff *skb;
                        struct TxFD *tx_fd;

                        if (debug > 2)
                                dscc4_tx_print(dev, dpriv, "Alls");
                        /* reclaim the oldest outstanding Tx descriptor */
                        cur = dpriv->tx_dirty%TX_RING_SIZE;
                        tx_fd = dpriv->tx_fd + cur;
                        skb = dpriv->tx_skbuff[cur];
                        if (skb) {
                                dma_unmap_single(&ppriv->pdev->dev,
                                                 le32_to_cpu(tx_fd->data),
                                                 skb->len, DMA_TO_DEVICE);
                                if (tx_fd->state & FrameEnd) {
                                        dev->stats.tx_packets++;
                                        dev->stats.tx_bytes += skb->len;
                                }
                                dev_kfree_skb_irq(skb);
                                dpriv->tx_skbuff[cur] = NULL;
                                ++dpriv->tx_dirty;
                        } else {
                                if (debug > 1)
                                        netdev_err(dev, "Tx: NULL skb %d\n",
                                                   cur);
                        }
                        /*
                         * Re-arm the descriptor with a dummy, non-null
                         * payload so the DMA engine never sees a
                         * zero-sized frame.
                         */
                        tx_fd->data = tx_fd->next;
                        tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
                        tx_fd->complete = 0x00000000;
                        tx_fd->jiffies = 0;

                        if (!(state &= ~Alls))
                                goto try;
                }
                /* Transmit Data Underrun */
                if (state & Xdu) {
                        netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");
                        dpriv->flags = NeedIDT;
                        /* Tx reset */
                        writel(MTFi | Rdt,
                               dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
                        writel(Action, dpriv->base_addr + GCMDR);
                        return;
                }
                if (state & Cts) {
                        netdev_info(dev, "CTS transition\n");
                        if (!(state &= ~Cts))
                                goto try;
                }
                if (state & Xmr) {
                        netdev_err(dev, "Tx ReTx. Ask maintainer\n");
                        if (!(state &= ~Xmr))
                                goto try;
                }
                if (state & Xpr) {
                        void __iomem *scc_addr;
                        unsigned long ring;
                        unsigned int i;

                        /* the SCC may stay busy for a while; poll a
                         * bounded number of times */
                        for (i = 1; i; i <<= 1) {
                                if (!(scc_readl_star(dpriv, dev) & SccBusy))
                                        break;
                        }
                        if (!i)
                                netdev_info(dev, "busy in irq\n");

                        scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

                        if (dpriv->flags & NeedIDT) {
                                if (debug > 2)
                                        dscc4_tx_print(dev, dpriv, "Xpr");
                                ring = dpriv->tx_fd_dma +
                                       (dpriv->tx_dirty%TX_RING_SIZE)*
                                       sizeof(struct TxFD);
                                writel(ring, scc_addr + CH0BTDA);
                                dscc4_do_tx(dpriv, dev);
                                writel(MTFi | Idt, scc_addr + CH0CFG);
                                if (dscc4_do_action(dev, "IDT") < 0)
                                        goto err_xpr;
                                dpriv->flags &= ~NeedIDT;
                        }
                        if (dpriv->flags & NeedIDR) {
                                ring = dpriv->rx_fd_dma +
                                       (dpriv->rx_current%RX_RING_SIZE)*
                                       sizeof(struct RxFD);
                                writel(ring, scc_addr + CH0BRDA);
                                dscc4_rx_update(dpriv, dev);
                                writel(MTFi | Idr, scc_addr + CH0CFG);
                                if (dscc4_do_action(dev, "IDR") < 0)
                                        goto err_xpr;
                                dpriv->flags &= ~NeedIDR;
                                smp_wmb();
                                /* activate the receiver */
                                scc_writel(0x08050008, dpriv, dev, CCR2);
                        }
                err_xpr:
                        if (!(state &= ~Xpr))
                                goto try;
                }
                if (state & Cd) {
                        if (debug > 0)
                                netdev_info(dev, "CD transition\n");
                        if (!(state &= ~Cd))
                                goto try;
                }
        } else { /* !SccEvt */
                if (state & Hi) {
#ifdef DSCC4_POLLING
                        while (!dscc4_tx_poll(dpriv, dev));
#endif
                        netdev_info(dev, "Tx Hi\n");
                        state &= ~Hi;
                }
                if (state & Err) {
                        netdev_info(dev, "Tx ERR\n");
                        dev->stats.tx_errors++;
                        state &= ~Err;
                }
        }
        goto try;
}

static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
                         struct dscc4_dev_priv *dpriv)
{
        struct net_device *dev = dscc4_to_dev(dpriv);
        u32 state;
        int cur;

try:
        cur = dpriv->iqrx_current%IRQ_RING_SIZE;
        state = le32_to_cpu(dpriv->iqrx[cur]);
        if (!state)
                return;
        dpriv->iqrx[cur] = 0;
        dpriv->iqrx_current++;

        if (state_check(state, dpriv, dev, "Rx") < 0)
                return;

        if (!(state & SccEvt)) {
                struct RxFD *rx_fd;

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
                               state);
                state &= 0x00ffffff;
                if (state & Err) {
                        printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
                        cur = dpriv->rx_current%RX_RING_SIZE;
                        rx_fd = dpriv->rx_fd + cur;
                        /*
                         * Walk the ring up to the first descriptor the
                         * chip put on hold, then try to re-arm it with
                         * a fresh skb.
                         */
                        while (!(rx_fd->state1 & Hold)) {
                                rx_fd++;
                                cur++;
                                if (!(cur = cur%RX_RING_SIZE))
                                        rx_fd = dpriv->rx_fd;
                        }
                        try_get_rx_skb(dpriv, dev);
                        if (!rx_fd->data)
                                goto try;
                        rx_fd->state1 &= ~Hold;
                        rx_fd->state2 = 0x00000000;
                        rx_fd->end = cpu_to_le32(0xbabeface);
                        goto try;
                }
                if (state & Fi) {
                        dscc4_rx_skb(dpriv, dev);
                        goto try;
                }
                if (state & Hi) {
                        netdev_info(dev, "Rx Hi\n");
                        state &= ~Hi;
                        goto try;
                }
        } else { /* SccEvt */
                if (debug > 1) {
                        static struct {
                                u32 mask;
                                const char *irq_name;
                        } evts[] = {
                                { 0x00008000, "TIN"},
                                { 0x00000020, "RSC"},
                                { 0x00000010, "PCE"},
                                { 0x00000008, "PLLA"},
                                { 0, NULL}
                        }, *evt;

                        for (evt = evts; evt->irq_name; evt++) {
                                if (state & evt->mask) {
                                        printk(KERN_DEBUG "%s: %s\n",
                                               dev->name, evt->irq_name);
                                        if (!(state &= ~evt->mask))
                                                goto try;
                                }
                        }
                } else {
                        if (!(state &= ~0x0000c03c))
                                goto try;
                }
                if (state & Cts) {
                        netdev_info(dev, "CTS transition\n");
                        if (!(state &= ~Cts))
                                goto try;
                }
                /* Receive Data Overflow */
                if (state & Rdo) {
                        struct RxFD *rx_fd;
                        void __iomem *scc_addr;
                        int cur;

                        scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

                        scc_patchl(RxActivate, 0, dpriv, dev, CCR2);

                        scc_writel(RxSccRes, dpriv, dev, CMDR);
                        dpriv->flags |= RdoSet;

                        /*
                         * Salvage the frames completed before the
                         * overflow. rx_current must advance at least
                         * once so the descriptor BRDA will point to is
                         * not on hold.
                         */
                        do {
                                cur = dpriv->rx_current++%RX_RING_SIZE;
                                rx_fd = dpriv->rx_fd + cur;
                                if (!(rx_fd->state2 & DataComplete))
                                        break;
                                if (rx_fd->state2 & FrameAborted) {
                                        dev->stats.rx_over_errors++;
                                        rx_fd->state1 |= Hold;
                                        rx_fd->state2 = 0x00000000;
                                        rx_fd->end = cpu_to_le32(0xbabeface);
                                } else
                                        dscc4_rx_skb(dpriv, dev);
                        } while (1);

                        if (debug > 0) {
                                if (dpriv->flags & RdoSet)
                                        printk(KERN_DEBUG
                                               "%s: no RDO in Rx data\n", DRV_NAME);
                        }
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
#warning "FIXME: CH0BRDA"
                        writel(dpriv->rx_fd_dma +
                               (dpriv->rx_current%RX_RING_SIZE)*
                               sizeof(struct RxFD), scc_addr + CH0BRDA);
                        writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
                        if (dscc4_do_action(dev, "RDR") < 0) {
                                netdev_err(dev, "RDO recovery failed(RDR)\n");
                                goto rdo_end;
                        }
                        writel(MTFi|Idr, scc_addr + CH0CFG);
                        if (dscc4_do_action(dev, "IDR") < 0) {
                                netdev_err(dev, "RDO recovery failed(IDR)\n");
                                goto rdo_end;
                        }
                rdo_end:
#endif
                        scc_patchl(0, RxActivate, dpriv, dev, CCR2);
                        goto try;
                }
                if (state & Cd) {
                        netdev_info(dev, "CD transition\n");
                        if (!(state &= ~Cd))
                                goto try;
                }
                if (state & Flex) {
                        printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
                        if (!(state &= ~Flex))
                                goto try;
                }
        }
}

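/*
 * Queue a dummy frame (the version string) on the Tx ring so the DMA
 * engine always has a valid, non-empty descriptor to work on.
 */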
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(DUMMY_SKB_SIZE);
        if (skb) {
                struct device *d = &dpriv->pci_priv->pdev->dev;
                int last = dpriv->tx_dirty%TX_RING_SIZE;
                struct TxFD *tx_fd = dpriv->tx_fd + last;
                dma_addr_t addr;

                skb->len = DUMMY_SKB_SIZE;
                skb_copy_to_linear_data(skb, version,
                                        strlen(version) % DUMMY_SKB_SIZE);
                addr = dma_map_single(d, skb->data, DUMMY_SKB_SIZE,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(d, addr)) {
                        dev_kfree_skb_any(skb);
                        return NULL;
                }
                tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
                tx_fd->data = cpu_to_le32(addr);
                dpriv->tx_skbuff[last] = skb;
        }
        return skb;
}

static int dscc4_init_ring(struct net_device *dev)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
        struct device *d = &dpriv->pci_priv->pdev->dev;
        struct TxFD *tx_fd;
        struct RxFD *rx_fd;
        void *ring;
        int i;

        ring = dma_alloc_coherent(d, RX_TOTAL_SIZE, &dpriv->rx_fd_dma,
                                  GFP_KERNEL);
        if (!ring)
                goto err_out;
        dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

        ring = dma_alloc_coherent(d, TX_TOTAL_SIZE, &dpriv->tx_fd_dma,
                                  GFP_KERNEL);
        if (!ring)
                goto err_free_dma_rx;
        dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

        memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
        dpriv->tx_dirty = 0xffffffff;
        i = dpriv->tx_current = 0;
        /* chain the Tx descriptors into a ring of dummy frames */
        do {
                tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
                tx_fd->complete = 0x00000000;
                tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
                (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
                                        (++i%TX_RING_SIZE)*sizeof(*tx_fd));
        } while (i < TX_RING_SIZE);

        if (!dscc4_init_dummy_skb(dpriv))
                goto err_free_dma_tx;

        memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
        i = dpriv->rx_dirty = dpriv->rx_current = 0;
        do {
                rx_fd->state1 = HiDesc;
                rx_fd->state2 = 0x00000000;
                rx_fd->end = cpu_to_le32(0xbabeface);
                rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
                if (try_get_rx_skb(dpriv, dev) >= 0)
                        dpriv->rx_dirty++;
                (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
                                        (++i%RX_RING_SIZE)*sizeof(*rx_fd));
        } while (i < RX_RING_SIZE);

        return 0;

err_free_dma_tx:
        dma_free_coherent(d, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
        dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
        return -ENOMEM;
}

static void dscc4_remove_one(struct pci_dev *pdev)
{
        struct dscc4_pci_priv *ppriv;
        struct dscc4_dev_priv *root;
        void __iomem *ioaddr;
        int i;

        ppriv = pci_get_drvdata(pdev);
        root = ppriv->root;

        ioaddr = root->base_addr;

        dscc4_pci_reset(pdev, ioaddr);

        free_irq(pdev->irq, root);
        dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
                          ppriv->iqcfg_dma);
        for (i = 0; i < dev_per_card; i++) {
                struct dscc4_dev_priv *dpriv = root + i;

                dscc4_release_ring(dpriv);
                dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32),
                                  dpriv->iqrx, dpriv->iqrx_dma);
                dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32),
                                  dpriv->iqtx, dpriv->iqtx_dma);
        }

        dscc4_free1(pdev);

        iounmap(ioaddr);

        pci_release_region(pdev, 1);
        pci_release_region(pdev, 0);

        pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
                             unsigned short parity)
{
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI &&
            encoding != ENCODING_FM_MARK &&
            encoding != ENCODING_FM_SPACE &&
            encoding != ENCODING_MANCHESTER)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC16_PR0_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT &&
            parity != PARITY_CRC32_PR0_CCITT &&
            parity != PARITY_CRC32_PR1_CCITT)
                return -EINVAL;

        dpriv->encoding = encoding;
        dpriv->parity = parity;
        return 0;
}

#ifndef MODULE
static int __init dscc4_setup(char *str)
{
        int *args[] = { &debug, &quartz, NULL }, **p = args;

        while (*p && (get_option(&str, *p) == 2))
                p++;
        return 1;
}

__setup("dscc4.setup=", dscc4_setup);
#endif

static const struct pci_device_id dscc4_pci_tbl[] = {
        { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
          PCI_ANY_ID, PCI_ANY_ID, },
        { 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
        .name		= DRV_NAME,
        .id_table	= dscc4_pci_tbl,
        .probe		= dscc4_init_one,
        .remove		= dscc4_remove_one,
};

module_pci_driver(dscc4_driver);