1
2
3
4
5
6
7
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/net.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/if_arp.h>
18#include <linux/delay.h>
19#include <linux/hdlc.h>
20#include <linux/ioport.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <net/arp.h>
24
25#include <asm/irq.h>
26#include <asm/io.h>
27#include <asm/dma.h>
28#include <asm/byteorder.h>
29#include "z85230.h"
30
/* Per-port state: binds one z8530 channel to its HDLC net device. */
struct slvl_device {
	struct z8530_channel *chan;	/* z8530 channel backing this port */
	int channel;			/* 0 = channel A (DMA), 1 = channel B (PIO) */
};
35
/* One Sealevel board: two ports sharing a single Z8530 SCC. */
struct slvl_board {
	struct slvl_device dev[2];	/* per-channel wrappers (A then B) */
	struct z8530_dev board;		/* underlying Z8530 device pair */
	int iobase;			/* base I/O port as requested (no flag bits) */
};
41
42
43
44static inline struct slvl_device *dev_to_chan(struct net_device *dev)
45{
46 return (struct slvl_device *)dev_to_hdlc(dev)->priv;
47}
48
49
50
51
52
/* Receive hook installed on the z8530 channel: strip the trailing CRC,
 * classify the frame via the generic HDLC layer and queue it for the
 * network stack.
 */
static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Frame is for us */
	skb_trim(skb, skb->len - 2);	/* drop the 2 CRC bytes from the tail */
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}
62
63
64
65static int sealevel_open(struct net_device *d)
66{
67 struct slvl_device *slvl = dev_to_chan(d);
68 int err = -1;
69 int unit = slvl->channel;
70
71
72
73 switch (unit) {
74 case 0:
75 err = z8530_sync_dma_open(d, slvl->chan);
76 break;
77 case 1:
78 err = z8530_sync_open(d, slvl->chan);
79 break;
80 }
81
82 if (err)
83 return err;
84
85 err = hdlc_open(d);
86 if (err) {
87 switch (unit) {
88 case 0:
89 z8530_sync_dma_close(d, slvl->chan);
90 break;
91 case 1:
92 z8530_sync_close(d, slvl->chan);
93 break;
94 }
95 return err;
96 }
97
98 slvl->chan->rx_function = sealevel_input;
99
100 netif_start_queue(d);
101 return 0;
102}
103
104static int sealevel_close(struct net_device *d)
105{
106 struct slvl_device *slvl = dev_to_chan(d);
107 int unit = slvl->channel;
108
109
110
111 slvl->chan->rx_function = z8530_null_rx;
112
113 hdlc_close(d);
114 netif_stop_queue(d);
115
116 switch (unit) {
117 case 0:
118 z8530_sync_dma_close(d, slvl->chan);
119 break;
120 case 1:
121 z8530_sync_close(d, slvl->chan);
122 break;
123 }
124 return 0;
125}
126
127
128
129static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
130 struct net_device *d)
131{
132 return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
133}
134
135static int sealevel_attach(struct net_device *dev, unsigned short encoding,
136 unsigned short parity)
137{
138 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
139 return 0;
140 return -EINVAL;
141}
142
/* Net device operations: open/stop manage the z8530 channel; transmit
 * and the SIOCWANDEV ioctl are routed through the generic HDLC layer
 * (which calls back into sealevel_queue_xmit via dev_to_hdlc()->xmit).
 */
static const struct net_device_ops sealevel_ops = {
	.ndo_open       = sealevel_open,
	.ndo_stop       = sealevel_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = hdlc_ioctl,
};
149
150static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
151{
152 struct net_device *dev = alloc_hdlcdev(sv);
153
154 if (!dev)
155 return -1;
156
157 dev_to_hdlc(dev)->attach = sealevel_attach;
158 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
159 dev->netdev_ops = &sealevel_ops;
160 dev->base_addr = iobase;
161 dev->irq = irq;
162
163 if (register_hdlc_device(dev)) {
164 pr_err("unable to register HDLC device\n");
165 free_netdev(dev);
166 return -1;
167 }
168
169 sv->chan->netdevice = dev;
170 return 0;
171}
172
173
174
175static __init struct slvl_board *slvl_init(int iobase, int irq,
176 int txdma, int rxdma, int slow)
177{
178 struct z8530_dev *dev;
179 struct slvl_board *b;
180
181
182
183 if (!request_region(iobase, 8, "Sealevel 4021")) {
184 pr_warn("I/O 0x%X already in use\n", iobase);
185 return NULL;
186 }
187
188 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
189 if (!b)
190 goto err_kzalloc;
191
192 b->dev[0].chan = &b->board.chanA;
193 b->dev[0].channel = 0;
194
195 b->dev[1].chan = &b->board.chanB;
196 b->dev[1].channel = 1;
197
198 dev = &b->board;
199
200
201
202 dev->active = 0;
203
204 b->iobase = iobase;
205
206
207
208 if (slow)
209 iobase |= Z8530_PORT_SLEEP;
210
211 dev->chanA.ctrlio = iobase + 1;
212 dev->chanA.dataio = iobase;
213 dev->chanB.ctrlio = iobase + 3;
214 dev->chanB.dataio = iobase + 2;
215
216 dev->chanA.irqs = &z8530_nop;
217 dev->chanB.irqs = &z8530_nop;
218
219
220
221 outb(3 | (1 << 7), b->iobase + 4);
222
223
224
225
226
227 if (request_irq(irq, z8530_interrupt, 0,
228 "SeaLevel", dev) < 0) {
229 pr_warn("IRQ %d already in use\n", irq);
230 goto err_request_irq;
231 }
232
233 dev->irq = irq;
234 dev->chanA.private = &b->dev[0];
235 dev->chanB.private = &b->dev[1];
236 dev->chanA.dev = dev;
237 dev->chanB.dev = dev;
238
239 dev->chanA.txdma = 3;
240 dev->chanA.rxdma = 1;
241 if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
242 goto err_dma_tx;
243
244 if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
245 goto err_dma_rx;
246
247 disable_irq(irq);
248
249
250
251 if (z8530_init(dev) != 0) {
252 pr_err("Z8530 series device not found\n");
253 enable_irq(irq);
254 goto free_hw;
255 }
256 if (dev->type == Z85C30) {
257 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
258 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
259 } else {
260 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
261 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
262 }
263
264
265
266 enable_irq(irq);
267
268 if (slvl_setup(&b->dev[0], iobase, irq))
269 goto free_hw;
270 if (slvl_setup(&b->dev[1], iobase, irq))
271 goto free_netdev0;
272
273 z8530_describe(dev, "I/O", iobase);
274 dev->active = 1;
275 return b;
276
277free_netdev0:
278 unregister_hdlc_device(b->dev[0].chan->netdevice);
279 free_netdev(b->dev[0].chan->netdevice);
280free_hw:
281 free_dma(dev->chanA.rxdma);
282err_dma_rx:
283 free_dma(dev->chanA.txdma);
284err_dma_tx:
285 free_irq(irq, dev);
286err_request_irq:
287 kfree(b);
288err_kzalloc:
289 release_region(iobase, 8);
290 return NULL;
291}
292
293static void __exit slvl_shutdown(struct slvl_board *b)
294{
295 int u;
296
297 z8530_shutdown(&b->board);
298
299 for (u = 0; u < 2; u++) {
300 struct net_device *d = b->dev[u].chan->netdevice;
301
302 unregister_hdlc_device(d);
303 free_netdev(d);
304 }
305
306 free_irq(b->board.irq, &b->board);
307 free_dma(b->board.chanA.rxdma);
308 free_dma(b->board.chanA.txdma);
309
310 outb(0, b->iobase);
311 release_region(b->iobase, 8);
312 kfree(b);
313}
314
/* Module parameters — defaults match a stock Sealevel 4021 setup.
 * NOTE(review): txdma/rxdma are accepted but slvl_init() hard-codes
 * the DMA channels; confirm whether these were meant to be honoured.
 */
static int io = 0x238;		/* base I/O port */
static int txdma = 1;		/* transmit DMA channel */
static int rxdma = 3;		/* receive DMA channel */
static int irq = 5;		/* interrupt line */
static bool slow;		/* set for older cards such as the 4012 */

module_param_hw(io, int, ioport, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
module_param_hw(txdma, int, dma, 0);
MODULE_PARM_DESC(txdma, "Transmit DMA channel");
module_param_hw(rxdma, int, dma, 0);
MODULE_PARM_DESC(rxdma, "Receive DMA channel");
module_param_hw(irq, int, irq, 0);
MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
module_param(slow, bool, 0);
MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");

MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");

/* The single board instance this driver supports. */
static struct slvl_board *slvl_unit;
337
338static int __init slvl_init_module(void)
339{
340 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
341
342 return slvl_unit ? 0 : -ENODEV;
343}
344
static void __exit slvl_cleanup_module(void)
{
	/* slvl_unit is NULL if the probe failed at load time */
	if (slvl_unit)
		slvl_shutdown(slvl_unit);
}

module_init(slvl_init_module);
module_exit(slvl_cleanup_module);
353