// SPDX-License-Identifier: GPL-2.0-only
/*
 *	Sealevel Systems 4021 driver.
 *
 *	(c) Copyright 1999, 2001 Alan Cox
 *	(c) Copyright 2001 Red Hat Inc.
 *	Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/arp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include "z85230.h"

struct slvl_device
{
	struct z8530_channel *chan;
	int channel;
};

struct slvl_board
{
	struct slvl_device dev[2];
	struct z8530_dev board;
	int iobase;
};

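/*
 *	Map a network device back onto its Sealevel channel.
 */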
static inline struct slvl_device *dev_to_chan(struct net_device *dev)
{
	return (struct slvl_device *)dev_to_hdlc(dev)->priv;
}

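/*
 *	Frame receive: strip the trailing CRC and hand the frame to the
 *	generic HDLC layer.
 */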
static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Drop the trailing CRC before the frame goes up the stack */
	skb_trim(skb, skb->len - 2);
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}

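/*
 *	Bring the interface up: open the Z8530 channel, then the generic
 *	HDLC layer on top of it.
 */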
static int sealevel_open(struct net_device *d)
{
	struct slvl_device *slvl = dev_to_chan(d);
	int err = -1;
	int unit = slvl->channel;

	/* Unit 0 is opened with DMA, unit 1 in programmed I/O mode */
	switch (unit) {
	case 0:
		err = z8530_sync_dma_open(d, slvl->chan);
		break;
	case 1:
		err = z8530_sync_open(d, slvl->chan);
		break;
	}

	if (err)
		return err;

	err = hdlc_open(d);
	if (err) {
		switch (unit) {
		case 0:
			z8530_sync_dma_close(d, slvl->chan);
			break;
		case 1:
			z8530_sync_close(d, slvl->chan);
			break;
		}
		return err;
	}

	slvl->chan->rx_function = sealevel_input;

	/* The link layer is up, let traffic flow */
	netif_start_queue(d);
	return 0;
}

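/*
 *	Take the interface down and release the Z8530 channel.
 */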
static int sealevel_close(struct net_device *d)
{
	struct slvl_device *slvl = dev_to_chan(d);
	int unit = slvl->channel;

	/* Discard any frames that still arrive while we shut down */
	slvl->chan->rx_function = z8530_null_rx;

	hdlc_close(d);
	netif_stop_queue(d);

	switch (unit) {
	case 0:
		z8530_sync_dma_close(d, slvl->chan);
		break;
	case 1:
		z8530_sync_close(d, slvl->chan);
		break;
	}
	return 0;
}

static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
	/* All device ioctls are handled by the generic HDLC layer */
	return hdlc_ioctl(d, ifr, cmd);
}

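/*
 *	Pass a transmit frame on to the Z8530 driver for this channel.
 */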
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
				       struct net_device *d)
{
	return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}

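/*
 *	Only NRZ encoding with CCITT CRC16 framing is supported.
 */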
static int sealevel_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static const struct net_device_ops sealevel_ops = {
	.ndo_open = sealevel_open,
	.ndo_stop = sealevel_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl = sealevel_ioctl,
};

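/*
 *	Allocate and register one generic HDLC interface for a Z8530 channel.
 */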
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
	struct net_device *dev = alloc_hdlcdev(sv);

	if (!dev)
		return -1;

	dev_to_hdlc(dev)->attach = sealevel_attach;
	dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
	dev->netdev_ops = &sealevel_ops;
	dev->base_addr = iobase;
	dev->irq = irq;

	if (register_hdlc_device(dev)) {
		pr_err("unable to register HDLC device\n");
		free_netdev(dev);
		return -1;
	}

	sv->chan->netdevice = dev;
	return 0;
}

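/*
 *	Probe and set up one Sealevel board: claim the I/O region, IRQ and
 *	DMA channels, initialise the Z8530 and register both HDLC interfaces.
 */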
static __init struct slvl_board *slvl_init(int iobase, int irq,
					   int txdma, int rxdma, int slow)
{
	struct z8530_dev *dev;
	struct slvl_board *b;

	/* Claim the I/O window for the card */
	if (!request_region(iobase, 8, "Sealevel 4021")) {
		pr_warn("I/O 0x%X already in use\n", iobase);
		return NULL;
	}

	b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
	if (!b)
		goto err_kzalloc;

	b->dev[0].chan = &b->board.chanA;
	b->dev[0].channel = 0;

	b->dev[1].chan = &b->board.chanB;
	b->dev[1].channel = 1;

	dev = &b->board;

	/* Fill in the Z8530 device description */
	dev->active = 0;

	b->iobase = iobase;

	/* Older cards such as the 4012 need slower Z8530 port access */
	if (slow)
		iobase |= Z8530_PORT_SLEEP;

	dev->chanA.ctrlio = iobase + 1;
	dev->chanA.dataio = iobase;
	dev->chanB.ctrlio = iobase + 3;
	dev->chanB.dataio = iobase + 2;

	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;

	/* Assert DTR and enable DMA via the board control register */
	outb(3 | (1 << 7), b->iobase + 4);

	/* The interrupt handler can now be installed */
	if (request_irq(irq, z8530_interrupt, 0, "SeaLevel", dev) < 0) {
		pr_warn("IRQ %d already in use\n", irq);
		goto err_request_irq;
	}

	dev->irq = irq;
	dev->chanA.private = &b->dev[0];
	dev->chanB.private = &b->dev[1];
	dev->chanA.dev = dev;
	dev->chanB.dev = dev;

	dev->chanA.txdma = 3;
	dev->chanA.rxdma = 1;
	if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
		goto err_dma_tx;

	if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
		goto err_dma_rx;

	disable_irq(irq);

	/* Probe for the Z8530 and load the HDLC tables for the chip type */
	if (z8530_init(dev) != 0) {
		pr_err("Z8530 series device not found\n");
		enable_irq(irq);
		goto free_hw;
	}
	if (dev->type == Z85C30) {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
	} else {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
	}

	/* The chip is programmed; it is now safe to take interrupts */
	enable_irq(irq);

	if (slvl_setup(&b->dev[0], iobase, irq))
		goto free_hw;
	if (slvl_setup(&b->dev[1], iobase, irq))
		goto free_netdev0;

	z8530_describe(dev, "I/O", iobase);
	dev->active = 1;
	return b;

free_netdev0:
	unregister_hdlc_device(b->dev[0].chan->netdevice);
	free_netdev(b->dev[0].chan->netdevice);
free_hw:
	free_dma(dev->chanA.rxdma);
err_dma_rx:
	free_dma(dev->chanA.txdma);
err_dma_tx:
	free_irq(irq, dev);
err_request_irq:
	kfree(b);
err_kzalloc:
	release_region(iobase, 8);
	return NULL;
}

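/*
 *	Undo slvl_init: unregister both interfaces and release the IRQ,
 *	DMA channels and I/O region.
 */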
static void __exit slvl_shutdown(struct slvl_board *b)
{
	int u;

	z8530_shutdown(&b->board);

	for (u = 0; u < 2; u++) {
		struct net_device *d = b->dev[u].chan->netdevice;

		unregister_hdlc_device(d);
		free_netdev(d);
	}

	free_irq(b->board.irq, &b->board);
	free_dma(b->board.chanA.rxdma);
	free_dma(b->board.chanA.txdma);

	outb(0, b->iobase);
	release_region(b->iobase, 8);
	kfree(b);
}

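/* Module parameters: I/O base, IRQ, DMA channels and the slow-board flag */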
static int io = 0x238;
static int txdma = 1;
static int rxdma = 3;
static int irq = 5;
static bool slow = false;

module_param_hw(io, int, ioport, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
module_param_hw(txdma, int, dma, 0);
MODULE_PARM_DESC(txdma, "Transmit DMA channel");
module_param_hw(rxdma, int, dma, 0);
MODULE_PARM_DESC(rxdma, "Receive DMA channel");
module_param_hw(irq, int, irq, 0);
MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
module_param(slow, bool, 0);
MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");

MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");

static struct slvl_board *slvl_unit;

static int __init slvl_init_module(void)
{
	slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);

	return slvl_unit ? 0 : -ENODEV;
}

static void __exit slvl_cleanup_module(void)
{
	if (slvl_unit)
		slvl_shutdown(slvl_unit);
}

module_init(slvl_init_module);
module_exit(slvl_cleanup_module);