/*
 * Linux driver for HYSDN cards - network interface (ethernet type) handling.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>

#include "hysdn_defs.h"

/* bitmask of cards for which the network interface is enabled */
unsigned int hynet_enable = 0xffffffff;
module_param(hynet_enable, uint, 0);

/* number of sk_buffs kept in the tx ring */
#define MAX_SKB_BUFFERS 20

/*
 * Private data of the network interface. The tx skbs are kept in a
 * ring buffer until the card has fetched and acknowledged them.
 */
struct net_local {
	/* Tx control lock. This protects the transmit buffer ring state
	 * along with the "tx full" state of the driver, so all
	 * netif_queue flow control actions are protected by this lock
	 * as well.
	 */
	struct net_device *dev;
	spinlock_t lock;
	struct sk_buff *skbs[MAX_SKB_BUFFERS];	/* pointers to queued tx-skbs */
	int in_idx, out_idx;	/* producer/consumer ring indexes */
	int sk_count;		/* number of skbs currently in the ring */
};				/* net_local */
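
/*
 * TX path overview: net_send_packet() queues an skb at in_idx and stops
 * the netif queue once MAX_SKB_BUFFERS entries are pending. The card
 * driver fetches the skb at out_idx via hysdn_tx_netget() and, once the
 * data has been handed to the board, calls hysdn_tx_netack() to account
 * it, free it and re-enable the netif queue if it had been stopped.
 */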

/*
 * Open/initialize the network interface. Queueing is enabled and the
 * device MAC address is set up: it is copied from the card if one is
 * available, otherwise a locally generated address based on the first
 * assigned IP address is used.
 */
static int
net_open(struct net_device *dev)
{
	struct in_device *in_dev;
	hysdn_card *card = dev->ml_priv;
	int i;

	netif_start_queue(dev);	/* start tx-queueing */

	/* fill in the MAC address */
	if (!card->mac_addr[0]) {
		/* no card-provided MAC: build one from the IP address */
		for (i = 0; i < ETH_ALEN; i++)
			dev->dev_addr[i] = 0xfc;
		if ((in_dev = dev->ip_ptr) != NULL) {
			const struct in_ifaddr *ifa;

			rcu_read_lock();
			ifa = rcu_dereference(in_dev->ifa_list);
			if (ifa != NULL)
				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)),
				       &ifa->ifa_local, sizeof(ifa->ifa_local));
			rcu_read_unlock();
		}
	} else
		memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);

	return (0);
}				/* net_open */

/*
 * Release all sk_buffs still waiting in the tx ring.
 */
static void
flush_tx_buffers(struct net_local *nl)
{
	while (nl->sk_count) {
		dev_kfree_skb(nl->skbs[nl->out_idx++]);	/* free skb */
		if (nl->out_idx >= MAX_SKB_BUFFERS)
			nl->out_idx = 0;	/* wrap around */
		nl->sk_count--;		/* adjust counter */
	}
}				/* flush_tx_buffers */

/*
 * Shut down the network interface: stop queueing and drop all tx
 * buffers that are still pending.
 */
static int
net_close(struct net_device *dev)
{
	netif_stop_queue(dev);	/* disable queueing */

	flush_tx_buffers(netdev_priv(dev));

	return (0);		/* success */
}				/* net_close */

/*
 * Queue an skb for transmission by the card. The skb stays in the ring
 * until the card has fetched it (hysdn_tx_netget) and acknowledged it
 * (hysdn_tx_netack).
 */
static netdev_tx_t
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	spin_lock_irq(&lp->lock);

	lp->skbs[lp->in_idx++] = skb;	/* add to ring */
	if (lp->in_idx >= MAX_SKB_BUFFERS)
		lp->in_idx = 0;	/* wrap around */
	lp->sk_count++;		/* adjust counter */
	netif_trans_update(dev);

	/* If the ring is full, stop the upper layers from queueing more
	 * packets until the card has acknowledged some of them.
	 */
	if (lp->sk_count >= MAX_SKB_BUFFERS)
		netif_stop_queue(dev);

	spin_unlock_irq(&lp->lock);

	if (lp->sk_count <= 3)
		schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);

	return NETDEV_TX_OK;	/* success */
}				/* net_send_packet */

/*
 * hysdn_tx_netack is called by the card driver when the board has
 * accepted the pending tx data: update the statistics, free the skb
 * and re-enable queueing if the ring had been full.
 */
void
hysdn_tx_netack(hysdn_card *card)
{
	struct net_device *dev = card->netif;
	struct net_local *lp;

	if (!dev)
		return;		/* no network device */

	lp = netdev_priv(dev);
	if (!lp->sk_count)
		return;		/* nothing waiting */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;

	dev_kfree_skb(lp->skbs[lp->out_idx++]);	/* free skb */
	if (lp->out_idx >= MAX_SKB_BUFFERS)
		lp->out_idx = 0;	/* wrap around */

	if (lp->sk_count-- == MAX_SKB_BUFFERS)	/* ring was full */
		netif_start_queue(dev);
}				/* hysdn_tx_netack */

/*
 * Hand a packet received from the card to the network stack.
 */
void
hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len)
{
	struct net_device *dev = card->netif;
	struct sk_buff *skb;

	if (!dev)
		return;		/* no network device */

	dev->stats.rx_bytes += len;

	skb = dev_alloc_skb(len);
	if (skb == NULL) {
		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}
	/* copy the received data into the skb */
	skb_put_data(skb, buf, len);

	/* determine the used protocol */
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;	/* adjust packet count */

	netif_rx(skb);
}				/* hysdn_rx_netpkt */

/*
 * Return the next tx skb to be sent to the card, or NULL if the ring
 * is empty. The skb remains queued until hysdn_tx_netack() is called.
 */
struct sk_buff *
hysdn_tx_netget(hysdn_card *card)
{
	struct net_device *dev = card->netif;
	struct net_local *lp;

	if (!dev)
		return (NULL);	/* no network device */

	lp = netdev_priv(dev);
	if (!lp->sk_count)
		return (NULL);	/* nothing waiting */

	return (lp->skbs[lp->out_idx]);	/* next packet to send */
}				/* hysdn_tx_netget */
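
/*
 * net_device callbacks for the interface. MAC address setting and
 * validation use the generic ethernet helpers.
 */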
static const struct net_device_ops hysdn_netdev_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_start_xmit		= net_send_packet,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Allocate, initialize and register the network device belonging to a
 * card. An already existing device is released first. Returns 0 on
 * success or a negative error code.
 */
int
hysdn_net_create(hysdn_card *card)
{
	struct net_device *dev;
	int i;
	struct net_local *lp;

	if (!card) {
		printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
		return (-ENOMEM);
	}
	hysdn_net_release(card);	/* release an existing net device */

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev) {
		printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
		return (-ENOMEM);
	}

	lp = netdev_priv(dev);
	lp->dev = dev;
	spin_lock_init(&lp->lock);

	/* initialize necessary or informational fields */
	dev->base_addr = card->iobase;	/* IO address */
	dev->irq = card->irq;		/* irq */

	dev->netdev_ops = &hysdn_netdev_ops;
	if ((i = register_netdev(dev))) {
		printk(KERN_WARNING "HYSDN: unable to create network device\n");
		free_netdev(dev);
		return (i);
	}
	dev->ml_priv = card;	/* remember the card pointer */
	card->netif = dev;	/* make the device known to the card */

	if (card->debug_flags & LOG_NET_INIT)
		hysdn_addlog(card, "network device created");
	return 0;		/* and return success */
}				/* hysdn_net_create */

/*
 * Release the network device of a card, if one exists: stop it, flush
 * pending tx buffers and unregister/free the device.
 */
int
hysdn_net_release(hysdn_card *card)
{
	struct net_device *dev = card->netif;

	if (!dev)
		return (0);	/* non-existing device */

	card->netif = NULL;	/* clear out pointer */
	net_close(dev);

	flush_tx_buffers(netdev_priv(dev));	/* empty remaining buffers */

	unregister_netdev(dev);	/* release the device */
	free_netdev(dev);	/* release the memory allocated */
	if (card->debug_flags & LOG_NET_INIT)
		hysdn_addlog(card, "network device deleted");

	return (0);		/* always successful */
}				/* hysdn_net_release */

/*
 * Return the interface name of the card's network device, or "-" if no
 * device has been created.
 */
char *
hysdn_net_getname(hysdn_card *card)
{
	struct net_device *dev = card->netif;

	if (!dev)
		return ("-");	/* no device exists */

	return (dev->name);
}				/* hysdn_net_getname */