1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57#include <linux/errno.h>
58#include <linux/types.h>
59#include <linux/socket.h>
60#include <linux/in.h>
61#include <linux/kernel.h>
62#include <linux/string.h>
63#include <linux/net.h>
64#include <linux/slab.h>
65#include <net/ax25.h>
66#include <linux/inet.h>
67#include <linux/netdevice.h>
68#include <linux/etherdevice.h>
69#include <linux/if_arp.h>
70#include <linux/skbuff.h>
71#include <net/sock.h>
72#include <asm/uaccess.h>
73#include <linux/mm.h>
74#include <linux/interrupt.h>
75#include <linux/notifier.h>
76#include <linux/proc_fs.h>
77#include <linux/seq_file.h>
78#include <linux/stat.h>
79#include <linux/netfilter.h>
80#include <linux/module.h>
81#include <linux/init.h>
82#include <linux/rtnetlink.h>
83
84#include <net/ip.h>
85#include <net/arp.h>
86#include <net/net_namespace.h>
87
88#include <linux/bpqether.h>
89
static const char banner[] __initconst = KERN_INFO \
	"AX.25: bpqether driver version 004\n";

/* Default destination/accept address: ethernet broadcast. */
static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};

static char bpq_eth_addr[6];

static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);

/* Handler for incoming ethernet frames of type ETH_P_BPQ. */
static struct packet_type bpq_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_BPQ),
	.func	= bpq_rcv,
};

/* Tracks ethernet devices coming up / going down / unregistering. */
static struct notifier_block bpq_dev_notifier = {
	.notifier_call = bpq_device_event,
};


/* Per-device state: one bpqdev couples a bpq%d AX.25 interface to the
 * ethernet device it is stacked on. */
struct bpqdev {
	struct list_head bpq_list;	/* linkage on bpq_devices */
	struct net_device *ethdev;	/* underlying ethernet device */
	struct net_device *axdev;	/* the bpq%d AX.25 device */
	char dest_addr[6];		/* ether destination address */
	char acpt_addr[6];		/* accept frames from this address only
					 * (any source if multicast/broadcast) */
};

/* All bpq devices; additions/removals under RTNL, walked under RCU. */
static LIST_HEAD(bpq_devices);
119
120
121
122
123
124
/*
 *	bpq devices are stacked on top of ethernet devices, so give their
 *	TX-queue and address-list locks dedicated lockdep classes to avoid
 *	false "recursive lock" reports when both layers take their locks.
 */
static struct lock_class_key bpq_netdev_xmit_lock_key;
static struct lock_class_key bpq_netdev_addr_lock_key;

static void bpq_set_lockdep_class_one(struct net_device *dev,
				      struct netdev_queue *txq,
				      void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
}

/* Apply the bpq lockdep classes to a freshly registered bpq device. */
static void bpq_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
}
140
141
142
143
144
145
146
147static inline struct net_device *bpq_get_ether_dev(struct net_device *dev)
148{
149 struct bpqdev *bpq = netdev_priv(dev);
150
151 return bpq ? bpq->ethdev : NULL;
152}
153
154
155
156
157static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
158{
159 struct bpqdev *bpq;
160
161 list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
162 if (bpq->ethdev == dev)
163 return bpq->axdev;
164 }
165 return NULL;
166}
167
168static inline int dev_is_ethdev(struct net_device *dev)
169{
170 return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
171}
172
173
174
175
176
177
178
179static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
180{
181 int len;
182 char * ptr;
183 struct ethhdr *eth;
184 struct bpqdev *bpq;
185
186 if (!net_eq(dev_net(dev), &init_net))
187 goto drop;
188
189 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
190 return NET_RX_DROP;
191
192 if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
193 goto drop;
194
195 rcu_read_lock();
196 dev = bpq_get_ax25_dev(dev);
197
198 if (dev == NULL || !netif_running(dev))
199 goto drop_unlock;
200
201
202
203
204
205
206 bpq = netdev_priv(dev);
207
208 eth = eth_hdr(skb);
209
210 if (!(bpq->acpt_addr[0] & 0x01) &&
211 memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
212 goto drop_unlock;
213
214 if (skb_cow(skb, sizeof(struct ethhdr)))
215 goto drop_unlock;
216
217 len = skb->data[0] + skb->data[1] * 256 - 5;
218
219 skb_pull(skb, 2);
220 skb_trim(skb, len);
221
222 dev->stats.rx_packets++;
223 dev->stats.rx_bytes += len;
224
225 ptr = skb_push(skb, 1);
226 *ptr = 0;
227
228 skb->protocol = ax25_type_trans(skb, dev);
229 netif_rx(skb);
230unlock:
231
232 rcu_read_unlock();
233
234 return 0;
235drop_unlock:
236 kfree_skb(skb);
237 goto unlock;
238
239drop:
240 kfree_skb(skb);
241 return 0;
242}
243
244
245
246
/*
 *	Send an AX.25 frame out through the underlying ethernet device,
 *	BPQ-encapsulated: the leading KISS byte is replaced by a 2-byte
 *	little-endian length word (AX.25 length + 5, see bpq_rcv()).
 *
 *	Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;
	struct bpqdev *bpq;
	struct net_device *orig_dev;
	int size;

	/*
	 * Just to be *really* sure not to send anything if the interface
	 * is down: the ethernet device may have gone away.
	 */
	if (!netif_running(dev)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb_pull(skb, 1);	/* drop the KISS command byte */
	size = skb->len;

	/*
	 * The skb head is about to be rewritten; unshare it and make sure
	 * there is headroom for the BPQ (and later ethernet) header.
	 */
	if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) {
		if (net_ratelimit())
			pr_err("bpqether: out of memory\n");
		kfree_skb(skb);

		return NETDEV_TX_OK;
	}

	/* Prepend the little-endian length word: AX.25 frame length + 5
	 * bytes of BPQ encapsulation overhead. */
	ptr = skb_push(skb, 2);

	*ptr++ = (size + 5) % 256;	/* low byte */
	*ptr++ = (size + 5) / 256;	/* high byte */

	bpq = netdev_priv(dev);

	/* Switch over to the ethernet device for the actual transmit. */
	orig_dev = dev;
	if ((dev = bpq_get_ether_dev(dev)) == NULL) {
		orig_dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->protocol = ax25_type_trans(skb, dev);
	skb_reset_network_header(skb);
	dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
	/* NOTE(review): 'dev' is the ethernet device at this point, so these
	 * increments hit the ethernet device's counters, not the bpq
	 * device's — confirm this is the intended accounting. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes+=skb->len;

	dev_queue_xmit(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
303
304
305
306
307static int bpq_set_mac_address(struct net_device *dev, void *addr)
308{
309 struct sockaddr *sa = (struct sockaddr *)addr;
310
311 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
312
313 return 0;
314}
315
316
317
318
319
320
321
322
/*
 *	Handle BPQ-private ioctls (CAP_NET_ADMIN required):
 *
 *	SIOCSBPQETHOPT  - G8BPQ-compatible option interface; no sub-command
 *			  is currently implemented, so all are rejected.
 *	SIOCSBPQETHADDR - set the destination and "accept from" ethernet
 *			  addresses of this bpq device.
 *
 *	Returns 0 on success, -EPERM/-EFAULT/-EINVAL on error.
 */
static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bpq_ethaddr __user *ethaddr = ifr->ifr_data;
	struct bpqdev *bpq = netdev_priv(dev);
	struct bpq_req req;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case SIOCSBPQETHOPT:
		if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
			return -EFAULT;
		switch (req.cmd) {
		case SIOCGBPQETHPARAM:		/* fallthrough: unimplemented */
		case SIOCSBPQETHPARAM:		/* fallthrough: unimplemented */
		default:
			return -EINVAL;
		}

		break;

	case SIOCSBPQETHADDR:
		if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN))
			return -EFAULT;
		if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
358
359
360
361
/*
 *	Called when the bpq interface is brought up: start the TX queue.
 */
static int bpq_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}
367
/*
 *	Called when the bpq interface is taken down: stop the TX queue.
 */
static int bpq_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
373
374
375
376
377
378
379
380
381static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
382 __acquires(RCU)
383{
384 int i = 1;
385 struct bpqdev *bpqdev;
386
387 rcu_read_lock();
388
389 if (*pos == 0)
390 return SEQ_START_TOKEN;
391
392 list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
393 if (i == *pos)
394 return bpqdev;
395 }
396 return NULL;
397}
398
/*
 *	seq_file next callback: advance to the following bpq device, or
 *	NULL at the end of the list.  Runs under the RCU read lock taken
 *	in bpq_seq_start().
 */
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *p;
	struct bpqdev *bpqdev = v;

	++*pos;

	/* SEQ_START_TOKEN means we are still before the first element. */
	if (v == SEQ_START_TOKEN)
		p = rcu_dereference(list_next_rcu(&bpq_devices));
	else
		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));

	return (p == &bpq_devices) ? NULL
		: list_entry(p, struct bpqdev, bpq_list);
}
414
/*
 *	seq_file stop callback: release the RCU read lock taken in
 *	bpq_seq_start().
 */
static void bpq_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
420
421
422static int bpq_seq_show(struct seq_file *seq, void *v)
423{
424 if (v == SEQ_START_TOKEN)
425 seq_puts(seq,
426 "dev ether destination accept from\n");
427 else {
428 const struct bpqdev *bpqdev = v;
429
430 seq_printf(seq, "%-5s %-10s %pM ",
431 bpqdev->axdev->name, bpqdev->ethdev->name,
432 bpqdev->dest_addr);
433
434 if (is_multicast_ether_addr(bpqdev->acpt_addr))
435 seq_printf(seq, "*\n");
436 else
437 seq_printf(seq, "%pM\n", bpqdev->acpt_addr);
438
439 }
440 return 0;
441}
442
/* seq_file iterator for /proc/net/bpqether. */
static const struct seq_operations bpq_seqops = {
	.start = bpq_seq_start,
	.next = bpq_seq_next,
	.stop = bpq_seq_stop,
	.show = bpq_seq_show,
};

/* open() handler for /proc/net/bpqether: attach the seq iterator. */
static int bpq_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &bpq_seqops);
}

static const struct file_operations bpq_info_fops = {
	.owner = THIS_MODULE,
	.open = bpq_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};


/* Network-device callbacks for the virtual bpq%d interfaces. */
static const struct net_device_ops bpq_netdev_ops = {
	.ndo_open = bpq_open,
	.ndo_stop = bpq_close,
	.ndo_start_xmit = bpq_xmit,
	.ndo_set_mac_address = bpq_set_mac_address,
	.ndo_do_ioctl = bpq_ioctl,
};
473
/*
 *	Setup routine for the virtual bpq%d device: AX.25 link type and
 *	addressing, with header space reserved for both the AX.25 and BPQ
 *	headers.  Passed to alloc_netdev() in bpq_new_device().
 */
static void bpq_setup(struct net_device *dev)
{
	dev->netdev_ops = &bpq_netdev_ops;
	dev->destructor = free_netdev;	/* freed when the last ref drops */

	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);

	dev->flags = 0;	/* start with all interface flags cleared */

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
	dev->header_ops = &ax25_header_ops;
#endif

	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
	dev->mtu = AX25_DEF_PACLEN;
	dev->addr_len = AX25_ADDR_LEN;

}
494
495
496
497
498static int bpq_new_device(struct net_device *edev)
499{
500 int err;
501 struct net_device *ndev;
502 struct bpqdev *bpq;
503
504 ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d",
505 bpq_setup);
506 if (!ndev)
507 return -ENOMEM;
508
509
510 bpq = netdev_priv(ndev);
511 dev_hold(edev);
512 bpq->ethdev = edev;
513 bpq->axdev = ndev;
514
515 memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
516 memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
517
518 err = register_netdevice(ndev);
519 if (err)
520 goto error;
521 bpq_set_lockdep_class(ndev);
522
523
524 list_add_rcu(&bpq->bpq_list, &bpq_devices);
525 return 0;
526
527 error:
528 dev_put(edev);
529 free_netdev(ndev);
530 return err;
531
532}
533
/*
 *	Unlink a bpq device from the list and unregister it.  Called under
 *	RTNL.  The struct net_device itself is freed later by the destructor
 *	(free_netdev) once the last reference is dropped.
 */
static void bpq_free_device(struct net_device *ndev)
{
	struct bpqdev *bpq = netdev_priv(ndev);

	dev_put(bpq->ethdev);	/* drop the ref taken in bpq_new_device() */
	list_del_rcu(&bpq->bpq_list);

	unregister_netdevice(ndev);
}
543
544
545
546
547static int bpq_device_event(struct notifier_block *this,
548 unsigned long event, void *ptr)
549{
550 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
551
552 if (!net_eq(dev_net(dev), &init_net))
553 return NOTIFY_DONE;
554
555 if (!dev_is_ethdev(dev))
556 return NOTIFY_DONE;
557
558 switch (event) {
559 case NETDEV_UP:
560 if (bpq_get_ax25_dev(dev) == NULL)
561 bpq_new_device(dev);
562 break;
563
564 case NETDEV_DOWN:
565 if ((dev = bpq_get_ax25_dev(dev)) != NULL)
566 dev_close(dev);
567 break;
568
569 case NETDEV_UNREGISTER:
570 if ((dev = bpq_get_ax25_dev(dev)) != NULL)
571 bpq_free_device(dev);
572 break;
573 default:
574 break;
575 }
576
577 return NOTIFY_DONE;
578}
579
580
581
582
583
584
585
586
/*
 *	Module init: create /proc/net/bpqether, register the ETH_P_BPQ
 *	packet handler, and install the netdevice notifier that tracks
 *	ethernet interfaces coming and going.
 */
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
			 &bpq_info_fops)) {
		printk(KERN_ERR
			"bpq: cannot create /proc/net/bpqether entry.\n");
		return -ENOENT;
	}
#endif /* CONFIG_PROC_FS */

	dev_add_pack(&bpq_packet_type);

	/* _rh variant: RHEL-kernel flavor of register_netdevice_notifier. */
	register_netdevice_notifier_rh(&bpq_dev_notifier);

	printk(banner);

	return 0;
}
606
/*
 *	Module unload: undo bpq_init_driver() in reverse order, then free
 *	every remaining bpq device under RTNL.
 */
static void __exit bpq_cleanup_driver(void)
{
	struct bpqdev *bpq;

	dev_remove_pack(&bpq_packet_type);

	unregister_netdevice_notifier_rh(&bpq_dev_notifier);

	remove_proc_entry("bpqether", init_net.proc_net);

	rtnl_lock();
	/* bpq_free_device() unlinks the head entry each time around, so
	 * this loop terminates once the list is empty. */
	while (!list_empty(&bpq_devices)) {
		bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list);
		bpq_free_device(bpq->axdev);
	}
	rtnl_unlock();
}
624
/* Module metadata and entry points. */
MODULE_AUTHOR("Joerg Reuter DL1BKE <jreuter@yaina.de>");
MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet");
MODULE_LICENSE("GPL");
module_init(bpq_init_driver);
module_exit(bpq_cleanup_driver);
630