1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include <linux/errno.h>
53#include <linux/types.h>
54#include <linux/socket.h>
55#include <linux/in.h>
56#include <linux/kernel.h>
57#include <linux/string.h>
58#include <linux/net.h>
59#include <linux/slab.h>
60#include <net/ax25.h>
61#include <linux/inet.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
64#include <linux/if_arp.h>
65#include <linux/skbuff.h>
66#include <net/sock.h>
67#include <linux/uaccess.h>
68#include <linux/mm.h>
69#include <linux/interrupt.h>
70#include <linux/notifier.h>
71#include <linux/proc_fs.h>
72#include <linux/seq_file.h>
73#include <linux/stat.h>
74#include <linux/module.h>
75#include <linux/init.h>
76#include <linux/rtnetlink.h>
77
78#include <net/ip.h>
79#include <net/arp.h>
80#include <net/net_namespace.h>
81
82#include <linux/bpqether.h>
83
/* Load-time banner (__initconst: freed after module init). */
static const char banner[] __initconst = KERN_INFO \
	"AX.25: bpqether driver version 004\n";

static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);

/* Receive hook for Ethernet frames carrying ETH_P_BPQ payloads. */
static struct packet_type bpq_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_BPQ),
	.func = bpq_rcv,
};

/* Watches Ethernet devices for up/down/unregister events. */
static struct notifier_block bpq_dev_notifier = {
	.notifier_call = bpq_device_event,
};
98
99
/*
 * Per-binding state: couples one AX.25 "bpq%d" interface with the
 * Ethernet device it tunnels over.
 */
struct bpqdev {
	struct list_head bpq_list;	/* node in bpq_devices; RCU-protected list */
	struct net_device *ethdev;	/* underlying Ethernet device (held ref) */
	struct net_device *axdev;	/* the bpq AX.25 device itself */
	char dest_addr[6];	/* Ethernet destination for outgoing frames */
	char acpt_addr[6];	/* accept frames only from this source MAC */
};

/* Global list of all active Ethernet<->AX.25 bindings. */
static LIST_HEAD(bpq_devices);
109
110
111
112
113
114
/*
 * Dedicated lockdep classes for bpq devices' TX-queue and address-list
 * locks.  NOTE(review): presumably this avoids false lockdep reports
 * when a bpq device transmits through its Ethernet slave (the usual
 * reason for per-type keys on stacked netdevices) — confirm.
 */
static struct lock_class_key bpq_netdev_xmit_lock_key;
static struct lock_class_key bpq_netdev_addr_lock_key;

/* Callback for netdev_for_each_tx_queue(): reclassify one _xmit_lock. */
static void bpq_set_lockdep_class_one(struct net_device *dev,
				      struct netdev_queue *txq,
				      void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
}

/* Move all of dev's lockdep-tracked locks into the bpq classes. */
static void bpq_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
}
130
131
132
133
134
135
136
137static inline struct net_device *bpq_get_ether_dev(struct net_device *dev)
138{
139 struct bpqdev *bpq = netdev_priv(dev);
140
141 return bpq ? bpq->ethdev : NULL;
142}
143
144
145
146
/*
 * Find the bpq (AX.25) interface bound to a given Ethernet device.
 * Walks the RCU-protected binding list; callers run under
 * rcu_read_lock() or hold the RTNL (lockdep_rtnl_is_held() allows the
 * latter).  Returns NULL if the Ethernet device has no binding.
 */
static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
	struct bpqdev *bpq;

	list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
				lockdep_rtnl_is_held()) {
		if (bpq->ethdev == dev)
			return bpq->axdev;
	}
	return NULL;
}
158
159static inline int dev_is_ethdev(struct net_device *dev)
160{
161 return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
162}
163
164
165
166
167
168
169
/*
 * Receive an Ethernet frame of type ETH_P_BPQ and feed it into the
 * AX.25 stack.  Strips the 2-byte little-endian BPQ length header,
 * prepends a zero KISS command byte and hands the skb to netif_rx().
 * The skb is always consumed (delivered or freed); returns 0 except
 * when skb_share_check() fails.
 */
static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
{
	int len;
	char * ptr;
	struct ethhdr *eth;
	struct bpqdev *bpq;

	/* Only handle traffic in the initial network namespace. */
	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
		goto drop;

	/* Map the Ethernet device to its bound AX.25 interface. */
	rcu_read_lock();
	dev = bpq_get_ax25_dev(dev);

	if (dev == NULL || !netif_running(dev))
		goto drop_unlock;

	bpq = netdev_priv(dev);

	eth = eth_hdr(skb);

	/* A multicast/broadcast accept address means "accept from anyone";
	 * otherwise only frames from the configured source MAC pass. */
	if (!(bpq->acpt_addr[0] & 0x01) &&
	    !ether_addr_equal(eth->h_source, bpq->acpt_addr))
		goto drop_unlock;

	/* We are about to rewrite the header area — make it private. */
	if (skb_cow(skb, sizeof(struct ethhdr)))
		goto drop_unlock;

	/* BPQ length header: 16-bit little-endian; the transmit side
	 * (bpq_xmit) stores payload size + 5, so subtract 5 here. */
	len = skb->data[0] + skb->data[1] * 256 - 5;

	skb_pull(skb, 2);	/* remove the two length bytes */
	skb_trim(skb, len);	/* clamp to the declared payload length */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	/* Prepend the KISS "data" command byte the AX.25 layer expects. */
	ptr = skb_push(skb, 1);
	*ptr = 0;

	skb->protocol = ax25_type_trans(skb, dev);
	netif_rx(skb);
unlock:

	rcu_read_unlock();

	return 0;
drop_unlock:
	kfree_skb(skb);
	goto unlock;

drop:
	kfree_skb(skb);
	return 0;
}
234
235
236
237
/*
 * Transmit an AX.25 frame over the bound Ethernet device.
 * The incoming skb carries a leading KISS byte, which is dropped;
 * a 2-byte little-endian length header (payload size + 5) is
 * prepended and the frame is sent as ETH_P_BPQ to the configured
 * destination MAC.  Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;
	struct bpqdev *bpq;
	struct net_device *orig_dev;
	int size;

	/* IP datagrams go through the generic AX.25 IP transmit path. */
	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* Refuse to send anything while the bpq interface is down. */
	if (!netif_running(dev)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb_pull(skb, 1);	/* drop the KISS command byte */
	size = skb->len;

	/* The skb may still be shared with the generic networking code:
	 * unshare it and make room for the BPQ header. */
	if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) {
		if (net_ratelimit())
			pr_err("bpqether: out of memory\n");
		kfree_skb(skb);

		return NETDEV_TX_OK;
	}

	ptr = skb_push(skb, 2);

	/* Little-endian length field covering payload + 5 overhead bytes
	 * (mirrors the "- 5" on the receive side in bpq_rcv). */
	*ptr++ = (size + 5) % 256;
	*ptr++ = (size + 5) / 256;

	bpq = netdev_priv(dev);

	orig_dev = dev;
	if ((dev = bpq_get_ether_dev(dev)) == NULL) {
		orig_dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->protocol = ax25_type_trans(skb, dev);
	skb_reset_network_header(skb);
	dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
	/* NOTE(review): TX stats are charged to the Ethernet slave, not
	 * the bpq device — matches existing behavior, confirm intent. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes+=skb->len;

	dev_queue_xmit(skb);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
297
298
299
300
/*
 * ndo_set_mac_address handler: copy the requested hardware address
 * (AX.25 callsign, dev->addr_len bytes) into the device.
 * NOTE(review): no validity check on sa->sa_data is performed here.
 */
static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	return 0;
}
309
310
311
312
313
314
315
316
/*
 * Private device ioctls (CAP_NET_ADMIN required):
 *
 *   SIOCSBPQETHOPT  - reserved; every sub-command currently yields
 *                     -EINVAL (only the request copy is validated).
 *   SIOCSBPQETHADDR - set the destination MAC and the accepted
 *                     source MAC for this bpq device.
 *
 * Returns 0 on success, -EPERM/-EFAULT/-EINVAL on failure.
 */
static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bpq_ethaddr __user *ethaddr = ifr->ifr_data;
	struct bpqdev *bpq = netdev_priv(dev);
	struct bpq_req req;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case SIOCSBPQETHOPT:
		if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
			return -EFAULT;
		switch (req.cmd) {
		case SIOCGBPQETHPARAM:
		case SIOCSBPQETHPARAM:
		default:
			/* No tunable parameters are implemented. */
			return -EINVAL;
		}

		break;	/* unreachable: every case above returns */

	case SIOCSBPQETHADDR:
		if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN))
			return -EFAULT;
		if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
352
353
354
355
/* ndo_open: nothing to set up beyond enabling the transmit queue. */
static int bpq_open(struct net_device *dev)
{
	netif_start_queue(dev);

	return 0;
}
361
/* ndo_stop: stop the transmit queue; no other state to tear down. */
static int bpq_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}
367
368
369
370
371#ifdef CONFIG_PROC_FS
372
373
374
375static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
376 __acquires(RCU)
377{
378 int i = 1;
379 struct bpqdev *bpqdev;
380
381 rcu_read_lock();
382
383 if (*pos == 0)
384 return SEQ_START_TOKEN;
385
386 list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
387 if (i == *pos)
388 return bpqdev;
389 }
390 return NULL;
391}
392
/*
 * seq_file .next: advance to the next entry in the RCU list.
 * SEQ_START_TOKEN means "before the first entry"; returns NULL once
 * the walk reaches the list head again (end of list).
 */
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *p;
	struct bpqdev *bpqdev = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		p = rcu_dereference(list_next_rcu(&bpq_devices));
	else
		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));

	return (p == &bpq_devices) ? NULL
		: list_entry(p, struct bpqdev, bpq_list);
}
408
409static void bpq_seq_stop(struct seq_file *seq, void *v)
410 __releases(RCU)
411{
412 rcu_read_unlock();
413}
414
415
416static int bpq_seq_show(struct seq_file *seq, void *v)
417{
418 if (v == SEQ_START_TOKEN)
419 seq_puts(seq,
420 "dev ether destination accept from\n");
421 else {
422 const struct bpqdev *bpqdev = v;
423
424 seq_printf(seq, "%-5s %-10s %pM ",
425 bpqdev->axdev->name, bpqdev->ethdev->name,
426 bpqdev->dest_addr);
427
428 if (is_multicast_ether_addr(bpqdev->acpt_addr))
429 seq_printf(seq, "*\n");
430 else
431 seq_printf(seq, "%pM\n", bpqdev->acpt_addr);
432
433 }
434 return 0;
435}
436
/* Iterator ops backing /proc/net/bpqether. */
static const struct seq_operations bpq_seqops = {
	.start = bpq_seq_start,
	.next = bpq_seq_next,
	.stop = bpq_seq_stop,
	.show = bpq_seq_show,
};
443#endif
444
445
/* netdev callbacks for the bpq AX.25 interface. */
static const struct net_device_ops bpq_netdev_ops = {
	.ndo_open	     = bpq_open,
	.ndo_stop	     = bpq_close,
	.ndo_start_xmit	     = bpq_xmit,
	.ndo_set_mac_address = bpq_set_mac_address,
	.ndo_do_ioctl	     = bpq_ioctl,
};
453
/*
 * alloc_netdev() setup callback: configure a freshly allocated
 * "bpq%d" device as an AX.25 interface (type, MTU, header sizes,
 * default addresses).
 */
static void bpq_setup(struct net_device *dev)
{
	dev->netdev_ops = &bpq_netdev_ops;
	dev->needs_free_netdev = true;	/* free_netdev() on unregister */

	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);

	dev->flags = 0;
	dev->features = NETIF_F_LLTX;	/* allow xmit without holding xmit_lock */

#if IS_ENABLED(CONFIG_AX25)
	dev->header_ops = &ax25_header_ops;
#endif

	dev->type = ARPHRD_AX25;
	/* leave room for both the AX.25 and the BPQ encapsulation headers */
	dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
	dev->mtu = AX25_DEF_PACLEN;
	dev->addr_len = AX25_ADDR_LEN;

}
475
476
477
478
/*
 * Create and register a bpq device bound to Ethernet device edev.
 * Called from the netdevice notifier, so the RTNL is held (which
 * register_netdevice() requires).  Takes a reference on edev for the
 * lifetime of the binding; the reference and the netdev are released
 * on the error path.  Returns 0 or a negative errno.
 */
static int bpq_new_device(struct net_device *edev)
{
	int err;
	struct net_device *ndev;
	struct bpqdev *bpq;

	ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", NET_NAME_UNKNOWN,
			    bpq_setup);
	if (!ndev)
		return -ENOMEM;


	bpq = netdev_priv(ndev);
	dev_hold(edev);
	bpq->ethdev = edev;
	bpq->axdev = ndev;

	/* Default: send to broadcast and accept from anyone. */
	eth_broadcast_addr(bpq->dest_addr);
	eth_broadcast_addr(bpq->acpt_addr);

	err = register_netdevice(ndev);
	if (err)
		goto error;
	bpq_set_lockdep_class(ndev);

	/* Publish the binding; readers walk this list under RCU. */
	list_add_rcu(&bpq->bpq_list, &bpq_devices);
	return 0;

 error:
	dev_put(edev);
	free_netdev(ndev);
	return err;

}
514
/*
 * Tear down a bpq device: drop the Ethernet reference, unlink it from
 * the RCU list and unregister it.  Caller holds the RTNL
 * (unregister_netdevice() requires it); the netdev itself is freed
 * later via needs_free_netdev.
 */
static void bpq_free_device(struct net_device *ndev)
{
	struct bpqdev *bpq = netdev_priv(ndev);

	dev_put(bpq->ethdev);
	list_del_rcu(&bpq->bpq_list);

	unregister_netdevice(ndev);
}
524
525
526
527
/*
 * Netdevice notifier: track state changes of underlying Ethernet
 * devices in the initial namespace that pass dev_is_ethdev().
 *
 *   NETDEV_UP         - create a bpq binding if none exists yet
 *   NETDEV_DOWN       - bring the bound bpq device down as well
 *   NETDEV_UNREGISTER - destroy the bound bpq device
 */
static int bpq_device_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (!dev_is_ethdev(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:		/* new ethernet device -> new BPQ interface */
		if (bpq_get_ax25_dev(dev) == NULL)
			bpq_new_device(dev);
		break;

	case NETDEV_DOWN:	/* ethernet device closed -> close BPQ interface */
		if ((dev = bpq_get_ax25_dev(dev)) != NULL)
			dev_close(dev);
		break;

	case NETDEV_UNREGISTER:	/* ethernet device removed -> free BPQ interface */
		if ((dev = bpq_get_ax25_dev(dev)) != NULL)
			bpq_free_device(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
560
561
562
563
564
565
566
567
568static int __init bpq_init_driver(void)
569{
570#ifdef CONFIG_PROC_FS
571 if (!proc_create_seq("bpqether", 0444, init_net.proc_net, &bpq_seqops)) {
572 printk(KERN_ERR
573 "bpq: cannot create /proc/net/bpqether entry.\n");
574 return -ENOENT;
575 }
576#endif
577
578 dev_add_pack(&bpq_packet_type);
579
580 register_netdevice_notifier(&bpq_dev_notifier);
581
582 printk(banner);
583
584 return 0;
585}
586
/*
 * Module exit: undo bpq_init_driver() and destroy every remaining
 * bpq device.  The packet handler and notifier are removed first so
 * no new devices or frames arrive while the list is being drained
 * under the RTNL.
 */
static void __exit bpq_cleanup_driver(void)
{
	struct bpqdev *bpq;

	dev_remove_pack(&bpq_packet_type);

	unregister_netdevice_notifier(&bpq_dev_notifier);

	remove_proc_entry("bpqether", init_net.proc_net);

	rtnl_lock();
	while (!list_empty(&bpq_devices)) {
		bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list);
		bpq_free_device(bpq->axdev);
	}
	rtnl_unlock();
}
604
/* Module metadata and entry points. */
MODULE_AUTHOR("Joerg Reuter DL1BKE <jreuter@yaina.de>");
MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet");
MODULE_LICENSE("GPL");
module_init(bpq_init_driver);
module_exit(bpq_cleanup_driver);
610