1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53#include <linux/bitops.h>
54#include <linux/if_arp.h>
55#include <linux/module.h>
56#include <linux/moduleparam.h>
57#include <linux/netdevice.h>
58#include <net/af_ieee802154.h>
59#include <net/ieee802154.h>
60#include <net/ieee802154_netdev.h>
61#include <net/ipv6.h>
62
63#include "6lowpan.h"
64
/* All registered lowpan interfaces; guarded per-device by dev_list_mtx
 * for writers and RCU for readers (see lowpan_give_skb_to_devices()).
 */
static LIST_HEAD(lowpan_devices);
66
67
/* Private area of a lowpan net_device (returned by netdev_priv()). */
struct lowpan_dev_info {
	struct net_device *real_dev;	/* underlying 802.15.4 wpan device */
	struct mutex dev_list_mtx;	/* protects membership in lowpan_devices */
	unsigned short fragment_tag;	/* next datagram tag for tx fragmentation */
};
73
/* Entry on the global lowpan_devices list, one per lowpan interface. */
struct lowpan_dev_record {
	struct net_device *ldev;	/* the lowpan (virtual) device */
	struct list_head list;
};
78
/* In-progress reassembly of one fragmented 6lowpan datagram. */
struct lowpan_fragment {
	struct sk_buff *skb;		/* buffer collecting the full datagram */
	u16 length;			/* total expected datagram length */
	u32 bytes_rcv;			/* bytes received so far */
	u16 tag;			/* datagram tag from the FRAG headers */
	struct timer_list timer;	/* reassembly expiry timer */
	struct list_head list;		/* link in lowpan_fragments */
};
87
/* Pending reassemblies, protected by flist_lock (bh-safe spinlock). */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
90
/* Fetch the lowpan bookkeeping stored in the netdev private area. */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	struct lowpan_dev_info *priv = netdev_priv(dev);

	return priv;
}
96
97static inline void lowpan_address_flip(u8 *src, u8 *dest)
98{
99 int i;
100 for (i = 0; i < IEEE802154_ADDR_LEN; i++)
101 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
102}
103
/* header_ops->create hook for the lowpan device: compress the IPv6
 * header in place, fill in the 802.15.4 MAC control block and hand the
 * frame down to the real wpan device's header builder.
 *
 * Returns the result of dev_hard_header() on the real device, or 0 for
 * non-IPv6 traffic (which is passed through untouched).
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* Only IPv6 is carried over 6lowpan; everything else passes through. */
	if (type != ETH_P_IPV6)
		return 0;

	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	/* Rewrite the IPv6 header into its IPHC-compressed form. */
	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/* Prepare the 802.15.4 MAC metadata for the frame about to be built. */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* Source: always our long (EUI-64) address within the device's PAN. */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);

	/* Destination is addressed within the same PAN. */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/* Broadcast goes to the 16-bit broadcast short address and must not
	 * request an ACK; unicast uses the long address and asks for one.
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment for unicast frames */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}
166
/* Deliver @skb to every lowpan interface stacked on top of the wpan
 * device it arrived on (skb->dev).  Each recipient gets its own copy;
 * the caller keeps ownership of @skb itself.
 *
 * Returns the last netif_rx() status, or -ENOMEM if a copy failed.
 * Note: @dev is currently unused by this function.
 */
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
	struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				stat = -ENOMEM;
				break;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
		}
	rcu_read_unlock();

	return stat;
}
190
/* Reassembly timeout: discard a partially assembled datagram.
 *
 * NOTE(review): this runs in timer (softirq) context and removes the
 * entry from lowpan_fragments without taking flist_lock, racing with
 * the list walk in process_data().  Simply taking flist_lock here would
 * deadlock, because process_data() calls del_timer_sync() while holding
 * that lock — a proper fix needs both sides restructured.  Flagging
 * rather than changing behavior.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
201
/* Start a new reassembly for a fragment stream of total length @len and
 * datagram tag @tag.  Allocates the bookkeeping entry plus a buffer big
 * enough for the full datagram, arms the expiry timer and links the
 * entry onto lowpan_fragments.
 *
 * Called from process_data() with flist_lock held (hence GFP_ATOMIC).
 * Returns the new entry, or NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling; extra room is reserved for
	 * a (possibly expanded) IPv6 header in front of the payload
	 */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));

	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses on the
	 * receiving side
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out for the whole datagram to be reassembled */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}
253
254static int process_data(struct sk_buff *skb)
255{
256 u8 iphc0, iphc1;
257 const struct ieee802154_addr *_saddr, *_daddr;
258
259 raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
260
261 if (skb->len < 2)
262 goto drop;
263
264 if (lowpan_fetch_skb_u8(skb, &iphc0))
265 goto drop;
266
267
268 switch (iphc0 & LOWPAN_DISPATCH_MASK) {
269 case LOWPAN_DISPATCH_FRAG1:
270 case LOWPAN_DISPATCH_FRAGN:
271 {
272 struct lowpan_fragment *frame;
273
274 u8 slen, offset = 0;
275 u16 len, tag;
276 bool found = false;
277
278 if (lowpan_fetch_skb_u8(skb, &slen) ||
279 lowpan_fetch_skb_u16(skb, &tag))
280 goto drop;
281
282
283 len = ((iphc0 & 7) << 8) | slen;
284
285 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
286 pr_debug("%s received a FRAG1 packet (tag: %d, "
287 "size of the entire IP packet: %d)",
288 __func__, tag, len);
289 } else {
290 if (lowpan_fetch_skb_u8(skb, &offset))
291 goto unlock_and_drop;
292 pr_debug("%s received a FRAGN packet (tag: %d, "
293 "size of the entire IP packet: %d, "
294 "offset: %d)", __func__, tag, len, offset * 8);
295 }
296
297
298
299
300
301 spin_lock_bh(&flist_lock);
302
303 list_for_each_entry(frame, &lowpan_fragments, list)
304 if (frame->tag == tag) {
305 found = true;
306 break;
307 }
308
309
310 if (!found) {
311 pr_debug("%s first fragment received for tag %d, "
312 "begin packet reassembly", __func__, tag);
313 frame = lowpan_alloc_new_frame(skb, len, tag);
314 if (!frame)
315 goto unlock_and_drop;
316 }
317
318
319 if (likely((offset * 8 + skb->len) <= frame->length))
320 skb_copy_to_linear_data_offset(frame->skb, offset * 8,
321 skb->data, skb->len);
322 else
323 goto unlock_and_drop;
324
325 frame->bytes_rcv += skb->len;
326
327
328 if ((frame->bytes_rcv == frame->length) &&
329 frame->timer.expires > jiffies) {
330
331 del_timer_sync(&frame->timer);
332 list_del(&frame->list);
333 spin_unlock_bh(&flist_lock);
334
335 pr_debug("%s successfully reassembled fragment "
336 "(tag %d)", __func__, tag);
337
338 dev_kfree_skb(skb);
339 skb = frame->skb;
340 kfree(frame);
341
342 if (lowpan_fetch_skb_u8(skb, &iphc0))
343 goto drop;
344
345 break;
346 }
347 spin_unlock_bh(&flist_lock);
348
349 return kfree_skb(skb), 0;
350 }
351 default:
352 break;
353 }
354
355 if (lowpan_fetch_skb_u8(skb, &iphc1))
356 goto drop;
357
358 _saddr = &mac_cb(skb)->sa;
359 _daddr = &mac_cb(skb)->da;
360
361 return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
362 _saddr->addr_type, IEEE802154_ADDR_LEN,
363 (u8 *)_daddr->hwaddr, _daddr->addr_type,
364 IEEE802154_ADDR_LEN, iphc0, iphc1,
365 lowpan_give_skb_to_devices);
366
367unlock_and_drop:
368 spin_unlock_bh(&flist_lock);
369drop:
370 kfree_skb(skb);
371 return -EINVAL;
372}
373
374static int lowpan_set_address(struct net_device *dev, void *p)
375{
376 struct sockaddr *sa = p;
377
378 if (netif_running(dev))
379 return -EBUSY;
380
381
382 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
383
384 return 0;
385}
386
387static int
388lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
389 int mlen, int plen, int offset, int type)
390{
391 struct sk_buff *frag;
392 int hlen;
393
394 hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
395 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
396
397 raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
398
399 frag = netdev_alloc_skb(skb->dev,
400 hlen + mlen + plen + IEEE802154_MFR_SIZE);
401 if (!frag)
402 return -ENOMEM;
403
404 frag->priority = skb->priority;
405
406
407 skb_put(frag, mlen);
408 skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
409
410 skb_put(frag, hlen);
411 skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
412
413 skb_put(frag, plen);
414 skb_copy_to_linear_data_offset(frag, mlen + hlen,
415 skb_network_header(skb) + offset, plen);
416
417 raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
418
419 return dev_queue_xmit(frag);
420}
421
/* Split an oversized frame into a FRAG1 frame followed by FRAGN frames
 * of LOWPAN_FRAG_SIZE payload each, all sharing one datagram tag.
 *
 * Returns 0 when every fragment was queued, or the first transmit
 * error.  The original @skb is left for the caller to free.
 *
 * NOTE(review): fragment_tag is incremented without locking; concurrent
 * transmitters could reuse a tag — confirm xmit serialization upstream.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header: dispatch + 11-bit datagram size, 16-bit tag */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);

	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header: switch dispatch from FRAG1 to FRAGN,
	 * keeping the size/tag bits intact
	 */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while (payload_length - offset > 0) {
		int len = LOWPAN_FRAG_SIZE;

		/* datagram offset is expressed in 8-byte units */
		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}
475
/* ndo_start_xmit: hand the frame to the real wpan device, fragmenting
 * first when it exceeds the 802.15.4 MTU.
 *
 * skb ownership: on the small-frame path dev_queue_xmit() consumes it;
 * on the fragmentation path only copies are transmitted, so control
 * deliberately falls through into the error: label to free the
 * original skb.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the MFR */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	/* dev_queue_xmit may return positive NET_XMIT codes; map only
	 * negative errors to NET_XMIT_DROP
	 */
	return (err < 0) ? NET_XMIT_DROP : err;
}
504
505static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
506{
507 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
508 return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
509}
510
511static u16 lowpan_get_pan_id(const struct net_device *dev)
512{
513 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
514 return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
515}
516
517static u16 lowpan_get_short_addr(const struct net_device *dev)
518{
519 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
520 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
521}
522
523static u8 lowpan_get_dsn(const struct net_device *dev)
524{
525 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
526 return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
527}
528
529static struct header_ops lowpan_header_ops = {
530 .create = lowpan_header_create,
531};
532
/* Distinct lockdep classes so the stacked (lowpan-over-wpan) device's
 * tx locks are not confused with the real device's.
 */
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

/* Per-queue callback: re-key one tx queue's _xmit_lock for lockdep. */
static void lowpan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &lowpan_netdev_xmit_lock_key);
}
543
544
/* ndo_init: assign the lowpan-specific lockdep classes to every tx
 * queue and to the qdisc busylock.  Cannot fail.
 */
static int lowpan_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
	return 0;
}
551
/* netdev callbacks for the virtual lowpan interface. */
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};
557
/* MLME ops exposed by the lowpan device; each forwards to real_dev. */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
564
/* rtnl_link_ops->setup: initialize a freshly allocated lowpan netdev. */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Addressing fields + Security Header
	 * (presumably 2 + 1 + 20 + 14 — TODO confirm breakdown)
	 */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= 1281; /* just above the IPv6 minimum MTU */
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;
}
583
584static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
585{
586 if (tb[IFLA_ADDRESS]) {
587 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
588 return -EINVAL;
589 }
590 return 0;
591}
592
/* packet_type handler for ETH_P_IEEE802154 frames coming off a wpan
 * device: dispatch on the first 6lowpan byte and either deliver an
 * uncompressed IPv6 payload directly or hand compressed/fragmented
 * frames to process_data().  Consumes @skb on every path.
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(local_skb, 1);

		/* each lowpan device receives its own copy, so the
		 * originals can be freed afterwards
		 */
		lowpan_give_skb_to_devices(local_skb, NULL);

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			/* process_data consumes the clone */
			process_data(local_skb);

			kfree_skb(skb);
			break;
		default:
			/* unknown dispatch: silently ignore (skb leaks to
			 * NET_RX_SUCCESS path below after being dropped?
			 * NOTE(review): skb is NOT freed here — verify
			 * whether this default case should kfree_skb(skb).
			 */
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
647
648static int lowpan_newlink(struct net *src_net, struct net_device *dev,
649 struct nlattr *tb[], struct nlattr *data[])
650{
651 struct net_device *real_dev;
652 struct lowpan_dev_record *entry;
653
654 pr_debug("adding new link\n");
655
656 if (!tb[IFLA_LINK])
657 return -EINVAL;
658
659 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
660 if (!real_dev)
661 return -ENODEV;
662 if (real_dev->type != ARPHRD_IEEE802154) {
663 dev_put(real_dev);
664 return -EINVAL;
665 }
666
667 lowpan_dev_info(dev)->real_dev = real_dev;
668 lowpan_dev_info(dev)->fragment_tag = 0;
669 mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
670
671 entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
672 if (!entry) {
673 dev_put(real_dev);
674 lowpan_dev_info(dev)->real_dev = NULL;
675 return -ENOMEM;
676 }
677
678 entry->ldev = dev;
679
680
681 memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
682
683 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
684 INIT_LIST_HEAD(&entry->list);
685 list_add_tail(&entry->list, &lowpan_devices);
686 mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
687
688 register_netdevice(dev);
689
690 return 0;
691}
692
/* rtnl_link_ops->dellink: remove a lowpan interface — unlink its record
 * from the global list, queue the netdev for unregistration on @head
 * and release the reference on the underlying wpan device.
 * Must be called with RTNL held.
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
716
/* rtnetlink glue: "ip link add link wpan0 ... type lowpan". */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
725
/* Register the "lowpan" rtnl link type. */
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}
730
/* Unregister the "lowpan" rtnl link type (also removes its devices). */
static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
735
736static int lowpan_device_event(struct notifier_block *unused,
737 unsigned long event, void *ptr)
738{
739 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
740 LIST_HEAD(del_list);
741 struct lowpan_dev_record *entry, *tmp;
742
743 if (dev->type != ARPHRD_IEEE802154)
744 goto out;
745
746 if (event == NETDEV_UNREGISTER) {
747 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
748 if (lowpan_dev_info(entry->ldev)->real_dev == dev)
749 lowpan_dellink(entry->ldev, &del_list);
750 }
751
752 unregister_netdevice_many(&del_list);
753 }
754
755out:
756 return NOTIFY_DONE;
757}
758
/* Watches for wpan device unregistration (see lowpan_device_event). */
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};
762
763static struct packet_type lowpan_packet_type = {
764 .type = __constant_htons(ETH_P_IEEE802154),
765 .func = lowpan_rcv,
766};
767
768static int __init lowpan_init_module(void)
769{
770 int err = 0;
771
772 err = lowpan_netlink_init();
773 if (err < 0)
774 goto out;
775
776 dev_add_pack(&lowpan_packet_type);
777
778 err = register_netdevice_notifier(&lowpan_dev_notifier);
779 if (err < 0) {
780 dev_remove_pack(&lowpan_packet_type);
781 lowpan_netlink_fini();
782 }
783out:
784 return err;
785}
786
/* Module exit: unhook all entry points, then purge any reassemblies
 * still pending so their skbs and timers are released.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on the RX side; destroy the leftovers.
	 * NOTE(review): del_timer_sync() is called while holding
	 * flist_lock with bottom halves disabled — safe only because the
	 * timer handler never takes flist_lock itself; verify if that
	 * ever changes.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
810
811module_init(lowpan_init_module);
812module_exit(lowpan_cleanup_module);
813MODULE_LICENSE("GPL");
814MODULE_ALIAS_RTNL_LINK("lowpan");
815