1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <linux/module.h>
46#include <linux/init.h>
47#include <linux/kmod.h>
48#include <linux/slab.h>
49#include <linux/list.h>
50#include <linux/spinlock.h>
51#include <linux/rcupdate.h>
52#include <linux/uaccess.h>
53#include <linux/net.h>
54#include <linux/netdevice.h>
55#include <linux/socket.h>
56#include <linux/if_ether.h>
57#include <linux/if_arp.h>
58#include <linux/skbuff.h>
59#include <linux/can.h>
60#include <linux/can/core.h>
61#include <net/net_namespace.h>
62#include <net/sock.h>
63
64#include "af_can.h"
65
static __initdata const char banner[] = KERN_INFO
	"can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

/* module parameter: statistics timer enabled by default */
static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/*
 * Receive lists: one struct dev_rcv_lists per CAN netdevice on
 * can_rx_dev_list, plus can_rx_alldev_list for filters bound to "any
 * device".  Writers take can_rcvlists_lock; readers use RCU.
 * (non-static: also walked by the proc code)
 */
HLIST_HEAD(can_rx_dev_list);
static struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

/* slab cache for struct receiver filter entries */
static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols, indexed by protocol number */
static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_SPINLOCK(proto_tab_lock);

struct timer_list can_stattimer;	/* timer for statistics update */
struct s_stats can_stats;		/* packet statistics */
struct s_pstats can_pstats;		/* receive list statistics */
93
94
95
96
97
98static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
99{
100 struct sock *sk = sock->sk;
101
102 switch (cmd) {
103
104 case SIOCGSTAMP:
105 return sock_get_timestamp(sk, (struct timeval __user *)arg);
106
107 default:
108 return -ENOIOCTLCMD;
109 }
110}
111
/*
 * Socket destructor: free any frames still queued on the receive queue
 * when the last reference to the socket is dropped.
 */
static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
116
117static int can_create(struct net *net, struct socket *sock, int protocol)
118{
119 struct sock *sk;
120 struct can_proto *cp;
121 int err = 0;
122
123 sock->state = SS_UNCONNECTED;
124
125 if (protocol < 0 || protocol >= CAN_NPROTO)
126 return -EINVAL;
127
128 if (net != &init_net)
129 return -EAFNOSUPPORT;
130
131#ifdef CONFIG_MODULES
132
133 if (!proto_tab[protocol]) {
134 err = request_module("can-proto-%d", protocol);
135
136
137
138
139
140
141 if (err && printk_ratelimit())
142 printk(KERN_ERR "can: request_module "
143 "(can-proto-%d) failed.\n", protocol);
144 }
145#endif
146
147 spin_lock(&proto_tab_lock);
148 cp = proto_tab[protocol];
149 if (cp && !try_module_get(cp->prot->owner))
150 cp = NULL;
151 spin_unlock(&proto_tab_lock);
152
153
154
155 if (!cp)
156 return -EPROTONOSUPPORT;
157
158 if (cp->type != sock->type) {
159 err = -EPROTONOSUPPORT;
160 goto errout;
161 }
162
163 if (cp->capability >= 0 && !capable(cp->capability)) {
164 err = -EPERM;
165 goto errout;
166 }
167
168 sock->ops = cp->ops;
169
170 sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
171 if (!sk) {
172 err = -ENOMEM;
173 goto errout;
174 }
175
176 sock_init_data(sock, sk);
177 sk->sk_destruct = can_sock_destruct;
178
179 if (sk->sk_prot->init)
180 err = sk->sk_prot->init(sk);
181
182 if (err) {
183
184 sock_orphan(sk);
185 sock_put(sk);
186 }
187
188 errout:
189 module_put(cp->prot->owner);
190 return err;
191}
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/**
 * can_send - transmit a CAN frame, with optional local loopback
 * @skb:  socket buffer holding exactly one struct can_frame
 * @loop: non-zero to loop the frame back to local listening sockets
 *
 * The skb is consumed in all cases (transmitted or freed); the caller
 * must not touch it afterwards.
 *
 * Return: 0 on success or a negative error code.
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int err;

	/* reject malformed frames: wrong size or invalid data length code */
	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* CAN frames may only be sent via real CAN network devices */
	if (skb->dev->type != ARPHRD_CAN) {
		kfree_skb(skb);
		return -EPERM;
	}

	if (!(skb->dev->flags & IFF_UP)) {
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->protocol = htons(ETH_P_CAN);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* mark for local loopback of the sent frame */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * Drivers with IFF_ECHO echo successfully transmitted
		 * frames themselves; for all other drivers fake the echo
		 * here with a clone that is fed back after transmission.
		 */
		if (!(skb->dev->flags & IFF_ECHO)) {
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			/* keep sk so the sender can filter its own echo */
			newskb->sk = skb->sk;
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		/* kfree_skb(NULL) is a no-op if no clone was made */
		kfree_skb(newskb);
		return err;
	}

	/* deliver the faked echo clone to local receivers */
	if (newskb)
		netif_rx_ni(newskb);

	/* update transmit statistics */
	can_stats.tx_frames++;
	can_stats.tx_frames_delta++;

	return 0;
}
EXPORT_SYMBOL(can_send);
292
293
294
295
296
297static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
298{
299 struct dev_rcv_lists *d = NULL;
300 struct hlist_node *n;
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
316 if (d->dev == dev)
317 break;
318 }
319
320 return n ? d : NULL;
321}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
/*
 * find_rcv_list - select the receive list bucket for a filter
 * @can_id: pointer to the filter's CAN id (may be reduced in place)
 * @mask:   pointer to the filter's mask (may be reduced in place)
 * @d:      per-device receive lists
 *
 * Classifies the (can_id, mask) filter pair into one of the rx lists
 * (error, inverted, catch-all, single EFF id, single SFF id, or the
 * generic filter list).  Note that *can_id and *mask are canonicalized
 * as a side effect, so the caller must store the updated values.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save inverted bit */

	/* filter for error frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in the filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
	    && !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
				/* RFC: a future use-case for hash-tables? */
				return &d->rx[RX_EFF];
			}
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
/**
 * can_rx_register - subscribe a callback for matching CAN frames
 * @dev:    netdevice to receive from (NULL = any device)
 * @can_id: CAN identifier filter value
 * @mask:   CAN identifier filter mask
 * @func:   callback invoked for each matching frame
 * @data:   opaque pointer passed to @func
 * @ident:  human-readable identifier for /proc output
 *
 * Return: 0 on success, -ENOMEM if no entry could be allocated,
 *         -ENODEV if no receive list exists for @dev.
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
		    void (*func)(struct sk_buff *, void *), void *data,
		    char *ident)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct dev_rcv_lists *d;
	int err = 0;

	/* allocate outside the lock; GFP_KERNEL may sleep */
	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (d) {
		/* note: find_rcv_list() canonicalizes can_id/mask in place */
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id = can_id;
		r->mask = mask;
		r->matches = 0;
		r->func = func;
		r->data = data;
		r->ident = ident;

		/* RCU-publish the new entry for lockless receive lookups */
		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats.rcv_entries++;
		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
473
474
475
476
477static void can_rx_delete_device(struct rcu_head *rp)
478{
479 struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
480
481 kfree(d);
482}
483
484
485
486
487static void can_rx_delete_receiver(struct rcu_head *rp)
488{
489 struct receiver *r = container_of(rp, struct receiver, rcu);
490
491 kmem_cache_free(rcv_cache, r);
492}
493
494
495
496
497
498
499
500
501
502
503
504
/**
 * can_rx_unregister - remove a subscription registered with can_rx_register
 * @dev:    netdevice the subscription was bound to (NULL = any device)
 * @can_id: CAN identifier used at registration
 * @mask:   CAN mask used at registration
 * @func:   callback used at registration
 * @data:   data pointer used at registration
 *
 * All four of can_id/mask/func/data must match the original
 * registration; the actual freeing is deferred via RCU.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
		       void (*func)(struct sk_buff *, void *), void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct hlist_node *next;
	struct dev_rcv_lists *d;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (!d) {
		printk(KERN_ERR "BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	/* canonicalizes can_id/mask the same way can_rx_register() did */
	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete.  This must
	 * exist, since no receiver may be unregistered without being
	 * registered first.
	 */
	hlist_for_each_entry_rcu(r, next, rl, list) {
		if (r->can_id == can_id && r->mask == mask
		    && r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for an entry that has not been found: 'next' is NULL
	 * only when the loop ran off the end of the list without a
	 * match ('r' then points at the last traversed entry, if any).
	 */
	if (!next) {
		printk(KERN_ERR "BUG: receive list entry not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		r = NULL;
		d = NULL;
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats.rcv_entries > 0)
		can_pstats.rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries)
		hlist_del_rcu(&d->list);
	else
		d = NULL;	/* keep the device list alive */

 out:
	spin_unlock(&can_rcvlists_lock);

	/* schedule the receiver item for deletion after the grace period */
	if (r)
		call_rcu(&r->rcu, can_rx_delete_receiver);

	/* schedule the device structure for deletion, if unlinked above */
	if (d)
		call_rcu(&d->rcu, can_rx_delete_device);
}
EXPORT_SYMBOL(can_rx_unregister);
576
/* hand the skb to one matching receiver and count the match */
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}
582
/*
 * can_rcv_filter - deliver a received frame to all matching receivers
 * @d:   receive lists of the device (or the "all devices" entry)
 * @skb: the received CAN frame
 *
 * Walks the per-class receive lists under RCU protection (the caller
 * holds rcu_read_lock()) and invokes every matching callback.
 * Returns the number of receivers the frame was delivered to.
 */
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	struct hlist_node *n;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	/* fast path: nothing subscribed on this device */
	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* error frames are only delivered to error-class filters */
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries (receive everything) */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* the single-id lists below only hold non-RTR subscriptions */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		/* single 29-bit id subscriptions */
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		/* single 11-bit id subscriptions, indexed by id */
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}
648
/*
 * can_rcv - packet_type handler for incoming ETH_P_CAN frames
 *
 * Validates the frame, updates statistics and dispatches it to the
 * subscribers of the "all devices" list and of the receiving device.
 * Consumes the skb in every case.
 */
static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct dev_rcv_lists *d;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int matches;

	/* only the initial namespace is supported */
	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	/* drop (and warn once about) non-conforming frames */
	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != sizeof(struct can_frame) ||
		      cf->can_dlc > 8,
		      "PF_CAN: dropped non conform skbuf: "
		      "dev type %d, len %d, can_dlc %d\n",
		      dev->type, skb->len, cf->can_dlc))
		goto drop;

	/* update receive statistics */
	can_stats.rx_frames++;
	can_stats.rx_frames_delta++;

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(&can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats.matches++;
		can_stats.matches_delta++;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
/**
 * can_proto_register - register a CAN protocol module (e.g. raw, bcm)
 * @cp: protocol description with type, ops, prot and protocol number
 *
 * Return: 0 on success, -EINVAL for an out-of-range protocol number,
 *         -EBUSY if the slot is already taken, or the error from
 *         proto_register().
 */
int can_proto_register(struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		printk(KERN_ERR "can: protocol number %d out of range\n",
		       proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	spin_lock(&proto_tab_lock);
	if (proto_tab[proto]) {
		printk(KERN_ERR "can: protocol %d already registered\n",
		       proto);
		err = -EBUSY;
	} else {
		proto_tab[proto] = cp;

		/* use our generic ioctl function if the protocol has none
		 * NOTE(review): this mutates the protocol's shared ops
		 * struct in place — relies on cp->ops not being const */
		if (!cp->ops->ioctl)
			cp->ops->ioctl = can_ioctl;
	}
	spin_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);
747
748
749
750
751
/**
 * can_proto_unregister - remove a CAN protocol registration
 * @cp: protocol description passed to can_proto_register()
 *
 * Logs a BUG message if the slot is already empty, but clears the
 * table entry and unregisters the proto unconditionally.
 */
void can_proto_unregister(struct can_proto *cp)
{
	int proto = cp->protocol;

	spin_lock(&proto_tab_lock);
	if (!proto_tab[proto]) {
		printk(KERN_ERR "BUG: can: protocol %d is not registered\n",
		       proto);
	}
	proto_tab[proto] = NULL;
	spin_unlock(&proto_tab_lock);

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
767
768
769
770
/*
 * can_notifier - netdevice event handler
 *
 * Allocates the per-device receive lists when a CAN netdevice is
 * registered, and tears them down (possibly deferred until the last
 * receiver is unregistered) when it goes away.
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dev_rcv_lists *d;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* only CAN devices are of interest here */
	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d) {
			printk(KERN_ERR
			       "can: allocation of receive list failed\n");
			return NOTIFY_DONE;
		}
		d->dev = dev;

		spin_lock(&can_rcvlists_lock);
		hlist_add_head_rcu(&d->list, &can_rx_dev_list);
		spin_unlock(&can_rcvlists_lock);

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&can_rcvlists_lock);

		d = find_dev_rcv_lists(dev);
		if (d) {
			if (d->entries) {
				/* receivers still registered: defer the
				 * removal to the last can_rx_unregister() */
				d->remove_on_zero_entries = 1;
				d = NULL;
			} else
				hlist_del_rcu(&d->list);
		} else
			printk(KERN_ERR "can: notifier: receive list not "
			       "found for dev %s\n", dev->name);

		spin_unlock(&can_rcvlists_lock);

		/* free the unlinked structure after the RCU grace period */
		if (d)
			call_rcu(&d->rcu, can_rx_delete_device);

		break;
	}

	return NOTIFY_DONE;
}
834
835
836
837
838
/* handler for ETH_P_CAN frames delivered by CAN netdevice drivers */
static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.dev = NULL,
	.func = can_rcv,
};

/* PF_CAN socket family: can_create() dispatches to the protocol table */
static struct net_proto_family can_family_ops __read_mostly = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

/* notifier block for netdevice register/unregister events */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};
855
/*
 * can_init - module initialization
 *
 * Sets up the receiver slab cache, the "all devices" receive list, the
 * optional statistics timer and the proc files, then hooks the PF_CAN
 * family, the netdevice notifier and the ETH_P_CAN packet handler into
 * the networking core.
 */
static __init int can_init(void)
{
	printk(banner);

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	/* insert struct dev_rcv_lists for reception on all devices */
	spin_lock(&can_rcvlists_lock);
	hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
	spin_unlock(&can_rcvlists_lock);

	if (stats_timer) {
		/* the statistics are updated every second (timer triggered) */
		setup_timer(&can_stattimer, can_stat_update, 0);
		mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
	} else
		can_stattimer.function = NULL;

	can_init_proc();

	/* protocol register */
	sock_register(&can_family_ops);
	register_netdevice_notifier(&can_netdev_notifier);
	dev_add_pack(&can_packet);

	return 0;
}
891
/*
 * can_exit - module teardown
 *
 * Reverses can_init(): stops the statistics timer, removes the proc
 * files, unhooks from the networking core, frees all remaining device
 * receive lists and — after an rcu_barrier() so all pending RCU
 * callbacks have run — destroys the receiver slab cache.
 */
static __exit void can_exit(void)
{
	struct dev_rcv_lists *d;
	struct hlist_node *n, *next;

	if (stats_timer)
		del_timer(&can_stattimer);

	can_remove_proc();

	/* protocol unregister */
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	/* remove created dev_rcv_lists via destroy_dev_rcv_lists() */
	spin_lock(&can_rcvlists_lock);
	hlist_del(&can_rx_alldev_list.list);
	hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
		hlist_del(&d->list);
		kfree(d);
	}
	spin_unlock(&can_rcvlists_lock);

	/* wait for completion of outstanding call_rcu() callbacks */
	rcu_barrier();

	kmem_cache_destroy(rcv_cache);
}
920
921module_init(can_init);
922module_exit(can_exit);
923