1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/module.h>
44#include <linux/stddef.h>
45#include <linux/init.h>
46#include <linux/kmod.h>
47#include <linux/slab.h>
48#include <linux/list.h>
49#include <linux/spinlock.h>
50#include <linux/rcupdate.h>
51#include <linux/uaccess.h>
52#include <linux/net.h>
53#include <linux/netdevice.h>
54#include <linux/socket.h>
55#include <linux/if_ether.h>
56#include <linux/if_arp.h>
57#include <linux/skbuff.h>
58#include <linux/can.h>
59#include <linux/can/core.h>
60#include <linux/can/skb.h>
61#include <linux/ratelimit.h>
62#include <net/net_namespace.h>
63#include <net/sock.h>
64
65#include "af_can.h"
66
MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

/* module parameter: enable the periodic statistics timer (default: on) */
static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/* slab cache for struct receiver subscription entries */
static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols, indexed by protocol number;
 * readers use RCU, writers serialize on proto_tab_lock */
static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

/* global counter to create unique non-zero skb identifiers (see can_receive) */
static atomic_t skbcounter = ATOMIC_INIT(0);
89
90int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
91{
92 struct sock *sk = sock->sk;
93
94 switch (cmd) {
95
96 case SIOCGSTAMP:
97 return sock_get_timestamp(sk, (struct timeval __user *)arg);
98
99 default:
100 return -ENOIOCTLCMD;
101 }
102}
103EXPORT_SYMBOL(can_ioctl);
104
/* sk destructor: drop any frames still queued for reception */
static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
109
110static const struct can_proto *can_get_proto(int protocol)
111{
112 const struct can_proto *cp;
113
114 rcu_read_lock();
115 cp = rcu_dereference(proto_tab[protocol]);
116 if (cp && !try_module_get(cp->prot->owner))
117 cp = NULL;
118 rcu_read_unlock();
119
120 return cp;
121}
122
/* drop the module reference taken by can_get_proto() */
static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}
127
/*
 * can_create - socket(PF_CAN, type, protocol) backend
 *
 * Looks up the requested CAN protocol (trying to load its module on
 * demand), allocates the sock and lets the protocol finish the
 * socket-specific initialization.
 */
static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/*
		 * In case of error we only print a message but don't
		 * return the error code immediately. Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			printk_ratelimited(KERN_ERR "can: request_module "
			       "(can-proto-%d) failed.\n", protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

	/* drop the module reference taken by can_get_proto() */
 errout:
	can_put_proto(cp);
	return err;
}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
 * can_send - transmit a CAN frame (optional with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
	int err = -EINVAL;

	/* classify the frame by its overall length and validate the dlc */
	if (skb->len == CAN_MTU) {
		skb->protocol = htons(ETH_P_CAN);
		if (unlikely(cfd->len > CAN_MAX_DLEN))
			goto inval_skb;
	} else if (skb->len == CANFD_MTU) {
		skb->protocol = htons(ETH_P_CANFD);
		if (unlikely(cfd->len > CANFD_MAX_DLEN))
			goto inval_skb;
	} else
		goto inval_skb;

	/*
	 * Make sure the CAN frame can pass the selected CAN netdevice.
	 * As structs can_frame and canfd_frame are similar, we can provide
	 * CAN FD frames to legacy CAN drivers as long as the data length
	 * fits into a classic CAN frame.
	 */
	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own (e.g. can_raw's CAN_RAW_RECV_OWN_MSGS).
		 *
		 * If the device has the IFF_ECHO flag the driver echoes
		 * the sent frame itself and skb->sk stays intact - no
		 * additional work is needed here.
		 */
		if (!(skb->dev->flags & IFF_ECHO)) {
			/*
			 * If the interface is not capable to do loopback
			 * itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		/* the transmit failed - also drop the loopback clone */
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	can_stats->tx_frames++;
	can_stats->tx_frames_delta++;

	return 0;

inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);
319
320
321
322
323
324static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net,
325 struct net_device *dev)
326{
327 if (!dev)
328 return net->can.can_rx_alldev_list;
329 else
330 return (struct dev_rcv_lists *)dev->ml_priv;
331}
332
333
334
335
336
337
338
339
340
341
342
343
344
345static unsigned int effhash(canid_t can_id)
346{
347 unsigned int hash;
348
349 hash = can_id;
350 hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
351 hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
352
353 return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
354}
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
/*
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. can_id and mask may be reduced on the way (the caller
 *  stores the possibly changed values in the receiver entry).
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in the filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			/* single EFF id: use the hashed rx_eff table */
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_eff[effhash(*can_id)];
		} else {
			/* single SFF id: directly indexed rx_sff table */
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct dev_rcv_lists *d;
	struct s_pstats *can_pstats = net->can.can_pstats;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (d) {
		/* find_rcv_list() may reduce can_id/mask - store the result */
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id = can_id;
		r->mask = mask;
		r->matches = 0;
		r->func = func;
		r->data = data;
		r->ident = ident;
		r->sk = sk;

		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		/* per-namespace subscription statistics */
		can_pstats->rcv_entries++;
		if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
			can_pstats->rcv_entries_max = can_pstats->rcv_entries;
	} else {
		/* the device has no receive lists (unregistered meanwhile) */
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&net->can.can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
513
514
515
516
517static void can_rx_delete_receiver(struct rcu_head *rp)
518{
519 struct receiver *r = container_of(rp, struct receiver, rcu);
520 struct sock *sk = r->sk;
521
522 kmem_cache_free(rcv_cache, r);
523 if (sk)
524 sock_put(sk);
525}
526
527
528
529
530
531
532
533
534
535
536
537
/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct s_pstats *can_pstats = net->can.can_pstats;
	struct dev_rcv_lists *d;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (!d) {
		pr_err("BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete. This should
	 * exist, since no receiver may be unregistered that hasn't been
	 * registered before.
	 */
	hlist_for_each_entry_rcu(r, rl, list) {
		if (r->can_id == can_id && r->mask == mask &&
		    r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for bugs in CAN protocol implementations using af_can.c:
	 * 'r' will be NULL if no matching list item was found for removal.
	 */
	if (!r) {
		WARN(1, "BUG: receive list entry not found for dev %s, "
		     "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats->rcv_entries > 0)
		can_pstats->rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries) {
		kfree(d);
		dev->ml_priv = NULL;
	}

 out:
	spin_unlock(&net->can.can_rcvlists_lock);

	/* schedule the receiver item for deletion after the grace period;
	 * hold the socket until the RCU callback has run */
	if (r) {
		if (r->sk)
			sock_hold(r->sk);
		call_rcu(&r->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);
611
/* hand the skb to one matching receiver and count the match */
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}
617
/*
 * Deliver a received skb to all matching receivers of one dev_rcv_lists
 * struct. Runs under rcu_read_lock (see can_receive). Returns the number
 * of delivered matches.
 */
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}
682
/* deliver a sanity-checked CAN(FD) frame to all subscribed receivers */
static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_rcv_lists *d;
	struct net *net = dev_net(dev);
	struct s_stats *can_stats = net->can.can_stats;
	int matches;

	/* update statistics */
	can_stats->rx_frames++;
	can_stats->rx_frames_delta++;

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(net, dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats->matches++;
		can_stats->matches_delta++;
	}
}
718
/* packet_type handler for ETH_P_CAN: classic CAN frames */
static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	/* drop (and warn once about) malformed skbs from broken drivers */
	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != CAN_MTU ||
		      cfd->len > CAN_MAX_DLEN,
		      "PF_CAN: dropped non conform CAN skbuf: "
		      "dev type %d, len %d, datalen %d\n",
		      dev->type, skb->len, cfd->len))
		goto drop;

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
739
/* packet_type handler for ETH_P_CANFD: CAN FD frames */
static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	/* drop (and warn once about) malformed skbs from broken drivers */
	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != CANFD_MTU ||
		      cfd->len > CANFD_MAX_DLEN,
		      "PF_CAN: dropped non conform CAN FD skbuf: "
		      "dev type %d, len %d, datalen %d\n",
		      dev->type, skb->len, cfd->len))
		goto drop;

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775int can_proto_register(const struct can_proto *cp)
776{
777 int proto = cp->protocol;
778 int err = 0;
779
780 if (proto < 0 || proto >= CAN_NPROTO) {
781 pr_err("can: protocol number %d out of range\n", proto);
782 return -EINVAL;
783 }
784
785 err = proto_register(cp->prot, 0);
786 if (err < 0)
787 return err;
788
789 mutex_lock(&proto_tab_lock);
790
791 if (proto_tab[proto]) {
792 pr_err("can: protocol %d already registered\n", proto);
793 err = -EBUSY;
794 } else
795 RCU_INIT_POINTER(proto_tab[proto], cp);
796
797 mutex_unlock(&proto_tab_lock);
798
799 if (err < 0)
800 proto_unregister(cp->prot);
801
802 return err;
803}
804EXPORT_SYMBOL(can_proto_register);
805
806
807
808
809
/**
 * can_proto_unregister - unregister a CAN protocol from the PF_CAN core
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(proto_tab[proto] != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	/* wait for RCU readers that may still see the old proto_tab entry */
	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
824
825
826
827
/*
 * af_can netdevice notifier: create/remove the CAN device specific
 * dev_rcv_lists struct on register/unregister of a CAN netdevice.
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dev_rcv_lists *d;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return NOTIFY_DONE;
		BUG_ON(dev->ml_priv);
		dev->ml_priv = d;

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&dev_net(dev)->can.can_rcvlists_lock);

		d = dev->ml_priv;
		if (d) {
			/* defer removal while receivers are still attached;
			 * can_rx_unregister() frees it on the last entry */
			if (d->entries)
				d->remove_on_zero_entries = 1;
			else {
				kfree(d);
				dev->ml_priv = NULL;
			}
		} else
			pr_err("can: notifier: receive list not found for dev "
			       "%s\n", dev->name);

		spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);

		break;
	}

	return NOTIFY_DONE;
}
872
873static int can_pernet_init(struct net *net)
874{
875 spin_lock_init(&net->can.can_rcvlists_lock);
876 net->can.can_rx_alldev_list =
877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
878
879 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
880 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
881
882 if (IS_ENABLED(CONFIG_PROC_FS)) {
883
884 if (stats_timer) {
885 setup_timer(&net->can.can_stattimer, can_stat_update,
886 (unsigned long)net);
887 mod_timer(&net->can.can_stattimer,
888 round_jiffies(jiffies + HZ));
889 }
890 net->can.can_stats->jiffies_init = jiffies;
891 can_init_proc(net);
892 }
893
894 return 0;
895}
896
/*
 * Per-namespace teardown: stop the statistics timer, remove procfs
 * entries and free every dev_rcv_lists struct still attached to CAN
 * netdevices in this namespace.
 */
static void can_pernet_exit(struct net *net)
{
	struct net_device *dev;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			del_timer_sync(&net->can.can_stattimer);
	}

	/* remove created dev_rcv_lists from still registered CAN devices */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
			struct dev_rcv_lists *d = dev->ml_priv;

			/* all receivers must be gone by now */
			BUG_ON(d->entries);
			kfree(d);
			dev->ml_priv = NULL;
		}
	}
	rcu_read_unlock();

	kfree(net->can.can_rx_alldev_list);
	kfree(net->can.can_stats);
	kfree(net->can.can_pstats);
}
924
925
926
927
928
/* af_can module init/exit glue: packet handlers, socket family,
 * netdevice notifier and per-namespace operations */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner  = THIS_MODULE,
};

/* notifier block for netdevice status changes */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};
954
955static __init int can_init(void)
956{
957
958 BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
959 offsetof(struct canfd_frame, len) ||
960 offsetof(struct can_frame, data) !=
961 offsetof(struct canfd_frame, data));
962
963 pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");
964
965 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
966 0, 0, NULL);
967 if (!rcv_cache)
968 return -ENOMEM;
969
970 register_pernet_subsys(&can_pernet_ops);
971
972
973 sock_register(&can_family_ops);
974 register_netdevice_notifier(&can_netdev_notifier);
975 dev_add_pack(&can_packet);
976 dev_add_pack(&canfd_packet);
977
978 return 0;
979}
980
/* module exit: unwind can_init() in reverse order */
static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	/* wait for pending RCU callbacks (can_rx_delete_receiver) before
	 * destroying the cache they free into */
	rcu_barrier();

	kmem_cache_destroy(rcv_cache);
}
995
module_init(can_init);
module_exit(can_exit);
998