1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43#include <linux/module.h>
44#include <linux/stddef.h>
45#include <linux/init.h>
46#include <linux/kmod.h>
47#include <linux/slab.h>
48#include <linux/list.h>
49#include <linux/spinlock.h>
50#include <linux/rcupdate.h>
51#include <linux/uaccess.h>
52#include <linux/net.h>
53#include <linux/netdevice.h>
54#include <linux/socket.h>
55#include <linux/if_ether.h>
56#include <linux/if_arp.h>
57#include <linux/skbuff.h>
58#include <linux/can.h>
59#include <linux/can/core.h>
60#include <linux/can/skb.h>
61#include <linux/can/can-ml.h>
62#include <linux/ratelimit.h>
63#include <net/net_namespace.h>
64#include <net/sock.h>
65
66#include "af_can.h"
67
68MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
69MODULE_LICENSE("Dual BSD/GPL");
70MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
71 "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
72
73MODULE_ALIAS_NETPROTO(PF_CAN);
74
/* module parameter: enable the periodic statistics update timer (default: on) */
static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/* slab cache for struct receiver objects (one per registered rx filter) */
static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols, indexed by protocol number;
 * readers use RCU (see can_get_proto()), writers serialize on proto_tab_lock
 */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

/* source of the non-zero per-skb identifiers handed out in can_receive() */
static atomic_t skbcounter = ATOMIC_INIT(0);
86
87
88
89void can_sock_destruct(struct sock *sk)
90{
91 skb_queue_purge(&sk->sk_receive_queue);
92 skb_queue_purge(&sk->sk_error_queue);
93}
94EXPORT_SYMBOL(can_sock_destruct);
95
/* can_get_proto - look up a registered CAN protocol and pin its module
 * @protocol: protocol number (caller must guarantee 0 <= protocol < CAN_NPROTO)
 *
 * Returns the can_proto entry with a module reference held, or NULL when
 * the protocol is not registered or its owning module is going away
 * (try_module_get() failed).  The RCU read section makes the table lookup
 * safe against concurrent can_proto_unregister().
 *
 * The caller must drop the module reference with can_put_proto().
 */
static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}
108
/* release the module reference taken by can_get_proto() */
static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}
113
/* af_can socket create function: called from socket(PF_CAN, type, protocol)
 * via can_family_ops.  Looks up the transport protocol (loading its module
 * on demand when the kernel is modular) and lets it initialize the socket.
 */
static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/* In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
					   protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

 errout:
	/* drop the module reference taken by can_get_proto() above */
	can_put_proto(cp);
	return err;
}
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
/**
 * can_send - transmit a CAN frame (optional with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended: use loop=1)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
	int err = -EINVAL;

	/* classify the frame by its skb length and validate the data length */
	if (skb->len == CAN_MTU) {
		skb->protocol = htons(ETH_P_CAN);
		if (unlikely(cfd->len > CAN_MAX_DLEN))
			goto inval_skb;
	} else if (skb->len == CANFD_MTU) {
		skb->protocol = htons(ETH_P_CANFD);
		if (unlikely(cfd->len > CANFD_MAX_DLEN))
			goto inval_skb;
	} else {
		goto inval_skb;
	}

	/* Make sure the CAN frame can pass the selected CAN netdevice.
	 * As structs can_frame and canfd_frame are similar, a CAN FD frame
	 * with a payload length <= CAN_MAX_DLEN is still allowed to pass a
	 * device whose MTU is smaller than the skb length.
	 */
	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/* A CAN netdevice with the IFF_ECHO flag echoes sent frames
		 * itself.  Otherwise we clone the skb here and loop the
		 * clone back after a successful transmission, so local
		 * listeners still see the frame.  The originating sock is
		 * preserved on the clone (can_skb_set_owner) — presumably so
		 * receivers can recognize their own frames; the consumer of
		 * that reference is not visible in this file.
		 */
		if (!(skb->dev->flags & IFF_ECHO)) {
			/* If the interface is not capable to do loopback
			 * itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		/* transmission failed: drop the loopback clone, if any */
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	pkg_stats->tx_frames++;
	pkg_stats->tx_frames_delta++;

	return 0;

inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);
300
301
302
303static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
304 struct net_device *dev)
305{
306 if (dev) {
307 struct can_ml_priv *can_ml = can_get_ml_priv(dev);
308 return &can_ml->dev_rcv_lists;
309 } else {
310 return net->can.rx_alldev_list;
311 }
312}
313
314
315
316
317
318
319
320
321
322
323
324
325
326static unsigned int effhash(canid_t can_id)
327{
328 unsigned int hash;
329
330 hash = can_id;
331 hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
332 hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
333
334 return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
335}
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/* can_rcv_list_find - determine the optimal filter list for a subscription
 * @can_id: pointer to the CAN identifier of a given can_filter (may be
 *          modified to the masked value used at receive time)
 * @mask: pointer to the CAN mask of a given can_filter (may be cleaned up)
 * @dev_rcv_lists: pointer to the device filter struct
 *
 * Classifies a (can_id, mask) filter pair into one of the receive lists
 * (error frames, inverted filters, catch-all, single SFF/EFF id, or the
 * generic mask/value list) so the receive path only walks the relevant list.
 */
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
					    struct can_dev_rcv_lists *dev_rcv_lists)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filter list */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &dev_rcv_lists->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filter pair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &dev_rcv_lists->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &dev_rcv_lists->rx[RX_ALL];

	/* extra filter lists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {
		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_eff[effhash(*can_id)];
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &dev_rcv_lists->rx[RX_FIL];
}
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see filter description)
 * @mask: CAN mask (see filter description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *rcv;
	struct hlist_head *rcv_list;
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!rcv)
		return -ENOMEM;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	rcv->can_id = can_id;
	rcv->mask = mask;
	rcv->matches = 0;
	rcv->func = func;
	rcv->data = data;
	rcv->ident = ident;
	rcv->sk = sk;

	/* publish the entry for the RCU readers in can_rcv_filter() */
	hlist_add_head_rcu(&rcv->list, rcv_list);
	dev_rcv_lists->entries++;

	rcv_lists_stats->rcv_entries++;
	rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
					       rcv_lists_stats->rcv_entries);
	spin_unlock_bh(&net->can.rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
488
489
/* RCU callback: free a receiver entry after the grace period and drop the
 * socket reference taken in can_rx_unregister().
 * Note: rcv->sk must be read before kmem_cache_free() invalidates rcv.
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *rcv = container_of(rp, struct receiver, rcu);
	struct sock *sk = rcv->sk;

	kmem_cache_free(rcv_cache, rcv);
	if (sk)
		sock_put(sk);
}
499
500
501
502
503
504
505
506
507
508
509
510
511
/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes the subscription entry matching the given
 *  (dev, can_id, mask, func, data) tuple, if it exists.  The actual
 *  memory release is deferred to an RCU callback so concurrent readers
 *  of the receive lists stay safe.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *rcv = NULL;
	struct hlist_head *rcv_list;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
	struct can_dev_rcv_lists *dev_rcv_lists;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	/* Search the receiver list for the item to delete.  This should
	 * exist, since no receiver may be unregistered that hasn't been
	 * registered before.
	 */
	hlist_for_each_entry_rcu(rcv, rcv_list, list) {
		if (rcv->can_id == can_id && rcv->mask == mask &&
		    rcv->func == func && rcv->data == data)
			break;
	}

	/* Check for bugs in CAN protocol implementations using af_can.c:
	 * 'rcv' will be NULL if no matching list item was found for removal.
	 */
	if (!rcv) {
		pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
			DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&rcv->list);
	dev_rcv_lists->entries--;

	if (rcv_lists_stats->rcv_entries > 0)
		rcv_lists_stats->rcv_entries--;

 out:
	spin_unlock_bh(&net->can.rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (rcv) {
		/* hold the sk until the rcu callback ran (sock_put there) */
		if (rcv->sk)
			sock_hold(rcv->sk);
		call_rcu(&rcv->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);
571
/* invoke a matched receiver's callback and account the match */
static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
{
	rcv->func(skb, rcv->data);
	rcv->matches++;
}
577
/* Walk the receive lists of one can_dev_rcv_lists and deliver the skb to
 * every matching receiver.  Must run inside an RCU read section (see the
 * caller can_receive()).  Returns the number of delivered matches.
 */
static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
{
	struct receiver *rcv;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (dev_rcv_lists->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
			if (can_id & rcv->mask) {
				deliver(skb, rcv);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
		deliver(skb, rcv);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
		if ((can_id & rcv->mask) == rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
		if ((can_id & rcv->mask) != rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
			if (rcv->can_id == can_id) {
				deliver(skb, rcv);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
			deliver(skb, rcv);
			matches++;
		}
	}

	return matches;
}
642
/* Deliver one received CAN skb to all subscribed sockets: first the
 * netns-wide 'all devices' list, then the per-device list.  The skb is
 * consumed here in any case.
 */
static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct net *net = dev_net(dev);
	struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
	int matches;

	/* update statistics */
	pkg_stats->rx_frames++;
	pkg_stats->rx_frames_delta++;

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.rx_alldev_list, skb);

	/* find receive list for this device */
	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	matches += can_rcv_filter(dev_rcv_lists, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		pkg_stats->matches++;
		pkg_stats->matches_delta++;
	}
}
677
678static int can_rcv(struct sk_buff *skb, struct net_device *dev,
679 struct packet_type *pt, struct net_device *orig_dev)
680{
681 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
682
683 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
684 pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
685 dev->type, skb->len);
686 goto free_skb;
687 }
688
689
690 if (unlikely(cfd->len > CAN_MAX_DLEN)) {
691 pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n",
692 dev->type, skb->len, cfd->len);
693 goto free_skb;
694 }
695
696 can_receive(skb, dev);
697 return NET_RX_SUCCESS;
698
699free_skb:
700 kfree_skb(skb);
701 return NET_RX_DROP;
702}
703
704static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
705 struct packet_type *pt, struct net_device *orig_dev)
706{
707 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
708
709 if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
710 pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
711 dev->type, skb->len);
712 goto free_skb;
713 }
714
715
716 if (unlikely(cfd->len > CANFD_MAX_DLEN)) {
717 pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n",
718 dev->type, skb->len, cfd->len);
719 goto free_skb;
720 }
721
722 can_receive(skb, dev);
723 return NET_RX_SUCCESS;
724
725free_skb:
726 kfree_skb(skb);
727 return NET_RX_DROP;
728}
729
730
731
732
733
734
735
736
737
738
739
740
741
/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  or the error returned by proto_register()
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		pr_err("can: protocol number %d out of range\n", proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (rcu_access_pointer(proto_tab[proto])) {
		pr_err("can: protocol %d already registered\n", proto);
		err = -EBUSY;
	} else {
		RCU_INIT_POINTER(proto_tab[proto], cp);
	}

	mutex_unlock(&proto_tab_lock);

	/* roll back the proto_register() on a table conflict */
	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);
773
774
775
776
777
/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	/* wait for RCU readers that may still see the old proto_tab entry
	 * (see the rcu_read_lock section in can_get_proto())
	 */
	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
792
/* per-netns init: allocate the 'all devices' receive list and the two
 * statistics structs, and (with procfs) start the statistics timer and
 * create the proc entries.  Uses goto-based unwind on allocation failure.
 */
static int can_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.rcvlists_lock);
	net->can.rx_alldev_list =
		kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
	if (!net->can.rx_alldev_list)
		goto out;
	net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
	if (!net->can.pkg_stats)
		goto out_free_rx_alldev_list;
	net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
	if (!net->can.rcv_lists_stats)
		goto out_free_pkg_stats;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		/* the statistics are updated every second (timer triggered) */
		if (stats_timer) {
			timer_setup(&net->can.stattimer, can_stat_update,
				    0);
			mod_timer(&net->can.stattimer,
				  round_jiffies(jiffies + HZ));
		}
		net->can.pkg_stats->jiffies_init = jiffies;
		can_init_proc(net);
	}

	return 0;

 out_free_pkg_stats:
	kfree(net->can.pkg_stats);
 out_free_rx_alldev_list:
	kfree(net->can.rx_alldev_list);
 out:
	return -ENOMEM;
}
828
/* per-netns teardown: remove proc entries and stop the statistics timer
 * before freeing the structures they reference.
 */
static void can_pernet_exit(struct net *net)
{
	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			del_timer_sync(&net->can.stattimer);
	}

	kfree(net->can.rx_alldev_list);
	kfree(net->can.pkg_stats);
	kfree(net->can.rcv_lists_stats);
}
841
842
843
/* link-layer handler for Classical CAN frames (ETH_P_CAN) */
static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

/* link-layer handler for CAN FD frames (ETH_P_CANFD) */
static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

/* socket(PF_CAN, ...) entry point */
static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

/* per-netns setup/teardown of receive lists, statistics and procfs */
static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};
864
/* module init: create the receiver cache, register the pernet ops, the
 * PF_CAN socket family and the two link-layer packet handlers — unwinding
 * in reverse order on failure.
 */
static __init int can_init(void)
{
	int err;

	/* check for correct padding to be able to use the structs similarly */
	BUILD_BUG_ON(offsetof(struct can_frame, len) !=
		     offsetof(struct canfd_frame, len) ||
		     offsetof(struct can_frame, data) !=
		     offsetof(struct canfd_frame, data));

	pr_info("can: controller area network core\n");

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	err = register_pernet_subsys(&can_pernet_ops);
	if (err)
		goto out_pernet;

	/* protocol register */
	err = sock_register(&can_family_ops);
	if (err)
		goto out_sock;

	dev_add_pack(&can_packet);
	dev_add_pack(&canfd_packet);

	return 0;

out_sock:
	unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
	kmem_cache_destroy(rcv_cache);

	return err;
}
903
/* module exit: tear down in reverse registration order, then wait for
 * outstanding RCU callbacks (can_rx_delete_receiver) before destroying
 * the cache they free into.
 */
static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);
920