1#include <linux/types.h>
2#include <linux/skbuff.h>
3#include <linux/socket.h>
4#include <linux/sysctl.h>
5#include <linux/net.h>
6#include <linux/module.h>
7#include <linux/if_arp.h>
8#include <linux/ipv6.h>
9#include <linux/mpls.h>
10#include <linux/netconf.h>
11#include <linux/nospec.h>
12#include <linux/vmalloc.h>
13#include <linux/percpu.h>
14#include <net/ip.h>
15#include <net/dst.h>
16#include <net/sock.h>
17#include <net/arp.h>
18#include <net/ip_fib.h>
19#include <net/netevent.h>
20#include <net/ip_tunnels.h>
21#include <net/netns/generic.h>
22#if IS_ENABLED(CONFIG_IPV6)
23#include <net/ipv6.h>
24#endif
25#include <net/ipv6_stubs.h>
26#include <net/nexthop.h>
27#include "internal.h"

/* max memory we will use for an mpls_route */
#define MAX_MPLS_ROUTE_MEM	4096

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
38
39static int label_limit = (1 << 20) - 1;
40static int ttl_max = 255;
41
42#if IS_ENABLED(CONFIG_NET_IP_TUNNEL)
43static size_t ipgre_mpls_encap_hlen(struct ip_tunnel_encap *e)
44{
45 return sizeof(struct mpls_shim_hdr);
46}
47
48static const struct ip_tunnel_encap_ops mpls_iptun_ops = {
49 .encap_hlen = ipgre_mpls_encap_hlen,
50};
51
52static int ipgre_tunnel_encap_add_mpls_ops(void)
53{
54 return ip_tunnel_encap_add_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
55}
56
57static void ipgre_tunnel_encap_del_mpls_ops(void)
58{
59 ip_tunnel_encap_del_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
60}
61#else
62static int ipgre_tunnel_encap_add_mpls_ops(void)
63{
64 return 0;
65}
66
67static void ipgre_tunnel_encap_del_mpls_ops(void)
68{
69}
70#endif
71
72static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
73 struct nlmsghdr *nlh, struct net *net, u32 portid,
74 unsigned int nlm_flags);
75
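/* Look up the route installed for @index in the platform label table.
 * Must be called from an rcu read-side critical section; returns NULL
 * when the label is out of range or has no route.
 */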
76static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
77{
78 struct mpls_route *rt = NULL;
79
80 if (index < net->mpls.platform_labels) {
81 struct mpls_route __rcu **platform_label =
82 rcu_dereference(net->mpls.platform_label);
83 rt = rcu_dereference(platform_label[index]);
84 }
85 return rt;
86}
87
88bool mpls_output_possible(const struct net_device *dev)
89{
90 return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
91}
92EXPORT_SYMBOL_GPL(mpls_output_possible);
93
94static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
95{
96 return (u8 *)nh + rt->rt_via_offset;
97}
98
99static const u8 *mpls_nh_via(const struct mpls_route *rt,
100 const struct mpls_nh *nh)
101{
102 return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
103}
104
static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}
110
unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
116EXPORT_SYMBOL_GPL(mpls_dev_mtu);
117
118bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
119{
120 if (skb->len <= mtu)
121 return false;
122
123 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
124 return false;
125
126 return true;
127}
128EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
129
130void mpls_stats_inc_outucastpkts(struct net_device *dev,
131 const struct sk_buff *skb)
132{
133 struct mpls_dev *mdev;
134
135 if (skb->protocol == htons(ETH_P_MPLS_UC)) {
136 mdev = mpls_dev_get(dev);
137 if (mdev)
138 MPLS_INC_STATS_LEN(mdev, skb->len,
139 tx_packets,
140 tx_bytes);
141 } else if (skb->protocol == htons(ETH_P_IP)) {
142 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
143#if IS_ENABLED(CONFIG_IPV6)
144 } else if (skb->protocol == htons(ETH_P_IPV6)) {
145 struct inet6_dev *in6dev = __in6_dev_get(dev);
146
147 if (in6dev)
148 IP6_UPD_PO_STATS(dev_net(dev), in6dev,
149 IPSTATS_MIB_OUT, skb->len);
150#endif
151 }
152}
153EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
154
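/* Compute a flow hash over up to MAX_MP_SELECT_LABELS stack entries and,
 * once the bottom of stack is reached, the addresses and protocol of an
 * IPv4 or IPv6 payload.  Used to spread flows across multipath nexthops.
 */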
155static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
156{
157 struct mpls_entry_decoded dec;
158 unsigned int mpls_hdr_len = 0;
159 struct mpls_shim_hdr *hdr;
160 bool eli_seen = false;
161 int label_index;
162 u32 hash = 0;
163
164 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
165 label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy label
			 * was just added to the hash - no need to go any
			 * deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* found bottom label; does skb have room for a header? */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
197 const struct iphdr *v4hdr;
198
199 v4hdr = (const struct iphdr *)(hdr + 1);
200 if (v4hdr->version == 4) {
201 hash = jhash_3words(ntohl(v4hdr->saddr),
202 ntohl(v4hdr->daddr),
203 v4hdr->protocol, hash);
204 } else if (v4hdr->version == 6 &&
205 pskb_may_pull(skb, mpls_hdr_len +
206 sizeof(struct ipv6hdr))) {
207 const struct ipv6hdr *v6hdr;
208
209 v6hdr = (const struct ipv6hdr *)(hdr + 1);
210 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
211 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
212 hash = jhash_1word(v6hdr->nexthdr, hash);
213 }
214 }
215
216 break;
217 }
218
219 return hash;
220}
221
static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
{
	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}

/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
 * a next hop (nh->nh_flags) are modified by netdev event handlers.
 * Since those fields can change at any moment, use READ_ONCE to
 * access both.
 */
232static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
233 struct sk_buff *skb)
234{
235 u32 hash = 0;
236 int nh_index = 0;
237 int n = 0;
238 u8 alive;
239
	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		return rt->rt_nh;
245
246 alive = READ_ONCE(rt->rt_nhn_alive);
247 if (alive == 0)
248 return NULL;
249
250 hash = mpls_multipath_hash(rt, skb);
251 nh_index = hash % alive;
252 if (alive == rt->rt_nhn)
253 goto out;
254 for_nexthops(rt) {
255 unsigned int nh_flags = READ_ONCE(nh->nh_flags);
256
257 if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
258 continue;
259 if (n == nh_index)
260 return nh;
261 n++;
262 } endfor_nexthops(rt);
263
264out:
265 return mpls_get_nexthop(rt, nh_index);
266}
267
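/* Last label popped: hand the packet to IPv4 or IPv6 by setting
 * skb->protocol and writing the outgoing TTL/hop limit according to the
 * route's TTL propagation policy.
 */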
268static bool mpls_egress(struct net *net, struct mpls_route *rt,
269 struct sk_buff *skb, struct mpls_entry_decoded dec)
270{
271 enum mpls_payload_type payload_type;
272 bool success = false;
273
	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;
286
287 payload_type = rt->rt_payload_type;
288 if (payload_type == MPT_UNSPEC)
289 payload_type = ip_hdr(skb)->version;
290
291 switch (payload_type) {
292 case MPT_IPV4: {
293 struct iphdr *hdr4 = ip_hdr(skb);
294 u8 new_ttl;
295 skb->protocol = htons(ETH_P_IP);
296
		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * TTL, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			new_ttl = dec.ttl;
		else
			new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
307
308 csum_replace2(&hdr4->check,
309 htons(hdr4->ttl << 8),
310 htons(new_ttl << 8));
311 hdr4->ttl = new_ttl;
312 success = true;
313 break;
314 }
315 case MPT_IPV6: {
316 struct ipv6hdr *hdr6 = ipv6_hdr(skb);
317 skb->protocol = htons(ETH_P_IPV6);
318
		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * hop limit, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			hdr6->hop_limit = dec.ttl;
		else if (hdr6->hop_limit)
			hdr6->hop_limit = hdr6->hop_limit - 1;
329 success = true;
330 break;
331 }
332 case MPT_UNSPEC:
		/* Should have decided which protocol it is by now */
334 break;
335 }
336
337 return success;
338}
339
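/* Receive handler for ETH_P_MPLS_UC packets: decode the incoming label,
 * look up its route, pick a nexthop, then either push the outgoing label
 * stack or hand the payload to IP, and transmit via the neighbour layer.
 */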
340static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
341 struct packet_type *pt, struct net_device *orig_dev)
342{
343 struct net *net = dev_net(dev);
344 struct mpls_shim_hdr *hdr;
345 struct mpls_route *rt;
346 struct mpls_nh *nh;
347 struct mpls_entry_decoded dec;
348 struct net_device *out_dev;
349 struct mpls_dev *out_mdev;
350 struct mpls_dev *mdev;
351 unsigned int hh_len;
352 unsigned int new_header_size;
353 unsigned int mtu;
354 int err;
355
	/* Careful this entire function runs inside of an rcu critical
	 * section
	 */
	mdev = mpls_dev_get(dev);
359 if (!mdev)
360 goto drop;
361
362 MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
363 rx_bytes);
364
365 if (!mdev->input_enabled) {
366 MPLS_INC_STATS(mdev, rx_dropped);
367 goto drop;
368 }
369
370 if (skb->pkt_type != PACKET_HOST)
371 goto err;
372
373 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
374 goto err;
375
376 if (!pskb_may_pull(skb, sizeof(*hdr)))
377 goto err;
378
379 skb_dst_drop(skb);

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);
384
385 rt = mpls_route_input_rcu(net, dec.label);
386 if (!rt) {
387 MPLS_INC_STATS(mdev, rx_noroute);
388 goto drop;
389 }
390
391 nh = mpls_select_multipath(rt, skb);
392 if (!nh)
393 goto err;
394
	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);
398
399 skb_orphan(skb);
400
401 if (skb_warn_if_lro(skb))
402 goto err;
403
404 skb_forward_csum(skb);
405
	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;
	dec.ttl -= 1;
410
	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
414 goto tx_err;
415
	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
420 goto tx_err;
421
422 hh_len = LL_RESERVED_SPACE(out_dev);
423 if (!out_dev->header_ops)
424 hh_len = 0;
425
	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
428 goto tx_err;
429
430 skb->dev = out_dev;
431 skb->protocol = htons(ETH_P_MPLS_UC);
432
433 if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
435 if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
436 goto err;
437 } else {
438 bool bos;
439 int i;
440 skb_push(skb, new_header_size);
441 skb_reset_network_header(skb);
442
443 hdr = mpls_hdr(skb);
444 bos = dec.bos;
445 for (i = nh->nh_labels - 1; i >= 0; i--) {
446 hdr[i] = mpls_entry_encode(nh->nh_label[i],
447 dec.ttl, 0, bos);
448 bos = false;
449 }
450 }
451
452 mpls_stats_inc_outucastpkts(out_dev, skb);
453
	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
456 err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
457 out_dev->dev_addr, skb);
458 else
459 err = neigh_xmit(nh->nh_via_table, out_dev,
460 mpls_nh_via(rt, nh), skb);
461 if (err)
462 net_dbg_ratelimited("%s: packet transmission failed: %d\n",
463 __func__, err);
464 return 0;
465
466tx_err:
467 out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
468 if (out_mdev)
469 MPLS_INC_STATS(out_mdev, tx_errors);
470 goto drop;
471err:
472 MPLS_INC_STATS(mdev, rx_errors);
473drop:
474 kfree_skb(skb);
475 return NET_RX_DROP;
476}
477
478static struct packet_type mpls_packet_type __read_mostly = {
479 .type = cpu_to_be16(ETH_P_MPLS_UC),
480 .func = mpls_forward,
481};
482
483static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
484 [RTA_DST] = { .type = NLA_U32 },
485 [RTA_OIF] = { .type = NLA_U32 },
486 [RTA_TTL_PROPAGATE] = { .type = NLA_U8 },
487};
488
489struct mpls_route_config {
490 u32 rc_protocol;
491 u32 rc_ifindex;
492 u8 rc_via_table;
493 u8 rc_via_alen;
494 u8 rc_via[MAX_VIA_ALEN];
495 u32 rc_label;
496 u8 rc_ttl_propagate;
497 u8 rc_output_labels;
498 u32 rc_output_label[MAX_NEW_LABELS];
499 u32 rc_nlflags;
500 enum mpls_payload_type rc_payload_type;
501 struct nl_info rc_nlinfo;
502 struct rtnexthop *rc_mp;
503 int rc_mp_len;
504};
505
506
507
508
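/* Allocate a route and its nexthop array in one allocation.  Each nexthop
 * is sized for the largest via address and label stack so that
 * mpls_get_nexthop() can index the array with a fixed stride.
 */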
509static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
510{
511 u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
512 struct mpls_route *rt;
513 size_t size;
514
515 size = sizeof(*rt) + num_nh * nh_size;
516 if (size > MAX_MPLS_ROUTE_MEM)
517 return ERR_PTR(-EINVAL);
518
519 rt = kzalloc(size, GFP_KERNEL);
520 if (!rt)
521 return ERR_PTR(-ENOMEM);
522
523 rt->rt_nhn = num_nh;
524 rt->rt_nhn_alive = num_nh;
525 rt->rt_nh_size = nh_size;
526 rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);
527
528 return rt;
529}
530
531static void mpls_rt_free(struct mpls_route *rt)
532{
533 if (rt)
534 kfree_rcu(rt, rt_rcu);
535}
536
537static void mpls_notify_route(struct net *net, unsigned index,
538 struct mpls_route *old, struct mpls_route *new,
539 const struct nl_info *info)
540{
541 struct nlmsghdr *nlh = info ? info->nlh : NULL;
542 unsigned portid = info ? info->portid : 0;
543 int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
544 struct mpls_route *rt = new ? new : old;
545 unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
546
547 if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
548 rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
549}
550
551static void mpls_route_update(struct net *net, unsigned index,
552 struct mpls_route *new,
553 const struct nl_info *info)
554{
555 struct mpls_route __rcu **platform_label;
556 struct mpls_route *rt;
557
558 ASSERT_RTNL();
559
560 platform_label = rtnl_dereference(net->mpls.platform_label);
561 rt = rtnl_dereference(platform_label[index]);
562 rcu_assign_pointer(platform_label[index], new);
563
564 mpls_notify_route(net, index, rt, new, info);
565
	/* If we removed a route free it now */
	mpls_rt_free(rt);
568}
569
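/* Return the first unreserved label without a route, or
 * LABEL_NOT_SPECIFIED if the platform label table is exhausted.
 */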
570static unsigned find_free_label(struct net *net)
571{
572 struct mpls_route __rcu **platform_label;
573 size_t platform_labels;
574 unsigned index;
575
576 platform_label = rtnl_dereference(net->mpls.platform_label);
577 platform_labels = net->mpls.platform_labels;
578 for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
579 index++) {
580 if (!rtnl_dereference(platform_label[index]))
581 return index;
582 }
583 return LABEL_NOT_SPECIFIED;
584}
585
586#if IS_ENABLED(CONFIG_INET)
587static struct net_device *inet_fib_lookup_dev(struct net *net,
588 const void *addr)
589{
590 struct net_device *dev;
591 struct rtable *rt;
592 struct in_addr daddr;
593
594 memcpy(&daddr, addr, sizeof(struct in_addr));
595 rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
596 if (IS_ERR(rt))
597 return ERR_CAST(rt);
598
599 dev = rt->dst.dev;
600 dev_hold(dev);
601
602 ip_rt_put(rt);
603
604 return dev;
605}
606#else
607static struct net_device *inet_fib_lookup_dev(struct net *net,
608 const void *addr)
609{
610 return ERR_PTR(-EAFNOSUPPORT);
611}
612#endif
613
614#if IS_ENABLED(CONFIG_IPV6)
615static struct net_device *inet6_fib_lookup_dev(struct net *net,
616 const void *addr)
617{
618 struct net_device *dev;
619 struct dst_entry *dst;
620 struct flowi6 fl6;
621
622 if (!ipv6_stub)
623 return ERR_PTR(-EAFNOSUPPORT);
624
625 memset(&fl6, 0, sizeof(fl6));
626 memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
627 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
628 if (IS_ERR(dst))
629 return ERR_CAST(dst);
630
631 dev = dst->dev;
632 dev_hold(dev);
633 dst_release(dst);
634
635 return dev;
636}
637#else
638static struct net_device *inet6_fib_lookup_dev(struct net *net,
639 const void *addr)
640{
641 return ERR_PTR(-EAFNOSUPPORT);
642}
643#endif
644
645static struct net_device *find_outdev(struct net *net,
646 struct mpls_route *rt,
647 struct mpls_nh *nh, int oif)
648{
649 struct net_device *dev = NULL;
650
651 if (!oif) {
652 switch (nh->nh_via_table) {
653 case NEIGH_ARP_TABLE:
654 dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
655 break;
656 case NEIGH_ND_TABLE:
657 dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
658 break;
659 case NEIGH_LINK_TABLE:
660 break;
661 }
662 } else {
663 dev = dev_get_by_index(net, oif);
664 }
665
666 if (!dev)
667 return ERR_PTR(-ENODEV);
668
669 if (IS_ERR(dev))
670 return dev;
671
	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);
674
675 return dev;
676}
677
678static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
679 struct mpls_nh *nh, int oif)
680{
681 struct net_device *dev = NULL;
682 int err = -ENODEV;
683
684 dev = find_outdev(net, rt, nh, oif);
685 if (IS_ERR(dev)) {
686 err = PTR_ERR(dev);
687 dev = NULL;
688 goto errout;
689 }
690
	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
694 goto errout;
695
696 if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
697 (dev->addr_len != nh->nh_via_alen))
698 goto errout;
699
700 RCU_INIT_POINTER(nh->nh_dev, dev);
701
702 if (!(dev->flags & IFF_UP)) {
703 nh->nh_flags |= RTNH_F_DEAD;
704 } else {
705 unsigned int flags;
706
707 flags = dev_get_flags(dev);
708 if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
709 nh->nh_flags |= RTNH_F_LINKDOWN;
710 }
711
712 return 0;
713
714errout:
715 return err;
716}
717
718static int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
719 u8 via_addr[], struct netlink_ext_ack *extack)
720{
721 struct rtvia *via = nla_data(nla);
722 int err = -EINVAL;
723 int alen;
724
725 if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
726 NL_SET_ERR_MSG_ATTR(extack, nla,
727 "Invalid attribute length for RTA_VIA");
728 goto errout;
729 }
730 alen = nla_len(nla) -
731 offsetof(struct rtvia, rtvia_addr);
732 if (alen > MAX_VIA_ALEN) {
733 NL_SET_ERR_MSG_ATTR(extack, nla,
734 "Invalid address length for RTA_VIA");
735 goto errout;
736 }
737
	/* Validate the address family */
	switch (via->rtvia_family) {
740 case AF_PACKET:
741 *via_table = NEIGH_LINK_TABLE;
742 break;
743 case AF_INET:
744 *via_table = NEIGH_ARP_TABLE;
745 if (alen != 4)
746 goto errout;
747 break;
748 case AF_INET6:
749 *via_table = NEIGH_ND_TABLE;
750 if (alen != 16)
751 goto errout;
752 break;
753 default:
		/* Unsupported address family */
755 goto errout;
756 }
757
758 memcpy(via_addr, via->rtvia_addr, alen);
759 *via_alen = alen;
760 err = 0;
761
762errout:
763 return err;
764}
765
766static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
767 struct mpls_route *rt)
768{
769 struct net *net = cfg->rc_nlinfo.nl_net;
770 struct mpls_nh *nh = rt->rt_nh;
771 int err;
772 int i;
773
774 if (!nh)
775 return -ENOMEM;
776
777 nh->nh_labels = cfg->rc_output_labels;
778 for (i = 0; i < nh->nh_labels; i++)
779 nh->nh_label[i] = cfg->rc_output_label[i];
780
781 nh->nh_via_table = cfg->rc_via_table;
782 memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
783 nh->nh_via_alen = cfg->rc_via_alen;
784
785 err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
786 if (err)
787 goto errout;
788
789 if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
790 rt->rt_nhn_alive--;
791
792 return 0;
793
794errout:
795 return err;
796}
797
798static int mpls_nh_build(struct net *net, struct mpls_route *rt,
799 struct mpls_nh *nh, int oif, struct nlattr *via,
800 struct nlattr *newdst, u8 max_labels,
801 struct netlink_ext_ack *extack)
802{
803 int err = -ENOMEM;
804
805 if (!nh)
806 goto errout;
807
808 if (newdst) {
809 err = nla_get_labels(newdst, max_labels, &nh->nh_labels,
810 nh->nh_label, extack);
811 if (err)
812 goto errout;
813 }
814
815 if (via) {
816 err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
817 __mpls_nh_via(rt, nh), extack);
818 if (err)
819 goto errout;
820 } else {
821 nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
822 }
823
824 err = mpls_nh_assign_dev(net, rt, nh, oif);
825 if (err)
826 goto errout;
827
828 return 0;
829
830errout:
831 return err;
832}
833
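/* Walk an RTA_MULTIPATH attribute and report the number of nexthops and
 * the largest via address and label stack found, so that the route can be
 * sized before the nexthops are parsed.  Returns 0 on malformed input.
 */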
834static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
835 u8 cfg_via_alen, u8 *max_via_alen,
836 u8 *max_labels)
837{
838 int remaining = len;
839 u8 nhs = 0;
840
841 *max_via_alen = 0;
842 *max_labels = 0;
843
844 while (rtnh_ok(rtnh, remaining)) {
845 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
846 int attrlen;
847 u8 n_labels = 0;
848
849 attrlen = rtnh_attrlen(rtnh);
850 nla = nla_find(attrs, attrlen, RTA_VIA);
851 if (nla && nla_len(nla) >=
852 offsetof(struct rtvia, rtvia_addr)) {
853 int via_alen = nla_len(nla) -
854 offsetof(struct rtvia, rtvia_addr);
855
856 if (via_alen <= MAX_VIA_ALEN)
857 *max_via_alen = max_t(u16, *max_via_alen,
858 via_alen);
859 }
860
861 nla = nla_find(attrs, attrlen, RTA_NEWDST);
862 if (nla &&
863 nla_get_labels(nla, MAX_NEW_LABELS, &n_labels,
864 NULL, NULL) != 0)
865 return 0;
866
867 *max_labels = max_t(u8, *max_labels, n_labels);
868
		/* number of nexthops is tracked by a u8.
		 * Check for overflow.
		 */
		if (nhs == 255)
			return 0;
874 nhs++;
875
876 rtnh = rtnh_next(rtnh, &remaining);
877 }
878
	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
881}
882
883static int mpls_nh_build_multi(struct mpls_route_config *cfg,
884 struct mpls_route *rt, u8 max_labels,
885 struct netlink_ext_ack *extack)
886{
887 struct rtnexthop *rtnh = cfg->rc_mp;
888 struct nlattr *nla_via, *nla_newdst;
889 int remaining = cfg->rc_mp_len;
890 int err = 0;
891 u8 nhs = 0;
892
893 change_nexthops(rt) {
894 int attrlen;
895
896 nla_via = NULL;
897 nla_newdst = NULL;
898
899 err = -EINVAL;
900 if (!rtnh_ok(rtnh, remaining))
901 goto errout;
902
		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;
908
909 attrlen = rtnh_attrlen(rtnh);
910 if (attrlen > 0) {
911 struct nlattr *attrs = rtnh_attrs(rtnh);
912
913 nla_via = nla_find(attrs, attrlen, RTA_VIA);
914 nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
915 }
916
917 err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
918 rtnh->rtnh_ifindex, nla_via, nla_newdst,
919 max_labels, extack);
920 if (err)
921 goto errout;
922
923 if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
924 rt->rt_nhn_alive--;
925
926 rtnh = rtnh_next(rtnh, &remaining);
927 nhs++;
928 } endfor_nexthops(rt);
929
930 rt->rt_nhn = nhs;
931
932 return 0;
933
934errout:
935 return err;
936}
937
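/* Reject reserved labels and labels beyond the platform label table, and
 * sanitise the index against speculative out-of-bounds use.
 */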
938static bool mpls_label_ok(struct net *net, unsigned int *index,
939 struct netlink_ext_ack *extack)
940{
941 bool is_ok = true;
942
	/* Reserved labels may not be set */
	if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
945 NL_SET_ERR_MSG(extack,
946 "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
947 is_ok = false;
948 }
949
	/* The full 20 bit range may not be supported. */
	if (is_ok && *index >= net->mpls.platform_labels) {
952 NL_SET_ERR_MSG(extack,
953 "Label >= configured maximum in platform_labels");
954 is_ok = false;
955 }
956
957 *index = array_index_nospec(*index, net->mpls.platform_labels);
958 return is_ok;
959}
960
961static int mpls_route_add(struct mpls_route_config *cfg,
962 struct netlink_ext_ack *extack)
963{
964 struct mpls_route __rcu **platform_label;
965 struct net *net = cfg->rc_nlinfo.nl_net;
966 struct mpls_route *rt, *old;
967 int err = -EINVAL;
968 u8 max_via_alen;
969 unsigned index;
970 u8 max_labels;
971 u8 nhs;
972
973 index = cfg->rc_label;
974
	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
977 (cfg->rc_nlflags & NLM_F_CREATE)) {
978 index = find_free_label(net);
979 }
980
981 if (!mpls_label_ok(net, &index, extack))
982 goto errout;
983
	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
986 if (cfg->rc_nlflags & NLM_F_APPEND) {
987 NL_SET_ERR_MSG(extack, "MPLS does not support route append");
988 goto errout;
989 }
990
991 err = -EEXIST;
992 platform_label = rtnl_dereference(net->mpls.platform_label);
993 old = rtnl_dereference(platform_label[index]);
994 if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
995 goto errout;
996
997 err = -EEXIST;
998 if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
999 goto errout;
1000
1001 err = -ENOENT;
1002 if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
1003 goto errout;
1004
1005 err = -EINVAL;
1006 if (cfg->rc_mp) {
1007 nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
1008 cfg->rc_via_alen, &max_via_alen,
1009 &max_labels);
1010 } else {
1011 max_via_alen = cfg->rc_via_alen;
1012 max_labels = cfg->rc_output_labels;
1013 nhs = 1;
1014 }
1015
1016 if (nhs == 0) {
1017 NL_SET_ERR_MSG(extack, "Route does not contain a nexthop");
1018 goto errout;
1019 }
1020
1021 err = -ENOMEM;
1022 rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
1023 if (IS_ERR(rt)) {
1024 err = PTR_ERR(rt);
1025 goto errout;
1026 }
1027
1028 rt->rt_protocol = cfg->rc_protocol;
1029 rt->rt_payload_type = cfg->rc_payload_type;
1030 rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
1031
1032 if (cfg->rc_mp)
1033 err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
1034 else
1035 err = mpls_nh_build_from_cfg(cfg, rt);
1036 if (err)
1037 goto freert;
1038
1039 mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
1040
1041 return 0;
1042
1043freert:
1044 mpls_rt_free(rt);
1045errout:
1046 return err;
1047}
1048
1049static int mpls_route_del(struct mpls_route_config *cfg,
1050 struct netlink_ext_ack *extack)
1051{
1052 struct net *net = cfg->rc_nlinfo.nl_net;
1053 unsigned index;
1054 int err = -EINVAL;
1055
1056 index = cfg->rc_label;
1057
1058 if (!mpls_label_ok(net, &index, extack))
1059 goto errout;
1060
1061 mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
1062
1063 err = 0;
1064errout:
1065 return err;
1066}
1067
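/* Fold the per-CPU counters into a single mpls_link_stats snapshot. */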
1068static void mpls_get_stats(struct mpls_dev *mdev,
1069 struct mpls_link_stats *stats)
1070{
1071 struct mpls_pcpu_stats *p;
1072 int i;
1073
1074 memset(stats, 0, sizeof(*stats));
1075
1076 for_each_possible_cpu(i) {
1077 struct mpls_link_stats local;
1078 unsigned int start;
1079
1080 p = per_cpu_ptr(mdev->stats, i);
1081 do {
1082 start = u64_stats_fetch_begin(&p->syncp);
1083 local = p->stats;
1084 } while (u64_stats_fetch_retry(&p->syncp, start));
1085
1086 stats->rx_packets += local.rx_packets;
1087 stats->rx_bytes += local.rx_bytes;
1088 stats->tx_packets += local.tx_packets;
1089 stats->tx_bytes += local.tx_bytes;
1090 stats->rx_errors += local.rx_errors;
1091 stats->tx_errors += local.tx_errors;
1092 stats->rx_dropped += local.rx_dropped;
1093 stats->tx_dropped += local.tx_dropped;
1094 stats->rx_noroute += local.rx_noroute;
1095 }
1096}
1097
1098static int mpls_fill_stats_af(struct sk_buff *skb,
1099 const struct net_device *dev)
1100{
1101 struct mpls_link_stats *stats;
1102 struct mpls_dev *mdev;
1103 struct nlattr *nla;
1104
1105 mdev = mpls_dev_get(dev);
1106 if (!mdev)
1107 return -ENODATA;
1108
1109 nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
1110 sizeof(struct mpls_link_stats),
1111 MPLS_STATS_UNSPEC);
1112 if (!nla)
1113 return -EMSGSIZE;
1114
1115 stats = nla_data(nla);
1116 mpls_get_stats(mdev, stats);
1117
1118 return 0;
1119}
1120
1121static size_t mpls_get_stats_af_size(const struct net_device *dev)
1122{
1123 struct mpls_dev *mdev;
1124
1125 mdev = mpls_dev_get(dev);
1126 if (!mdev)
1127 return 0;
1128
1129 return nla_total_size_64bit(sizeof(struct mpls_link_stats));
1130}
1131
1132static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
1133 u32 portid, u32 seq, int event,
1134 unsigned int flags, int type)
1135{
1136 struct nlmsghdr *nlh;
1137 struct netconfmsg *ncm;
1138 bool all = false;
1139
1140 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1141 flags);
1142 if (!nlh)
1143 return -EMSGSIZE;
1144
1145 if (type == NETCONFA_ALL)
1146 all = true;
1147
1148 ncm = nlmsg_data(nlh);
1149 ncm->ncm_family = AF_MPLS;
1150
1151 if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
1152 goto nla_put_failure;
1153
1154 if ((all || type == NETCONFA_INPUT) &&
1155 nla_put_s32(skb, NETCONFA_INPUT,
1156 mdev->input_enabled) < 0)
1157 goto nla_put_failure;
1158
1159 nlmsg_end(skb, nlh);
1160 return 0;
1161
1162nla_put_failure:
1163 nlmsg_cancel(skb, nlh);
1164 return -EMSGSIZE;
1165}
1166
1167static int mpls_netconf_msgsize_devconf(int type)
1168{
1169 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1170 + nla_total_size(4);
1171 bool all = false;
1172
1173 if (type == NETCONFA_ALL)
1174 all = true;
1175
1176 if (all || type == NETCONFA_INPUT)
1177 size += nla_total_size(4);
1178
1179 return size;
1180}
1181
1182static void mpls_netconf_notify_devconf(struct net *net, int event,
1183 int type, struct mpls_dev *mdev)
1184{
1185 struct sk_buff *skb;
1186 int err = -ENOBUFS;
1187
1188 skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
1189 if (!skb)
1190 goto errout;
1191
1192 err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
1193 if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
1196 kfree_skb(skb);
1197 goto errout;
1198 }
1199
1200 rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
1201 return;
1202errout:
1203 if (err < 0)
1204 rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
1205}
1206
1207static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
1208 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1209};
1210
1211static int mpls_netconf_valid_get_req(struct sk_buff *skb,
1212 const struct nlmsghdr *nlh,
1213 struct nlattr **tb,
1214 struct netlink_ext_ack *extack)
1215{
1216 int i, err;
1217
1218 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
1219 NL_SET_ERR_MSG_MOD(extack,
1220 "Invalid header for netconf get request");
1221 return -EINVAL;
1222 }
1223
1224 if (!netlink_strict_get_check(skb))
1225 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
1226 tb, NETCONFA_MAX,
1227 devconf_mpls_policy, extack);
1228
1229 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
1230 tb, NETCONFA_MAX,
1231 devconf_mpls_policy, extack);
1232 if (err)
1233 return err;
1234
1235 for (i = 0; i <= NETCONFA_MAX; i++) {
1236 if (!tb[i])
1237 continue;
1238
1239 switch (i) {
1240 case NETCONFA_IFINDEX:
1241 break;
1242 default:
1243 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
1244 return -EINVAL;
1245 }
1246 }
1247
1248 return 0;
1249}
1250
1251static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
1252 struct nlmsghdr *nlh,
1253 struct netlink_ext_ack *extack)
1254{
1255 struct net *net = sock_net(in_skb->sk);
1256 struct nlattr *tb[NETCONFA_MAX + 1];
1257 struct net_device *dev;
1258 struct mpls_dev *mdev;
1259 struct sk_buff *skb;
1260 int ifindex;
1261 int err;
1262
1263 err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
1264 if (err < 0)
1265 goto errout;
1266
1267 err = -EINVAL;
1268 if (!tb[NETCONFA_IFINDEX])
1269 goto errout;
1270
1271 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1272 dev = __dev_get_by_index(net, ifindex);
1273 if (!dev)
1274 goto errout;
1275
1276 mdev = mpls_dev_get(dev);
1277 if (!mdev)
1278 goto errout;
1279
1280 err = -ENOBUFS;
1281 skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
1282 if (!skb)
1283 goto errout;
1284
1285 err = mpls_netconf_fill_devconf(skb, mdev,
1286 NETLINK_CB(in_skb).portid,
1287 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1288 NETCONFA_ALL);
1289 if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
1292 kfree_skb(skb);
1293 goto errout;
1294 }
1295 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
1296errout:
1297 return err;
1298}
1299
1300static int mpls_netconf_dump_devconf(struct sk_buff *skb,
1301 struct netlink_callback *cb)
1302{
1303 const struct nlmsghdr *nlh = cb->nlh;
1304 struct net *net = sock_net(skb->sk);
1305 struct hlist_head *head;
1306 struct net_device *dev;
1307 struct mpls_dev *mdev;
1308 int idx, s_idx;
1309 int h, s_h;
1310
1311 if (cb->strict_check) {
1312 struct netlink_ext_ack *extack = cb->extack;
1313 struct netconfmsg *ncm;
1314
1315 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
1316 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
1317 return -EINVAL;
1318 }
1319
1320 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
1321 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
1322 return -EINVAL;
1323 }
1324 }
1325
1326 s_h = cb->args[0];
1327 s_idx = idx = cb->args[1];
1328
1329 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1330 idx = 0;
1331 head = &net->dev_index_head[h];
1332 rcu_read_lock();
1333 cb->seq = net->dev_base_seq;
1334 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1335 if (idx < s_idx)
1336 goto cont;
1337 mdev = mpls_dev_get(dev);
1338 if (!mdev)
1339 goto cont;
1340 if (mpls_netconf_fill_devconf(skb, mdev,
1341 NETLINK_CB(cb->skb).portid,
1342 nlh->nlmsg_seq,
1343 RTM_NEWNETCONF,
1344 NLM_F_MULTI,
1345 NETCONFA_ALL) < 0) {
1346 rcu_read_unlock();
1347 goto done;
1348 }
1349 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1350cont:
1351 idx++;
1352 }
1353 rcu_read_unlock();
1354 }
1355done:
1356 cb->args[0] = h;
1357 cb->args[1] = idx;
1358
1359 return skb->len;
1360}
1361
1362#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
1363 (&((struct mpls_dev *)0)->field)
1364
1365static int mpls_conf_proc(struct ctl_table *ctl, int write,
1366 void __user *buffer,
1367 size_t *lenp, loff_t *ppos)
1368{
1369 int oval = *(int *)ctl->data;
1370 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1371
1372 if (write) {
1373 struct mpls_dev *mdev = ctl->extra1;
1374 int i = (int *)ctl->data - (int *)mdev;
1375 struct net *net = ctl->extra2;
1376 int val = *(int *)ctl->data;
1377
1378 if (i == offsetof(struct mpls_dev, input_enabled) &&
1379 val != oval) {
1380 mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
1381 NETCONFA_INPUT, mdev);
1382 }
1383 }
1384
1385 return ret;
1386}
1387
1388static const struct ctl_table mpls_dev_table[] = {
1389 {
1390 .procname = "input",
1391 .maxlen = sizeof(int),
1392 .mode = 0644,
1393 .proc_handler = mpls_conf_proc,
1394 .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
1395 },
1396 { }
1397};
1398
1399static int mpls_dev_sysctl_register(struct net_device *dev,
1400 struct mpls_dev *mdev)
1401{
1402 char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
1403 struct net *net = dev_net(dev);
1404 struct ctl_table *table;
1405 int i;
1406
1407 table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
1408 if (!table)
1409 goto out;
1410
	/* Table data contains only offsets relative to the base of
	 * the mdev at this point, so make them absolute.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
1415 table[i].data = (char *)mdev + (uintptr_t)table[i].data;
1416 table[i].extra1 = mdev;
1417 table[i].extra2 = net;
1418 }
1419
1420 snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
1421
1422 mdev->sysctl = register_net_sysctl(net, path, table);
1423 if (!mdev->sysctl)
1424 goto free;
1425
1426 mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
1427 return 0;
1428
1429free:
1430 kfree(table);
1431out:
1432 return -ENOBUFS;
1433}
1434
1435static void mpls_dev_sysctl_unregister(struct net_device *dev,
1436 struct mpls_dev *mdev)
1437{
1438 struct net *net = dev_net(dev);
1439 struct ctl_table *table;
1440
1441 table = mdev->sysctl->ctl_table_arg;
1442 unregister_net_sysctl_table(mdev->sysctl);
1443 kfree(table);
1444
1445 mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
1446}
1447
1448static struct mpls_dev *mpls_add_dev(struct net_device *dev)
1449{
1450 struct mpls_dev *mdev;
1451 int err = -ENOMEM;
1452 int i;
1453
1454 ASSERT_RTNL();
1455
1456 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1457 if (!mdev)
1458 return ERR_PTR(err);
1459
1460 mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
1461 if (!mdev->stats)
1462 goto free;
1463
1464 for_each_possible_cpu(i) {
1465 struct mpls_pcpu_stats *mpls_stats;
1466
1467 mpls_stats = per_cpu_ptr(mdev->stats, i);
1468 u64_stats_init(&mpls_stats->syncp);
1469 }
1470
1471 mdev->dev = dev;
1472
1473 err = mpls_dev_sysctl_register(dev, mdev);
1474 if (err)
1475 goto free;
1476
1477 rcu_assign_pointer(dev->mpls_ptr, mdev);
1478
1479 return mdev;
1480
1481free:
1482 free_percpu(mdev->stats);
1483 kfree(mdev);
1484 return ERR_PTR(err);
1485}
1486
1487static void mpls_dev_destroy_rcu(struct rcu_head *head)
1488{
1489 struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
1490
1491 free_percpu(mdev->stats);
1492 kfree(mdev);
1493}
1494
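/* Mark nexthops using @dev dead and/or linkdown depending on @event and,
 * on NETDEV_UNREGISTER, remove routes whose nexthops have all lost their
 * device.
 */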
1495static void mpls_ifdown(struct net_device *dev, int event)
1496{
1497 struct mpls_route __rcu **platform_label;
1498 struct net *net = dev_net(dev);
1499 u8 alive, deleted;
1500 unsigned index;
1501
1502 platform_label = rtnl_dereference(net->mpls.platform_label);
1503 for (index = 0; index < net->mpls.platform_labels; index++) {
1504 struct mpls_route *rt = rtnl_dereference(platform_label[index]);
1505
1506 if (!rt)
1507 continue;
1508
1509 alive = 0;
1510 deleted = 0;
1511 change_nexthops(rt) {
1512 unsigned int nh_flags = nh->nh_flags;
1513
1514 if (rtnl_dereference(nh->nh_dev) != dev)
1515 goto next;
1516
1517 switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				nh_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				nh_flags |= RTNH_F_LINKDOWN;
				break;
1525 }
1526 if (event == NETDEV_UNREGISTER)
1527 RCU_INIT_POINTER(nh->nh_dev, NULL);
1528
1529 if (nh->nh_flags != nh_flags)
1530 WRITE_ONCE(nh->nh_flags, nh_flags);
1531next:
1532 if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
1533 alive++;
1534 if (!rtnl_dereference(nh->nh_dev))
1535 deleted++;
1536 } endfor_nexthops(rt);
1537
1538 WRITE_ONCE(rt->rt_nhn_alive, alive);
1539
		/* if there are no more nexthops, delete the route */
		if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
1542 mpls_route_update(net, index, NULL, NULL);
1543 }
1544}
1545
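/* Clear @flags (RTNH_F_DEAD / RTNH_F_LINKDOWN) on nexthops using @dev and
 * recompute the route's count of alive nexthops.
 */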
1546static void mpls_ifup(struct net_device *dev, unsigned int flags)
1547{
1548 struct mpls_route __rcu **platform_label;
1549 struct net *net = dev_net(dev);
1550 unsigned index;
1551 u8 alive;
1552
1553 platform_label = rtnl_dereference(net->mpls.platform_label);
1554 for (index = 0; index < net->mpls.platform_labels; index++) {
1555 struct mpls_route *rt = rtnl_dereference(platform_label[index]);
1556
1557 if (!rt)
1558 continue;
1559
1560 alive = 0;
1561 change_nexthops(rt) {
1562 unsigned int nh_flags = nh->nh_flags;
1563 struct net_device *nh_dev =
1564 rtnl_dereference(nh->nh_dev);
1565
1566 if (!(nh_flags & flags)) {
1567 alive++;
1568 continue;
1569 }
1570 if (nh_dev != dev)
1571 continue;
1572 alive++;
1573 nh_flags &= ~flags;
1574 WRITE_ONCE(nh->nh_flags, nh_flags);
1575 } endfor_nexthops(rt);
1576
1577 WRITE_ONCE(rt->rt_nhn_alive, alive);
1578 }
1579}
1580
1581static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1582 void *ptr)
1583{
1584 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1585 struct mpls_dev *mdev;
1586 unsigned int flags;
1587
1588 if (event == NETDEV_REGISTER) {
1589 mdev = mpls_add_dev(dev);
1590 if (IS_ERR(mdev))
1591 return notifier_from_errno(PTR_ERR(mdev));
1592
1593 return NOTIFY_OK;
1594 }
1595
1596 mdev = mpls_dev_get(dev);
1597 if (!mdev)
1598 return NOTIFY_OK;
1599
1600 switch (event) {
1601 case NETDEV_DOWN:
1602 mpls_ifdown(dev, event);
1603 break;
1604 case NETDEV_UP:
1605 flags = dev_get_flags(dev);
1606 if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1607 mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1608 else
1609 mpls_ifup(dev, RTNH_F_DEAD);
1610 break;
1611 case NETDEV_CHANGE:
1612 flags = dev_get_flags(dev);
1613 if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1614 mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1615 else
1616 mpls_ifdown(dev, event);
1617 break;
1618 case NETDEV_UNREGISTER:
1619 mpls_ifdown(dev, event);
1620 mdev = mpls_dev_get(dev);
1621 if (mdev) {
1622 mpls_dev_sysctl_unregister(dev, mdev);
1623 RCU_INIT_POINTER(dev->mpls_ptr, NULL);
1624 call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
1625 }
1626 break;
1627 case NETDEV_CHANGENAME:
1628 mdev = mpls_dev_get(dev);
1629 if (mdev) {
1630 int err;
1631
1632 mpls_dev_sysctl_unregister(dev, mdev);
1633 err = mpls_dev_sysctl_register(dev, mdev);
1634 if (err)
1635 return notifier_from_errno(err);
1636 }
1637 break;
1638 }
1639 return NOTIFY_OK;
1640}
1641
1642static struct notifier_block mpls_dev_notifier = {
1643 .notifier_call = mpls_dev_notify,
1644};
1645
1646static int nla_put_via(struct sk_buff *skb,
1647 u8 table, const void *addr, int alen)
1648{
1649 static const int table_to_family[NEIGH_NR_TABLES + 1] = {
1650 AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
1651 };
1652 struct nlattr *nla;
1653 struct rtvia *via;
1654 int family = AF_UNSPEC;
1655
1656 nla = nla_reserve(skb, RTA_VIA, alen + 2);
1657 if (!nla)
1658 return -EMSGSIZE;
1659
1660 if (table <= NEIGH_NR_TABLES)
1661 family = table_to_family[table];
1662
1663 via = nla_data(nla);
1664 via->rtvia_family = family;
1665 memcpy(via->rtvia_addr, addr, alen);
1666 return 0;
1667}
1668
1669int nla_put_labels(struct sk_buff *skb, int attrtype,
1670 u8 labels, const u32 label[])
1671{
1672 struct nlattr *nla;
1673 struct mpls_shim_hdr *nla_label;
1674 bool bos;
1675 int i;
1676 nla = nla_reserve(skb, attrtype, labels*4);
1677 if (!nla)
1678 return -EMSGSIZE;
1679
1680 nla_label = nla_data(nla);
1681 bos = true;
1682 for (i = labels - 1; i >= 0; i--) {
1683 nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
1684 bos = false;
1685 }
1686
1687 return 0;
1688}
1689EXPORT_SYMBOL_GPL(nla_put_labels);
1690
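/* Decode an attribute holding a stack of MPLS shim headers into an array
 * of label values, checking that TTL and TC are zero and that the bottom
 * of stack flag is only set on the last entry.
 */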
1691int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
1692 u32 label[], struct netlink_ext_ack *extack)
1693{
1694 unsigned len = nla_len(nla);
1695 struct mpls_shim_hdr *nla_label;
1696 u8 nla_labels;
1697 bool bos;
1698 int i;
1699
	/* len needs to be an even multiple of 4 (the label size). Number
	 * of labels is a u8 so check for overflow.
	 */
	if (len & 3 || len / 4 > 255) {
1704 NL_SET_ERR_MSG_ATTR(extack, nla,
1705 "Invalid length for labels attribute");
1706 return -EINVAL;
1707 }
1708
	/* limit the number of new labels allowed */
	nla_labels = len/4;
1711 if (nla_labels > max_labels) {
1712 NL_SET_ERR_MSG(extack, "Too many labels");
1713 return -EINVAL;
1714 }
1715
	/* when label == NULL, caller wants number of labels */
	if (!label)
		goto out;
1719
1720 nla_label = nla_data(nla);
1721 bos = true;
1722 for (i = nla_labels - 1; i >= 0; i--, bos = false) {
1723 struct mpls_entry_decoded dec;
1724 dec = mpls_entry_decode(nla_label + i);
1725
		/* Ensure the TTL and traffic class are clear and the
		 * bottom of stack flag is only set on the last label.
		 */
		if (dec.ttl) {
1730 NL_SET_ERR_MSG_ATTR(extack, nla,
1731 "TTL in label must be 0");
1732 return -EINVAL;
1733 }
1734
1735 if (dec.tc) {
1736 NL_SET_ERR_MSG_ATTR(extack, nla,
1737 "Traffic class in label must be 0");
1738 return -EINVAL;
1739 }
1740
1741 if (dec.bos != bos) {
1742 NL_SET_BAD_ATTR(extack, nla);
1743 if (bos) {
1744 NL_SET_ERR_MSG(extack,
1745 "BOS bit must be set in first label");
1746 } else {
1747 NL_SET_ERR_MSG(extack,
1748 "BOS bit can only be set in first label");
1749 }
1750 return -EINVAL;
1751 }
1752
1753 switch (dec.label) {
1754 case MPLS_LABEL_IMPLNULL:
			/* RFC3032: This is a label that an LSR may
			 * assign and distribute, but which never
			 * actually appears in the encapsulation.
			 */
1759 NL_SET_ERR_MSG_ATTR(extack, nla,
1760 "Implicit NULL Label (3) can not be used in encapsulation");
1761 return -EINVAL;
1762 }
1763
1764 label[i] = dec.label;
1765 }
1766out:
1767 *labels = nla_labels;
1768 return 0;
1769}
1770EXPORT_SYMBOL_GPL(nla_get_labels);
1771
1772static int rtm_to_route_config(struct sk_buff *skb,
1773 struct nlmsghdr *nlh,
1774 struct mpls_route_config *cfg,
1775 struct netlink_ext_ack *extack)
1776{
1777 struct rtmsg *rtm;
1778 struct nlattr *tb[RTA_MAX+1];
1779 int index;
1780 int err;
1781
1782 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
1783 rtm_mpls_policy, extack);
1784 if (err < 0)
1785 goto errout;
1786
1787 err = -EINVAL;
1788 rtm = nlmsg_data(nlh);
1789
1790 if (rtm->rtm_family != AF_MPLS) {
1791 NL_SET_ERR_MSG(extack, "Invalid address family in rtmsg");
1792 goto errout;
1793 }
1794 if (rtm->rtm_dst_len != 20) {
1795 NL_SET_ERR_MSG(extack, "rtm_dst_len must be 20 for MPLS");
1796 goto errout;
1797 }
1798 if (rtm->rtm_src_len != 0) {
1799 NL_SET_ERR_MSG(extack, "rtm_src_len must be 0 for MPLS");
1800 goto errout;
1801 }
1802 if (rtm->rtm_tos != 0) {
1803 NL_SET_ERR_MSG(extack, "rtm_tos must be 0 for MPLS");
1804 goto errout;
1805 }
1806 if (rtm->rtm_table != RT_TABLE_MAIN) {
1807 NL_SET_ERR_MSG(extack,
1808 "MPLS only supports the main route table");
1809 goto errout;
1810 }
	/* Any value is acceptable for rtm_protocol */

	/* As mpls uses destination specific addresses
	 * (or source specific address in the case of multicast)
	 * all addresses have universal scope.
	 */
	if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) {
1818 NL_SET_ERR_MSG(extack,
1819 "Invalid route scope - MPLS only supports UNIVERSE");
1820 goto errout;
1821 }
1822 if (rtm->rtm_type != RTN_UNICAST) {
1823 NL_SET_ERR_MSG(extack,
1824 "Invalid route type - MPLS only supports UNICAST");
1825 goto errout;
1826 }
1827 if (rtm->rtm_flags != 0) {
1828 NL_SET_ERR_MSG(extack, "rtm_flags must be 0 for MPLS");
1829 goto errout;
1830 }
1831
1832 cfg->rc_label = LABEL_NOT_SPECIFIED;
1833 cfg->rc_protocol = rtm->rtm_protocol;
1834 cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
1835 cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
1836 cfg->rc_nlflags = nlh->nlmsg_flags;
1837 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
1838 cfg->rc_nlinfo.nlh = nlh;
1839 cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
1840
1841 for (index = 0; index <= RTA_MAX; index++) {
1842 struct nlattr *nla = tb[index];
1843 if (!nla)
1844 continue;
1845
1846 switch (index) {
1847 case RTA_OIF:
1848 cfg->rc_ifindex = nla_get_u32(nla);
1849 break;
1850 case RTA_NEWDST:
1851 if (nla_get_labels(nla, MAX_NEW_LABELS,
1852 &cfg->rc_output_labels,
1853 cfg->rc_output_label, extack))
1854 goto errout;
1855 break;
1856 case RTA_DST:
1857 {
1858 u8 label_count;
1859 if (nla_get_labels(nla, 1, &label_count,
1860 &cfg->rc_label, extack))
1861 goto errout;
1862
1863 if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
1864 &cfg->rc_label, extack))
1865 goto errout;
1866 break;
1867 }
1868 case RTA_GATEWAY:
1869 NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
1870 goto errout;
1871 case RTA_VIA:
1872 {
1873 if (nla_get_via(nla, &cfg->rc_via_alen,
1874 &cfg->rc_via_table, cfg->rc_via,
1875 extack))
1876 goto errout;
1877 break;
1878 }
1879 case RTA_MULTIPATH:
1880 {
1881 cfg->rc_mp = nla_data(nla);
1882 cfg->rc_mp_len = nla_len(nla);
1883 break;
1884 }
1885 case RTA_TTL_PROPAGATE:
1886 {
1887 u8 ttl_propagate = nla_get_u8(nla);
1888
1889 if (ttl_propagate > 1) {
1890 NL_SET_ERR_MSG_ATTR(extack, nla,
1891 "RTA_TTL_PROPAGATE can only be 0 or 1");
1892 goto errout;
1893 }
1894 cfg->rc_ttl_propagate = ttl_propagate ?
1895 MPLS_TTL_PROP_ENABLED :
1896 MPLS_TTL_PROP_DISABLED;
1897 break;
1898 }
1899 default:
1900 NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute");
1901
1902 goto errout;
1903 }
1904 }
1905
1906 err = 0;
1907errout:
1908 return err;
1909}
1910
1911static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
1912 struct netlink_ext_ack *extack)
1913{
1914 struct mpls_route_config *cfg;
1915 int err;
1916
1917 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1918 if (!cfg)
1919 return -ENOMEM;
1920
1921 err = rtm_to_route_config(skb, nlh, cfg, extack);
1922 if (err < 0)
1923 goto out;
1924
1925 err = mpls_route_del(cfg, extack);
1926out:
1927 kfree(cfg);
1928
1929 return err;
1930}
1931
1932
1933static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
1934 struct netlink_ext_ack *extack)
1935{
1936 struct mpls_route_config *cfg;
1937 int err;
1938
1939 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1940 if (!cfg)
1941 return -ENOMEM;
1942
1943 err = rtm_to_route_config(skb, nlh, cfg, extack);
1944 if (err < 0)
1945 goto out;
1946
1947 err = mpls_route_add(cfg, extack);
1948out:
1949 kfree(cfg);
1950
1951 return err;
1952}
1953
1954static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1955 u32 label, struct mpls_route *rt, int flags)
1956{
1957 struct net_device *dev;
1958 struct nlmsghdr *nlh;
1959 struct rtmsg *rtm;
1960
1961 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1962 if (nlh == NULL)
1963 return -EMSGSIZE;
1964
1965 rtm = nlmsg_data(nlh);
1966 rtm->rtm_family = AF_MPLS;
1967 rtm->rtm_dst_len = 20;
1968 rtm->rtm_src_len = 0;
1969 rtm->rtm_tos = 0;
1970 rtm->rtm_table = RT_TABLE_MAIN;
1971 rtm->rtm_protocol = rt->rt_protocol;
1972 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
1973 rtm->rtm_type = RTN_UNICAST;
1974 rtm->rtm_flags = 0;
1975
1976 if (nla_put_labels(skb, RTA_DST, 1, &label))
1977 goto nla_put_failure;
1978
1979 if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
1980 bool ttl_propagate =
1981 rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
1982
1983 if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
1984 ttl_propagate))
1985 goto nla_put_failure;
1986 }
1987 if (rt->rt_nhn == 1) {
1988 const struct mpls_nh *nh = rt->rt_nh;
1989
1990 if (nh->nh_labels &&
1991 nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
1992 nh->nh_label))
1993 goto nla_put_failure;
1994 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1995 nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
1996 nh->nh_via_alen))
1997 goto nla_put_failure;
1998 dev = rtnl_dereference(nh->nh_dev);
1999 if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
2000 goto nla_put_failure;
2001 if (nh->nh_flags & RTNH_F_LINKDOWN)
2002 rtm->rtm_flags |= RTNH_F_LINKDOWN;
2003 if (nh->nh_flags & RTNH_F_DEAD)
2004 rtm->rtm_flags |= RTNH_F_DEAD;
2005 } else {
2006 struct rtnexthop *rtnh;
2007 struct nlattr *mp;
2008 u8 linkdown = 0;
2009 u8 dead = 0;
2010
2011 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
2012 if (!mp)
2013 goto nla_put_failure;
2014
2015 for_nexthops(rt) {
2016 dev = rtnl_dereference(nh->nh_dev);
2017 if (!dev)
2018 continue;
2019
2020 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
2021 if (!rtnh)
2022 goto nla_put_failure;
2023
2024 rtnh->rtnh_ifindex = dev->ifindex;
2025 if (nh->nh_flags & RTNH_F_LINKDOWN) {
2026 rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
2027 linkdown++;
2028 }
2029 if (nh->nh_flags & RTNH_F_DEAD) {
2030 rtnh->rtnh_flags |= RTNH_F_DEAD;
2031 dead++;
2032 }
2033
2034 if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
2035 nh->nh_labels,
2036 nh->nh_label))
2037 goto nla_put_failure;
2038 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
2039 nla_put_via(skb, nh->nh_via_table,
2040 mpls_nh_via(rt, nh),
2041 nh->nh_via_alen))
2042 goto nla_put_failure;
2043
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
2046 } endfor_nexthops(rt);
2047
2048 if (linkdown == rt->rt_nhn)
2049 rtm->rtm_flags |= RTNH_F_LINKDOWN;
2050 if (dead == rt->rt_nhn)
2051 rtm->rtm_flags |= RTNH_F_DEAD;
2052
2053 nla_nest_end(skb, mp);
2054 }
2055
2056 nlmsg_end(skb, nlh);
2057 return 0;
2058
2059nla_put_failure:
2060 nlmsg_cancel(skb, nlh);
2061 return -EMSGSIZE;
2062}
2063
2064#if IS_ENABLED(CONFIG_INET)
2065static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
2066 struct fib_dump_filter *filter,
2067 struct netlink_callback *cb)
2068{
2069 return ip_valid_fib_dump_req(net, nlh, filter, cb);
2070}
2071#else
2072static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
2073 struct fib_dump_filter *filter,
2074 struct netlink_callback *cb)
2075{
2076 struct netlink_ext_ack *extack = cb->extack;
2077 struct nlattr *tb[RTA_MAX + 1];
2078 struct rtmsg *rtm;
2079 int err, i;
2080
2081 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
2082 NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
2083 return -EINVAL;
2084 }
2085
2086 rtm = nlmsg_data(nlh);
2087 if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
2088 rtm->rtm_table || rtm->rtm_scope || rtm->rtm_type ||
2089 rtm->rtm_flags) {
2090 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
2091 return -EINVAL;
2092 }
2093
2094 if (rtm->rtm_protocol) {
2095 filter->protocol = rtm->rtm_protocol;
2096 filter->filter_set = 1;
2097 cb->answer_flags = NLM_F_DUMP_FILTERED;
2098 }
2099
2100 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2101 rtm_mpls_policy, extack);
2102 if (err < 0)
2103 return err;
2104
2105 for (i = 0; i <= RTA_MAX; ++i) {
2106 int ifindex;
2107
2108 if (i == RTA_OIF) {
2109 ifindex = nla_get_u32(tb[i]);
2110 filter->dev = __dev_get_by_index(net, ifindex);
2111 if (!filter->dev)
2112 return -ENODEV;
2113 filter->filter_set = 1;
2114 } else if (tb[i]) {
2115 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
2116 return -EINVAL;
2117 }
2118 }
2119
2120 return 0;
2121}
2122#endif
2123
2124static bool mpls_rt_uses_dev(struct mpls_route *rt,
2125 const struct net_device *dev)
2126{
2127 struct net_device *nh_dev;
2128
2129 if (rt->rt_nhn == 1) {
2130 struct mpls_nh *nh = rt->rt_nh;
2131
2132 nh_dev = rtnl_dereference(nh->nh_dev);
2133 if (dev == nh_dev)
2134 return true;
2135 } else {
2136 for_nexthops(rt) {
2137 nh_dev = rtnl_dereference(nh->nh_dev);
2138 if (nh_dev == dev)
2139 return true;
2140 } endfor_nexthops(rt);
2141 }
2142
2143 return false;
2144}
2145
2146static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
2147{
2148 const struct nlmsghdr *nlh = cb->nlh;
2149 struct net *net = sock_net(skb->sk);
2150 struct mpls_route __rcu **platform_label;
2151 struct fib_dump_filter filter = {};
2152 unsigned int flags = NLM_F_MULTI;
2153 size_t platform_labels;
2154 unsigned int index;
2155
2156 ASSERT_RTNL();
2157
2158 if (cb->strict_check) {
2159 int err;
2160
2161 err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
2162 if (err < 0)
2163 return err;
2164
		/* for MPLS, there is only 1 table with fixed type and flags.
		 * If either are set in the filter then return nothing.
		 */
		if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
2169 (filter.rt_type && filter.rt_type != RTN_UNICAST) ||
2170 filter.flags)
2171 return skb->len;
2172 }
2173
2174 index = cb->args[0];
2175 if (index < MPLS_LABEL_FIRST_UNRESERVED)
2176 index = MPLS_LABEL_FIRST_UNRESERVED;
2177
2178 platform_label = rtnl_dereference(net->mpls.platform_label);
2179 platform_labels = net->mpls.platform_labels;
2180
2181 if (filter.filter_set)
2182 flags |= NLM_F_DUMP_FILTERED;
2183
2184 for (; index < platform_labels; index++) {
2185 struct mpls_route *rt;
2186
2187 rt = rtnl_dereference(platform_label[index]);
2188 if (!rt)
2189 continue;
2190
2191 if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
2192 (filter.protocol && rt->rt_protocol != filter.protocol))
2193 continue;
2194
2195 if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
2196 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2197 index, rt, flags) < 0)
2198 break;
2199 }
2200 cb->args[0] = index;
2201
2202 return skb->len;
2203}
2204
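/* Upper bound on the netlink message size needed to dump @rt. */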
2205static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
2206{
2207 size_t payload =
2208 NLMSG_ALIGN(sizeof(struct rtmsg))
2209 + nla_total_size(4)
2210 + nla_total_size(1);
2211
2212 if (rt->rt_nhn == 1) {
2213 struct mpls_nh *nh = rt->rt_nh;
2214
2215 if (nh->nh_dev)
2216 payload += nla_total_size(4);
2217 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
2218 payload += nla_total_size(2 + nh->nh_via_alen);
2219 if (nh->nh_labels)
2220 payload += nla_total_size(nh->nh_labels * 4);
2221 } else {
		/* each nexthop is packed in an attribute */
		size_t nhsize = 0;
2224
2225 for_nexthops(rt) {
2226 if (!rtnl_dereference(nh->nh_dev))
2227 continue;
2228 nhsize += nla_total_size(sizeof(struct rtnexthop));
2229
2230 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
2231 nhsize += nla_total_size(2 + nh->nh_via_alen);
2232 if (nh->nh_labels)
2233 nhsize += nla_total_size(nh->nh_labels * 4);
2234 } endfor_nexthops(rt);
2235
2236 payload += nla_total_size(nhsize);
2237 }
2238
2239 return payload;
2240}
2241
2242static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
2243 struct nlmsghdr *nlh, struct net *net, u32 portid,
2244 unsigned int nlm_flags)
2245{
2246 struct sk_buff *skb;
2247 u32 seq = nlh ? nlh->nlmsg_seq : 0;
2248 int err = -ENOBUFS;
2249
2250 skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
2251 if (skb == NULL)
2252 goto errout;
2253
2254 err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
2255 if (err < 0) {
		/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
2258 kfree_skb(skb);
2259 goto errout;
2260 }
2261 rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
2262
2263 return;
2264errout:
2265 if (err < 0)
2266 rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
2267}
2268
2269static int mpls_valid_getroute_req(struct sk_buff *skb,
2270 const struct nlmsghdr *nlh,
2271 struct nlattr **tb,
2272 struct netlink_ext_ack *extack)
2273{
2274 struct rtmsg *rtm;
2275 int i, err;
2276
2277 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
2278 NL_SET_ERR_MSG_MOD(extack,
2279 "Invalid header for get route request");
2280 return -EINVAL;
2281 }
2282
2283 if (!netlink_strict_get_check(skb))
2284 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
2285 rtm_mpls_policy, extack);
2286
2287 rtm = nlmsg_data(nlh);
2288 if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
2289 rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
2290 rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
2291 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
2292 return -EINVAL;
2293 }
2294 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
2295 NL_SET_ERR_MSG_MOD(extack,
2296 "Invalid flags for get route request");
2297 return -EINVAL;
2298 }
2299
2300 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2301 rtm_mpls_policy, extack);
2302 if (err)
2303 return err;
2304
2305 if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
2306 NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
2307 return -EINVAL;
2308 }
2309
2310 for (i = 0; i <= RTA_MAX; i++) {
2311 if (!tb[i])
2312 continue;
2313
2314 switch (i) {
2315 case RTA_DST:
2316 case RTA_NEWDST:
2317 break;
2318 default:
2319 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
2320 return -EINVAL;
2321 }
2322 }
2323
2324 return 0;
2325}
2326
static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	u32 portid = NETLINK_CB(in_skb).portid;
	u32 in_label = LABEL_NOT_SPECIFIED;
	struct nlattr *tb[RTA_MAX + 1];
	u32 labels[MAX_NEW_LABELS];
	struct mpls_shim_hdr *hdr;
	unsigned int hdr_size = 0;
	struct net_device *dev;
	struct mpls_route *rt;
	struct rtmsg *rtm, *r;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct mpls_nh *nh;
	u8 n_labels;
	int err;

	err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(in_nlh);

	if (tb[RTA_DST]) {
		u8 label_count;

		if (nla_get_labels(tb[RTA_DST], 1, &label_count,
				   &in_label, extack)) {
			err = -EINVAL;
			goto errout;
		}

		if (!mpls_label_ok(net, &in_label, extack)) {
			err = -EINVAL;
			goto errout;
		}
	}

	rt = mpls_route_input_rcu(net, in_label);
	if (!rt) {
		err = -ENETUNREACH;
		goto errout;
	}

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
		if (!skb) {
			err = -ENOBUFS;
			goto errout;
		}

		err = mpls_dump_route(skb, portid, in_nlh->nlmsg_seq,
				      RTM_NEWROUTE, in_label, rt, 0);
		if (err < 0) {
			/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
			WARN_ON(err == -EMSGSIZE);
			goto errout_free;
		}

		return rtnl_unicast(skb, net, portid);
	}

	if (tb[RTA_NEWDST]) {
		if (nla_get_labels(tb[RTA_NEWDST], MAX_NEW_LABELS, &n_labels,
				   labels, extack) != 0) {
			err = -EINVAL;
			goto errout;
		}

		hdr_size = n_labels * sizeof(struct mpls_shim_hdr);
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	skb->protocol = htons(ETH_P_MPLS_UC);

	if (hdr_size) {
		bool bos;
		int i;

		if (skb_cow(skb, hdr_size)) {
			err = -ENOBUFS;
			goto errout_free;
		}

		skb_reserve(skb, hdr_size);
		skb_push(skb, hdr_size);
		skb_reset_network_header(skb);

		/* Push new labels so nexthop selection sees the stack */
		hdr = mpls_hdr(skb);
		bos = true;
		for (i = n_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(labels[i],
						   1, 0, bos);
			bos = false;
		}
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh) {
		err = -ENETUNREACH;
		goto errout_free;
	}

	if (hdr_size) {
		skb_pull(skb, hdr_size);
		skb_reset_network_header(skb);
	}

	nlh = nlmsg_put(skb, portid, in_nlh->nlmsg_seq,
			RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh) {
		err = -EMSGSIZE;
		goto errout_free;
	}

	r = nlmsg_data(nlh);
	r->rtm_family = AF_MPLS;
	r->rtm_dst_len = 20;
	r->rtm_src_len = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = RTN_UNICAST;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = rt->rt_protocol;
	r->rtm_flags = 0;

	if (nla_put_labels(skb, RTA_DST, 1, &in_label))
		goto nla_put_failure;

	if (nh->nh_labels &&
	    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
			   nh->nh_label))
		goto nla_put_failure;

	if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
	    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
			nh->nh_via_alen))
		goto nla_put_failure;
	dev = rtnl_dereference(nh->nh_dev);
	if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	err = rtnl_unicast(skb, net, portid);
errout:
	return err;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	err = -EMSGSIZE;
errout_free:
	kfree_skb(skb);
	return err;
}

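/* Resize the per-namespace platform label table to @limit entries.  Routes
 * above the new limit are deleted, the surviving entries are copied across,
 * and when the table grows past the reserved values the IPv4/IPv6
 * explicit-null labels are pre-populated with routes pointing at the
 * loopback device.
 */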
static int resize_platform_label_table(struct net *net, size_t limit)
{
	size_t size = sizeof(struct mpls_route *) * limit;
	size_t old_limit;
	size_t cp_size;
	struct mpls_route __rcu **labels = NULL, **old;
	struct mpls_route *rt0 = NULL, *rt2 = NULL;
	unsigned index;

	if (size) {
		labels = kvzalloc(size, GFP_KERNEL);
		if (!labels)
			goto nolabels;
	}

	/* In case the predefined labels need to be populated */
	if (limit > MPLS_LABEL_IPV4NULL) {
		struct net_device *lo = net->loopback_dev;
		rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
		if (IS_ERR(rt0))
			goto nort0;
		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
		rt0->rt_protocol = RTPROT_KERNEL;
		rt0->rt_payload_type = MPT_IPV4;
		rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt0->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}
	if (limit > MPLS_LABEL_IPV6NULL) {
		struct net_device *lo = net->loopback_dev;
		rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
		if (IS_ERR(rt2))
			goto nort2;
		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
		rt2->rt_protocol = RTPROT_KERNEL;
		rt2->rt_payload_type = MPT_IPV6;
		rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt2->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}

	rtnl_lock();

	old = rtnl_dereference(net->mpls.platform_label);
	old_limit = net->mpls.platform_labels;

	/* Free any labels beyond the new table */
	for (index = limit; index < old_limit; index++)
		mpls_route_update(net, index, NULL, NULL);

	/* Copy over the old labels */
	cp_size = size;
	if (old_limit < limit)
		cp_size = old_limit * sizeof(struct mpls_route *);

	memcpy(labels, old, cp_size);

	/* If needed set the predefined labels */
	if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
	    (limit > MPLS_LABEL_IPV6NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
		rt2 = NULL;
	}

	if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
	    (limit > MPLS_LABEL_IPV4NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
		rt0 = NULL;
	}

	/* Update the global pointers */
	net->mpls.platform_labels = limit;
	rcu_assign_pointer(net->mpls.platform_label, labels);

	rtnl_unlock();

	/* Free any unused predefined labels */
	mpls_rt_free(rt2);
	mpls_rt_free(rt0);

	/* Wait for readers to move off the old table, then release it */
	if (old) {
		synchronize_rcu();
		kvfree(old);
	}
	return 0;

nort2:
	mpls_rt_free(rt0);
nort0:
	kvfree(labels);
nolabels:
	return -ENOMEM;
}

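/* sysctl handler for net.mpls.platform_labels: validate the requested size
 * against [0, label_limit] and resize the platform label table on a
 * successful write.
 */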
static int mpls_platform_labels(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = table->data;
	int platform_labels = net->mpls.platform_labels;
	int ret;
	struct ctl_table tmp = {
		.procname = table->procname,
		.data = &platform_labels,
		.maxlen = sizeof(int),
		.mode = table->mode,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &label_limit,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = resize_platform_label_table(net, platform_labels);

	return ret;
}

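/* Store the offset of a per-namespace field as the sysctl .data pointer;
 * mpls_net_init() turns these offsets into absolute addresses for each
 * struct net.
 */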
#define MPLS_NS_SYSCTL_OFFSET(field) \
	(&((struct net *)0)->field)

static const struct ctl_table mpls_table[] = {
	{
		.procname = "platform_labels",
		.data = NULL,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = mpls_platform_labels,
	},
	{
		.procname = "ip_ttl_propagate",
		.data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "default_ttl",
		.data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = &ttl_max,
	},
	{ }
};

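/* Per-namespace setup: start with an empty platform label table, default
 * TTL behaviour, and a freshly registered net/mpls sysctl directory.
 */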
static int mpls_net_init(struct net *net)
{
	struct ctl_table *table;
	int i;

	net->mpls.platform_labels = 0;
	net->mpls.platform_label = NULL;
	net->mpls.ip_ttl_propagate = 1;
	net->mpls.default_ttl = 255;

	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	/* Table data contains only offsets relative to the base of
	 * struct net at this point, so make them absolute before
	 * registering the sysctls.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
		table[i].data = (char *)net + (uintptr_t)table[i].data;

	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
	if (net->mpls.ctl == NULL) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

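/* Per-namespace teardown: unregister the sysctls and free every route left
 * in the platform label table.
 */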
static void mpls_net_exit(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	struct ctl_table *table;
	unsigned int index;

	table = net->mpls.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->mpls.ctl);
	kfree(table);

	/* An rcu grace period has passed since there was a device in
	 * the network namespace (and thus the last in flight packet)
	 * left this network namespace.  This is because
	 * unregister_netdevice_many and netdev_run_todo has completed
	 * for each network device that was in this network namespace.
	 *
	 * As such no additional rcu synchronization is necessary when
	 * freeing the platform_label table.
	 */
	rtnl_lock();
	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = 0; index < platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
		RCU_INIT_POINTER(platform_label[index], NULL);
		mpls_notify_route(net, index, rt, NULL, NULL);
		mpls_rt_free(rt);
	}
	rtnl_unlock();

	kvfree(platform_label);
}

static struct pernet_operations mpls_net_ops = {
	.init = mpls_net_init,
	.exit = mpls_net_exit,
};

static struct rtnl_af_ops mpls_af_ops __read_mostly = {
	.family = AF_MPLS,
	.fill_stats_af = mpls_fill_stats_af,
	.get_stats_af_size = mpls_get_stats_af_size,
};

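/* Module init: register the per-namespace operations, the device notifier,
 * the MPLS packet handler and the rtnetlink message handlers.  Failure to
 * add the MPLS-over-GRE encapsulation ops is logged but not fatal.
 */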
static int __init mpls_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);

	err = register_pernet_subsys(&mpls_net_ops);
	if (err)
		goto out;

	err = register_netdevice_notifier(&mpls_dev_notifier);
	if (err)
		goto out_unregister_pernet;

	dev_add_pack(&mpls_packet_type);

	rtnl_af_register(&mpls_af_ops);

	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
			     mpls_rtm_newroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
			     mpls_rtm_delroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
			     mpls_getroute, mpls_dump_routes, 0);
	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
			     mpls_netconf_get_devconf,
			     mpls_netconf_dump_devconf, 0);
	err = ipgre_tunnel_encap_add_mpls_ops();
	if (err)
		pr_err("Can't add mpls over gre tunnel ops\n");

	err = 0;
out:
	return err;

out_unregister_pernet:
	unregister_pernet_subsys(&mpls_net_ops);
	goto out;
}
module_init(mpls_init);

static void __exit mpls_exit(void)
{
	rtnl_unregister_all(PF_MPLS);
	rtnl_af_unregister(&mpls_af_ops);
	dev_remove_pack(&mpls_packet_type);
	unregister_netdevice_notifier(&mpls_dev_notifier);
	unregister_pernet_subsys(&mpls_net_ops);
	ipgre_tunnel_encap_del_mpls_ops();
}
module_exit(mpls_exit);

MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);