#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* guards rxe_dev_list */
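
/* Find the rxe device, if any, that is layered on top of @ndev. */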
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (rxe->ndev == ndev) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

struct rxe_dev *get_rxe_by_name(const char *name)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (!strcmp(name, rxe->ib_dev.name)) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

struct rxe_recv_sockets recv_sockets;
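
/*
 * Derive an EUI-64 identifier from the netdev's MAC address by flipping
 * the universal/local bit and inserting 0xff, 0xfe in the middle, as for
 * an IPv6 interface identifier.  Used for both the node and port GUIDs.
 */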
static __be64 rxe_mac_to_eui64(struct net_device *ndev)
{
	unsigned char *mac_addr = ndev->dev_addr;
	__be64 eui64;
	unsigned char *dst = (unsigned char *)&eui64;

	dst[0] = mac_addr[0] ^ 2;
	dst[1] = mac_addr[1];
	dst[2] = mac_addr[2];
	dst[3] = 0xff;
	dst[4] = 0xfe;
	dst[5] = mac_addr[3];
	dst[6] = mac_addr[4];
	dst[7] = mac_addr[5];

	return eui64;
}

static __be64 node_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}

static __be64 port_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}

static struct device *dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}

static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}
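
/* Look up an IPv4 route for an outgoing RoCEv2 packet. */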
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
						recv_sockets.sk6->sk, &ndst,
						&fl6))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif
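
/*
 * Receive handler for the RoCEv2 UDP tunnel sockets.  Fills in the
 * per-skb rxe_pkt_info and hands the packet to the rxe receive path.
 */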
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe = net_to_rxe(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	return rxe_rcv(skb);
drop:
	kfree_skb(skb);
	return 0;
}
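
/*
 * Create a kernel UDP socket bound to @port and register
 * rxe_udp_encap_recv() as its encapsulation receive handler.
 */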
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = {0};
	struct udp_tunnel_sock_cfg tnl_cfg = {0};

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}
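
/*
 * Push the UDP header onto the skb.  The network header is added
 * afterwards by prepare_ipv4_hdr() or prepare_ipv6_hdr().
 */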
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
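
/*
 * Fill in the outer UDP and IPv4 headers of an outgoing packet from the
 * address vector.  A packet whose source and destination addresses match
 * is flagged for local loopback.
 */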
static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route4(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);
	return 0;
}

static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route6(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit);
	return 0;
}
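
/* Build the network headers and compute the header ICRC for a packet. */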
static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		   struct sk_buff *skb, u32 *crc)
{
	int err = 0;
	struct rxe_av *av = rxe_get_av(pkt);

	if (av->network_type == RDMA_NETWORK_IPV4)
		err = prepare4(rxe, pkt, skb, av);
	else if (av->network_type == RDMA_NETWORK_IPV6)
		err = prepare6(rxe, pkt, skb, av);

	*crc = rxe_icrc_hdr(pkt, skb);

	return err;
}
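
/*
 * Destructor for transmitted skbs: drop the QP's in-flight skb count
 * and, if the QP is waiting for skbs and the count has fallen below the
 * low watermark, reschedule the request task.
 */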
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);
}

static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct rxe_av *av;
	int err;

	av = rxe_get_av(pkt);

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	nskb->destructor = rxe_skb_tx_dtor;
	nskb->sk = pkt->qp->sk->sk;

	/* balanced by the atomic_dec_return() in rxe_skb_tx_dtor() */
	atomic_inc(&pkt->qp->skb_out);

	if (av->network_type == RDMA_NETWORK_IPV4) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
	} else if (av->network_type == RDMA_NETWORK_IPV6) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk,
				    nskb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
		kfree_skb(nskb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	kfree_skb(skb);

	return 0;
}

static int loopback(struct sk_buff *skb)
{
	return rxe_rcv(skb);
}

static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
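
/*
 * Allocate an skb with room for the link, network and UDP headers plus
 * @paylen bytes of RoCE payload, and initialize the associated
 * rxe_pkt_info.
 */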
static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				   int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb;

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
			GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));

	skb->dev = rxe->ndev;
	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

	memset(pkt->hdr, 0, paylen);

	return skb;
}

static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
				       unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static struct rxe_ifc_ops ifc_ops = {
	.node_guid = node_guid,
	.port_guid = port_guid,
	.dma_device = dma_device,
	.mcast_add = mcast_add,
	.mcast_delete = mcast_delete,
	.prepare = prepare,
	.send = send,
	.loopback = loopback,
	.init_packet = init_packet,
	.parent_name = parent_name,
	.link_layer = link_layer,
};
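
/*
 * Create an rxe device layered on @ndev, register it via rxe_add() and
 * add it to the global device list.
 */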
struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
	if (!rxe)
		return NULL;

	rxe->ifc_ops = &ifc_ops;
	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return NULL;
	}

	spin_lock_bh(&dev_list_lock);
	list_add_tail(&rxe->list, &rxe_dev_list);
	spin_unlock_bh(&dev_list_lock);
	return rxe;
}

void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}
EXPORT_SYMBOL(rxe_remove_all);

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;
	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	pr_info("set %s active\n", rxe->ib_dev.name);
}

void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;
	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	pr_info("set %s down\n", rxe->ib_dev.name);
}
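
/*
 * Netdev notifier: tear down the rxe device when the underlying
 * net_device is unregistered, and track link state and MTU changes.
 */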
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = net_to_rxe(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		rxe_remove(rxe);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}
out:
	return NOTIFY_OK;
}

struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}
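
/*
 * Bring up the IPv4 and IPv6 tunnel sockets and register the netdev
 * notifier; on failure, unwind through rxe_net_exit().
 */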
int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}
	return 0;
err_out:
	rxe_net_exit();
	return err;
}