linux/net/bridge/br_forward.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Forwarding decision
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to the originating port or when forwarding is disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        struct net_bridge_vlan_group *vg;

        vg = nbp_vlan_group_rcu(p);
        return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
                nbp_switchdev_allowed_egress(p, skb) &&
                !br_skb_isolated(p, skb);
}

int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        skb_push(skb, ETH_HLEN);
        if (!is_skb_forwardable(skb->dev, skb))
                goto drop;

        br_drop_fake_rtable(skb);

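        /* On VLAN-tagged frames that still need their checksum filled in,
         * point the network header just past the VLAN tag(s) so it lines
         * up with the encapsulated protocol header before the frame hits
         * the egress path.
         */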
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (skb->protocol == htons(ETH_P_8021Q) ||
             skb->protocol == htons(ETH_P_8021AD))) {
                int depth;

                if (!__vlan_get_protocol(skb, skb->protocol, &depth))
                        goto drop;

                skb_set_network_header(skb, depth);
        }

        dev_queue_xmit(skb);

        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
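        /* Clear any stale timestamp before the frame is queued on the
         * egress device; a leftover value could otherwise be misread as
         * a transmit launch time.
         */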
        skb->tstamp = 0;
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
                       net, sk, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

static void __br_forward(const struct net_bridge_port *to,
                         struct sk_buff *skb, bool local_orig)
{
        struct net_bridge_vlan_group *vg;
        struct net_device *indev;
        struct net *net;
        int br_hook;

        vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, to, vg, skb);
        if (!skb)
                return;

        indev = skb->dev;
        skb->dev = to->dev;
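        /* Forwarded traffic traverses NF_BR_FORWARD with the ingress
         * device as indev; locally originated traffic traverses
         * NF_BR_LOCAL_OUT with no input device.
         */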
        if (!local_orig) {
                if (skb_warn_if_lro(skb)) {
                        kfree_skb(skb);
                        return;
                }
                br_hook = NF_BR_FORWARD;
                skb_forward_csum(skb);
                net = dev_net(indev);
        } else {
                if (unlikely(netpoll_tx_running(to->br->dev))) {
                        skb_push(skb, ETH_HLEN);
                        if (!is_skb_forwardable(skb->dev, skb))
                                kfree_skb(skb);
                        else
                                br_netpoll_send_skb(to, skb);
                        return;
                }
                br_hook = NF_BR_LOCAL_OUT;
                net = dev_net(skb->dev);
                indev = NULL;
        }

        NF_HOOK(NFPROTO_BRIDGE, br_hook,
                net, NULL, skb, indev, skb->dev,
                br_forward_finish);
}

static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return -ENOMEM;
        }

        __br_forward(prev, skb, local_orig);
        return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, bool local_rcv, bool local_orig)
{
        if (unlikely(!to))
                goto out;

        /* redirect to backup link if the destination port is down */
        if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
                struct net_bridge_port *backup_port;

                backup_port = rcu_dereference(to->backup_port);
                if (unlikely(!backup_port))
                        goto out;
                to = backup_port;
        }

        if (should_deliver(to, skb)) {
                if (local_rcv)
                        deliver_clone(to, skb, local_orig);
                else
                        __br_forward(to, skb, local_orig);
                return;
        }

out:
        if (!local_rcv)
                kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

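/* Deliver to the previously remembered port via a clone and return @p as the
 * new candidate, so the final recipient can consume the original skb without
 * an extra copy.
 */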
static struct net_bridge_port *maybe_deliver(
        struct net_bridge_port *prev, struct net_bridge_port *p,
        struct sk_buff *skb, bool local_orig)
{
        u8 igmp_type = br_multicast_igmp_type(skb);
        int err;

        if (!should_deliver(p, skb))
                return prev;

        if (!prev)
                goto out;

        err = deliver_clone(prev, skb, local_orig);
        if (err)
                return ERR_PTR(err);
out:
        br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);

        return p;
}

/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
              enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port *p;

        list_for_each_entry_rcu(p, &br->port_list, list) {
                /* Do not flood unicast traffic to ports that turn it off,
                 * nor other traffic if flooding is off, except for traffic
                 * we originate.
                 */
                switch (pkt_type) {
                case BR_PKT_UNICAST:
                        if (!(p->flags & BR_FLOOD))
                                continue;
                        break;
                case BR_PKT_MULTICAST:
                        if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
                                continue;
                        break;
                case BR_PKT_BROADCAST:
                        if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
                                continue;
                        break;
                }

                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
                if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
                    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
                        continue;

                prev = maybe_deliver(prev, p, skb, local_orig);
                if (IS_ERR(prev))
                        goto out;
        }

        if (!prev)
                goto out;

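        /* prev now holds the last eligible port: clone to it if the skb is
         * also needed for local receive, otherwise hand over the original.
         */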
        if (local_rcv)
                deliver_clone(prev, skb, local_orig);
        else
                __br_forward(prev, skb, local_orig);
        return;

out:
        if (!local_rcv)
                kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
                               const unsigned char *addr, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        const unsigned char *src = eth_hdr(skb)->h_source;

        if (!should_deliver(p, skb))
                return;

        /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
        if (skb->dev == p->dev && ether_addr_equal(src, addr))
                return;

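        /* Take a full copy rather than a clone: the destination MAC may be
         * rewritten below, which must not touch the shared original.
         */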
        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return;
        }

        if (!is_broadcast_ether_addr(addr))
                memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

        __br_forward(p, skb, local_orig);
}

/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                        struct sk_buff *skb,
                        bool local_rcv, bool local_orig)
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
        struct hlist_node *rp;

        rp = rcu_dereference(hlist_first_rcu(&br->router_list));
        p = mdst ? rcu_dereference(mdst->ports) : NULL;
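        /* Merge-walk the mdb port group list and the router port list by
         * comparing port pointers, delivering to each port at most once and
         * preferring unicast delivery on ports with BR_MULTICAST_TO_UNICAST.
         */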
        while (p || rp) {
                struct net_bridge_port *port, *lport, *rport;

                lport = p ? p->port : NULL;
                rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);

                if ((unsigned long)lport > (unsigned long)rport) {
                        port = lport;

                        if (port->flags & BR_MULTICAST_TO_UNICAST) {
                                maybe_deliver_addr(lport, skb, p->eth_addr,
                                                   local_orig);
                                goto delivered;
                        }
                } else {
                        port = rport;
                }

                prev = maybe_deliver(prev, port, skb, local_orig);
                if (IS_ERR(prev))
                        goto out;
delivered:
                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
                        rp = rcu_dereference(hlist_next_rcu(rp));
        }

        if (!prev)
                goto out;

        if (local_rcv)
                deliver_clone(prev, skb, local_orig);
        else
                __br_forward(prev, skb, local_orig);
        return;

out:
        if (!local_rcv)
                kfree_skb(skb);
}
#endif