linux/net/bridge/br_mdb.c
// SPDX-License-Identifier: GPL-2.0
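/* Bridge multicast database (MDB) netlink interface: dumping of MDB and
 * multicast-router entries via RTM_GETMDB, and addition/removal of static
 * entries via RTM_NEWMDB/RTM_DELMDB, with switchdev offload notification.
 */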
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

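/* Dump the bridge's multicast router ports as an MDBA_ROUTER nested
 * attribute: one MDBA_ROUTER_PORT per port, carrying the router timer
 * value and the port's multicast_router mode.
 */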
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                               struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;
        struct nlattr *nest, *port_nest;

        if (!br->multicast_router || hlist_empty(&br->router_list))
                return 0;

        nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
        if (nest == NULL)
                return -EMSGSIZE;

        hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
                if (!p)
                        continue;
                port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
                if (!port_nest)
                        goto fail;
                if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
                    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
                                br_timer_value(&p->multicast_router_timer)) ||
                    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
                               p->multicast_router)) {
                        nla_nest_cancel(skb, port_nest);
                        goto fail;
                }
                nla_nest_end(skb, port_nest);
        }

        nla_nest_end(skb, nest);
        return 0;
fail:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

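/* Translate internal port-group flags (MDB_PG_FLAGS_*) into the UAPI
 * br_mdb_entry state and flags fields.
 */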
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
        e->state = flags & MDB_PG_FLAGS_PERMANENT;
        e->flags = 0;
        if (flags & MDB_PG_FLAGS_OFFLOAD)
                e->flags |= MDB_FLAGS_OFFLOAD;
        if (flags & MDB_PG_FLAGS_FAST_LEAVE)
                e->flags |= MDB_FLAGS_FAST_LEAVE;
}

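/* Convert a UAPI br_mdb_entry group address into the bridge's internal
 * br_ip representation.
 */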
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
        memset(ip, 0, sizeof(struct br_ip));
        ip->vid = entry->vid;
        ip->proto = entry->addr.proto;
        if (ip->proto == htons(ETH_P_IP))
                ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        else
                ip->u.ip6 = entry->addr.u.ip6;
#endif
}

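/* Fill one MDBA_MDB_ENTRY_INFO attribute, either for a port group (p set)
 * or for the bridge's own host-joined entry (p == NULL), including the
 * group timer value.
 */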
static int __mdb_fill_info(struct sk_buff *skb,
                           struct net_bridge_mdb_entry *mp,
                           struct net_bridge_port_group *p)
{
        struct timer_list *mtimer;
        struct nlattr *nest_ent;
        struct br_mdb_entry e;
        u8 flags = 0;
        int ifindex;

        memset(&e, 0, sizeof(e));
        if (p) {
                ifindex = p->port->dev->ifindex;
                mtimer = &p->timer;
                flags = p->flags;
        } else {
                ifindex = mp->br->dev->ifindex;
                mtimer = &mp->timer;
        }

        __mdb_entry_fill_flags(&e, flags);
        e.ifindex = ifindex;
        e.vid = mp->addr.vid;
        if (mp->addr.proto == htons(ETH_P_IP))
                e.addr.u.ip4 = mp->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        if (mp->addr.proto == htons(ETH_P_IPV6))
                e.addr.u.ip6 = mp->addr.u.ip6;
#endif
        e.addr.proto = mp->addr.proto;
        nest_ent = nla_nest_start_noflag(skb,
                                         MDBA_MDB_ENTRY_INFO);
        if (!nest_ent)
                return -EMSGSIZE;

        if (nla_put_nohdr(skb, sizeof(e), &e) ||
            nla_put_u32(skb,
                        MDBA_MDB_EATTR_TIMER,
                        br_timer_value(mtimer))) {
                nla_nest_cancel(skb, nest_ent);
                return -EMSGSIZE;
        }
        nla_nest_end(skb, nest_ent);

        return 0;
}

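/* Walk the bridge's MDB list under RCU and emit one MDBA_MDB_ENTRY per
 * group, resuming from cb->args[1] when the previous dump message filled
 * up.
 */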
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
{
        int idx = 0, s_idx = cb->args[1], err = 0;
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_mdb_entry *mp;
        struct nlattr *nest, *nest2;

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return 0;

        nest = nla_nest_start_noflag(skb, MDBA_MDB);
        if (nest == NULL)
                return -EMSGSIZE;

        hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
                struct net_bridge_port_group *p;
                struct net_bridge_port_group __rcu **pp;

                if (idx < s_idx)
                        goto skip;

                nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
                if (!nest2) {
                        err = -EMSGSIZE;
                        break;
                }

                if (mp->host_joined) {
                        err = __mdb_fill_info(skb, mp, NULL);
                        if (err) {
                                nla_nest_cancel(skb, nest2);
                                break;
                        }
                }

                for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
                      pp = &p->next) {
                        if (!p->port)
                                continue;

                        err = __mdb_fill_info(skb, mp, p);
                        if (err) {
                                nla_nest_cancel(skb, nest2);
                                goto out;
                        }
                }
                nla_nest_end(skb, nest2);
skip:
                idx++;
        }

out:
        cb->args[1] = idx;
        nla_nest_end(skb, nest);
        return err;
}

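/* Strict validation of an RTM_GETMDB dump request: a bare br_port_msg
 * header with no device filter and no trailing attributes.
 */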
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
                                 struct netlink_ext_ack *extack)
{
        struct br_port_msg *bpm;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
                NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
                return -EINVAL;
        }

        bpm = nlmsg_data(nlh);
        if (bpm->ifindex) {
                NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
                return -EINVAL;
        }
        if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
                NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
                return -EINVAL;
        }

        return 0;
}

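/* RTM_GETMDB dump handler: iterate all bridge devices in the namespace
 * under RCU and emit their MDB and router-port lists, one NLM_F_MULTI
 * message per bridge.
 */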
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net_device *dev;
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh = NULL;
        int idx = 0, s_idx;

        if (cb->strict_check) {
                int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

                if (err < 0)
                        return err;
        }

        s_idx = cb->args[0];

        rcu_read_lock();

        cb->seq = net->dev_base_seq;

        for_each_netdev_rcu(net, dev) {
                if (dev->priv_flags & IFF_EBRIDGE) {
                        struct br_port_msg *bpm;

                        if (idx < s_idx)
                                goto skip;

                        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_GETMDB,
                                        sizeof(*bpm), NLM_F_MULTI);
                        if (nlh == NULL)
                                break;

                        bpm = nlmsg_data(nlh);
                        memset(bpm, 0, sizeof(*bpm));
                        bpm->ifindex = dev->ifindex;
                        if (br_mdb_fill_info(skb, cb, dev) < 0)
                                goto out;
                        if (br_rports_fill_info(skb, cb, dev) < 0)
                                goto out;

                        cb->args[1] = 0;
                        nlmsg_end(skb, nlh);
                skip:
                        idx++;
                }
        }

out:
        if (nlh)
                nlmsg_end(skb, nlh);
        rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
}

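/* Build a single RTM_NEWMDB/RTM_DELMDB notification message carrying one
 * br_mdb_entry.
 */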
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
                                   struct net_device *dev,
                                   struct br_mdb_entry *entry, u32 pid,
                                   u32 seq, int type, unsigned int flags)
{
        struct nlmsghdr *nlh;
        struct br_port_msg *bpm;
        struct nlattr *nest, *nest2;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;

        bpm = nlmsg_data(nlh);
        memset(bpm, 0, sizeof(*bpm));
        bpm->family  = AF_BRIDGE;
        bpm->ifindex = dev->ifindex;
        nest = nla_nest_start_noflag(skb, MDBA_MDB);
        if (nest == NULL)
                goto cancel;
        nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
        if (nest2 == NULL)
                goto end;

        if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
                goto end;

        nla_nest_end(skb, nest2);
        nla_nest_end(skb, nest);
        nlmsg_end(skb, nlh);
        return 0;

end:
        nla_nest_end(skb, nest);
cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct br_port_msg))
                + nla_total_size(sizeof(struct br_mdb_entry));
}

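/* Completion context for the deferred switchdev add: br_mdb_complete()
 * runs once the driver has processed the object and, on success, marks
 * the matching port group as offloaded.
 */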
struct br_mdb_complete_info {
        struct net_bridge_port *port;
        struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
        struct br_mdb_complete_info *data = priv;
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_port_group *p;
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port *port = data->port;
        struct net_bridge *br = port->br;

        if (err)
                goto err;

        spin_lock_bh(&br->multicast_lock);
        mp = br_mdb_ip_get(br, &data->ip);
        if (!mp)
                goto out;
        for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port != port)
                        continue;
                p->flags |= MDB_PG_FLAGS_OFFLOAD;
        }
out:
        spin_unlock_bh(&br->multicast_lock);
err:
        kfree(priv);
}

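/* Propagate host-joined groups to switchdev: map the group address to a
 * multicast MAC and add/delete a SWITCHDEV_OBJ_ID_HOST_MDB object on a
 * bridge port (lower device). br_mdb_switchdev_host() below applies this
 * to every port of the bridge.
 */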
static void br_mdb_switchdev_host_port(struct net_device *dev,
                                       struct net_device *lower_dev,
                                       struct br_mdb_entry *entry, int type)
{
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_HOST_MDB,
                        .flags = SWITCHDEV_F_DEFER,
                },
                .vid = entry->vid,
        };

        if (entry->addr.proto == htons(ETH_P_IP))
                ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
        else
                ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

        mdb.obj.orig_dev = dev;
        switch (type) {
        case RTM_NEWMDB:
                switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
                break;
        case RTM_DELMDB:
                switchdev_port_obj_del(lower_dev, &mdb.obj);
                break;
        }
}

static void br_mdb_switchdev_host(struct net_device *dev,
                                  struct br_mdb_entry *entry, int type)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, lower_dev, iter)
                br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

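/* Notify both switchdev drivers and RTNLGRP_MDB listeners about an MDB
 * entry change. Port entries become SWITCHDEV_OBJ_ID_PORT_MDB objects on
 * the port device; host entries are pushed to all ports via
 * br_mdb_switchdev_host().
 */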
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
                            struct br_mdb_entry *entry, int type)
{
        struct br_mdb_complete_info *complete_info;
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_PORT_MDB,
                        .flags = SWITCHDEV_F_DEFER,
                },
                .vid = entry->vid,
        };
        struct net_device *port_dev;
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;

        port_dev = __dev_get_by_index(net, entry->ifindex);
        if (entry->addr.proto == htons(ETH_P_IP))
                ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
        else
                ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

        mdb.obj.orig_dev = port_dev;
        if (p && port_dev && type == RTM_NEWMDB) {
                complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
                if (complete_info) {
                        complete_info->port = p;
                        __mdb_entry_to_br_ip(entry, &complete_info->ip);
                        mdb.obj.complete_priv = complete_info;
                        mdb.obj.complete = br_mdb_complete;
                        if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
                                kfree(complete_info);
                }
        } else if (p && port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
        }

        if (!p)
                br_mdb_switchdev_host(dev, entry, type);

        skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
                goto errout;

        err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
        return;
errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
                   struct br_ip *group, int type, u8 flags)
{
        struct br_mdb_entry entry;

        memset(&entry, 0, sizeof(entry));
        if (port)
                entry.ifindex = port->dev->ifindex;
        else
                entry.ifindex = dev->ifindex;
        entry.addr.proto = group->proto;
        entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
        entry.addr.u.ip6 = group->u.ip6;
#endif
        entry.vid = group->vid;
        __mdb_entry_fill_flags(&entry, flags);
        __br_mdb_notify(dev, port, &entry, type);
}

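/* Build a single router-port notification: an MDBA_ROUTER nest with one
 * MDBA_ROUTER_PORT attribute.
 */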
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
                                   struct net_device *dev,
                                   int ifindex, u32 pid,
                                   u32 seq, int type, unsigned int flags)
{
        struct br_port_msg *bpm;
        struct nlmsghdr *nlh;
        struct nlattr *nest;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;

        bpm = nlmsg_data(nlh);
        memset(bpm, 0, sizeof(*bpm));
        bpm->family = AF_BRIDGE;
        bpm->ifindex = dev->ifindex;
        nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
        if (!nest)
                goto cancel;

        if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
                goto end;

        nla_nest_end(skb, nest);
        nlmsg_end(skb, nlh);
        return 0;

end:
        nla_nest_end(skb, nest);
cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct br_port_msg))
                + nla_total_size(sizeof(__u32));
}

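/* Notify RTNLGRP_MDB listeners of a change in a port's multicast router
 * status; ifindex 0 is sent when no port is given.
 */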
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type)
{
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
        int ifindex;

        ifindex = port ? port->dev->ifindex : 0;
        skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
                goto errout;

        err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
        return;

errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

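/* Sanity-check a user-supplied br_mdb_entry: non-zero port ifindex, an
 * IPv4/IPv6 multicast group that is not reserved (link-local IPv4,
 * all-nodes IPv6), a permanent or temporary state, and a valid VLAN id.
 */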
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
        if (entry->ifindex == 0)
                return false;

        if (entry->addr.proto == htons(ETH_P_IP)) {
                if (!ipv4_is_multicast(entry->addr.u.ip4))
                        return false;
                if (ipv4_is_local_multicast(entry->addr.u.ip4))
                        return false;
#if IS_ENABLED(CONFIG_IPV6)
        } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
                if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
                        return false;
#endif
        } else
                return false;
        if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
                return false;
        if (entry->vid >= VLAN_VID_MASK)
                return false;

        return true;
}

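/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: resolve the bridge
 * device from the ancillary header and extract the MDBA_SET_ENTRY
 * attribute.
 */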
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
                        struct net_device **pdev, struct br_mdb_entry **pentry)
{
        struct net *net = sock_net(skb->sk);
        struct br_mdb_entry *entry;
        struct br_port_msg *bpm;
        struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
        struct net_device *dev;
        int err;

        err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
                                     MDBA_SET_ENTRY_MAX, NULL, NULL);
        if (err < 0)
                return err;

        bpm = nlmsg_data(nlh);
        if (bpm->ifindex == 0) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
                return -EINVAL;
        }

        dev = __dev_get_by_index(net, bpm->ifindex);
        if (dev == NULL) {
                pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
                return -ENODEV;
        }

        if (!(dev->priv_flags & IFF_EBRIDGE)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
                return -EOPNOTSUPP;
        }

        *pdev = dev;

        if (!tb[MDBA_SET_ENTRY] ||
            nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
                return -EINVAL;
        }

        entry = nla_data(tb[MDBA_SET_ENTRY]);
        if (!is_valid_mdb_entry(entry)) {
                pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
                return -EINVAL;
        }

        *pentry = entry;
        return 0;
}

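/* Add a group entry under br->multicast_lock: either mark the bridge as
 * host-joined (port == NULL) or insert a new port group, arming the
 * membership timer for temporary entries.
 */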
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
                            struct br_ip *group, unsigned char state)
{
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
        unsigned long now = jiffies;
        int err;

        mp = br_mdb_ip_get(br, group);
        if (!mp) {
                mp = br_multicast_new_group(br, group);
                err = PTR_ERR_OR_ZERO(mp);
                if (err)
                        return err;
        }

        /* host join */
        if (!port) {
                /* don't allow any flags for host-joined groups */
                if (state)
                        return -EINVAL;
                if (mp->host_joined)
                        return -EEXIST;

                br_multicast_host_join(mp, false);

                return 0;
        }

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
                        return -EEXIST;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }

        p = br_multicast_new_port_group(port, group, *pp, state, NULL);
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);

        return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
                        struct br_mdb_entry *entry)
{
        struct br_ip ip;
        struct net_device *dev;
        struct net_bridge_port *p = NULL;
        int ret;

        if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return -EINVAL;

        if (entry->ifindex != br->dev->ifindex) {
                dev = __dev_get_by_index(net, entry->ifindex);
                if (!dev)
                        return -ENODEV;

                p = br_port_get_rtnl(dev);
                if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                        return -EINVAL;
        }

        __mdb_entry_to_br_ip(entry, &ip);

        spin_lock_bh(&br->multicast_lock);
        ret = br_mdb_add_group(br, p, &ip, entry->state);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
}

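/* RTM_NEWMDB handler. With VLAN filtering enabled and no VLAN given, the
 * entry is replicated across all VLANs configured on the target port.
 */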
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                      struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
        struct net_bridge_vlan *v;
        struct net_bridge *br;
        int err;

        err = br_mdb_parse(skb, nlh, &dev, &entry);
        if (err < 0)
                return err;

        br = netdev_priv(dev);

        if (entry->ifindex != br->dev->ifindex) {
                pdev = __dev_get_by_index(net, entry->ifindex);
                if (!pdev)
                        return -ENODEV;

                p = br_port_get_rtnl(pdev);
                if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                        return -EINVAL;
                vg = nbp_vlan_group(p);
        } else {
                vg = br_vlan_group(br);
        }

        /* If vlan filtering is enabled and VLAN is not specified
         * install mdb entry on all vlans configured on the port.
         */
        if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_add(net, br, entry);
                        if (err)
                                break;
                        __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
                }
        } else {
                err = __br_mdb_add(net, br, entry);
                if (!err)
                        __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
        }

        return err;
}

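/* Remove a single MDB entry under br->multicast_lock: either the
 * host-join (ifindex matches the bridge itself) or the port group
 * matching the entry's ifindex, letting the group expire once no ports
 * or host-join remain.
 */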
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
        struct br_ip ip;
        int err = -EINVAL;

        if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return -EINVAL;

        __mdb_entry_to_br_ip(entry, &ip);

        spin_lock_bh(&br->multicast_lock);
        mp = br_mdb_ip_get(br, &ip);
        if (!mp)
                goto unlock;

        /* host leave */
        if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
                br_multicast_host_leave(mp, false);
                err = 0;
                if (!mp->ports && netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                goto unlock;
        }

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (!p->port || p->port->dev->ifindex != entry->ifindex)
                        continue;

                if (p->port->state == BR_STATE_DISABLED)
                        goto unlock;

                __mdb_entry_fill_flags(entry, p->flags);
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
                kfree_rcu(p, rcu);
                err = 0;

                if (!mp->ports && !mp->host_joined &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                break;
        }

unlock:
        spin_unlock_bh(&br->multicast_lock);
        return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
                      struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
        struct net_bridge_vlan *v;
        struct net_bridge *br;
        int err;

        err = br_mdb_parse(skb, nlh, &dev, &entry);
        if (err < 0)
                return err;

        br = netdev_priv(dev);

        if (entry->ifindex != br->dev->ifindex) {
                pdev = __dev_get_by_index(net, entry->ifindex);
                if (!pdev)
                        return -ENODEV;

                p = br_port_get_rtnl(pdev);
                if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                        return -EINVAL;
                vg = nbp_vlan_group(p);
        } else {
                vg = br_vlan_group(br);
        }

        /* If vlan filtering is enabled and VLAN is not specified
         * delete mdb entry on all vlans configured on the port.
         */
        if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
                                __br_mdb_notify(dev, p, entry, RTM_DELMDB);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
                        __br_mdb_notify(dev, p, entry, RTM_DELMDB);
        }

        return err;
}

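/* Register/unregister the PF_BRIDGE RTM_GETMDB/RTM_NEWMDB/RTM_DELMDB
 * rtnetlink handlers.
 */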
void br_mdb_init(void)
{
        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
        rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
        rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
        rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}