/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

#define DSA_MAX_NUM_OFFLOADING_BRIDGES          BITS_PER_LONG

enum {
        DSA_NOTIFIER_AGEING_TIME,
        DSA_NOTIFIER_BRIDGE_JOIN,
        DSA_NOTIFIER_BRIDGE_LEAVE,
        DSA_NOTIFIER_FDB_ADD,
        DSA_NOTIFIER_FDB_DEL,
        DSA_NOTIFIER_HOST_FDB_ADD,
        DSA_NOTIFIER_HOST_FDB_DEL,
        DSA_NOTIFIER_HSR_JOIN,
        DSA_NOTIFIER_HSR_LEAVE,
        DSA_NOTIFIER_LAG_CHANGE,
        DSA_NOTIFIER_LAG_JOIN,
        DSA_NOTIFIER_LAG_LEAVE,
        DSA_NOTIFIER_MDB_ADD,
        DSA_NOTIFIER_MDB_DEL,
        DSA_NOTIFIER_HOST_MDB_ADD,
        DSA_NOTIFIER_HOST_MDB_DEL,
        DSA_NOTIFIER_VLAN_ADD,
        DSA_NOTIFIER_VLAN_DEL,
        DSA_NOTIFIER_MTU,
        DSA_NOTIFIER_TAG_PROTO,
        DSA_NOTIFIER_MRP_ADD,
        DSA_NOTIFIER_MRP_DEL,
        DSA_NOTIFIER_MRP_ADD_RING_ROLE,
        DSA_NOTIFIER_MRP_DEL_RING_ROLE,
        DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
        DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
        unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
        struct net_device *br;
        int tree_index;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
        int sw_index;
        int port;
        const unsigned char *addr;
        u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
        const struct switchdev_obj_port_mdb *mdb;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
        struct net_device *lag;
        int sw_index;
        int port;

        struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
        const struct switchdev_obj_port_vlan *vlan;
        int sw_index;
        int port;
        struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
        bool targeted_match;
        int sw_index;
        int port;
        int mtu;
};
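
/* Illustration only (a hedged sketch, not code from this tree): port.c-style
 * callers raise one of the events above by filling the matching info
 * structure and passing it to dsa_tree_notify(), declared later in this
 * header. The function name below is made up for the example.
 *
 *      static int example_port_mtu_change(struct dsa_port *dp, int new_mtu)
 *      {
 *              struct dsa_notifier_mtu_info info = {
 *                      .sw_index = dp->ds->index,
 *                      .port = dp->index,
 *                      .mtu = new_mtu,
 *              };
 *
 *              return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 *      }
 */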

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
        const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
        const struct switchdev_obj_mrp *mrp;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_ring_role_info {
        const struct switchdev_obj_ring_role_mrp *mrp;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
        int tree_index;
        int sw_index;
        int port;
        u16 vid;
};

struct dsa_switchdev_event_work {
        struct dsa_switch *ds;
        int port;
        struct net_device *dev;
        struct work_struct work;
        unsigned long event;
        /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
         * SWITCHDEV_FDB_DEL_TO_DEVICE
         */
        unsigned char addr[ETH_ALEN];
        u16 vid;
        bool host_addr;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
        struct net_device *hsr;
        int sw_index;
        int port;
};

struct dsa_slave_priv {
        /* Copy of CPU port xmit for faster access in slave transmit hot path */
        struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);

        struct gro_cells        gcells;

        /* DSA port data, such as switch, port index, etc. */
        struct dsa_port         *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll          *netpoll;
#endif

        /* TC context */
        struct list_head        mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
        return ops->needed_headroom + ops->needed_tailroom;
}
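
/* Example (hypothetical tagger, for illustration only; mandatory
 * dsa_device_ops members are omitted): a tagging protocol that prepends a
 * 4-byte header and appends nothing would declare
 *
 *      static const struct dsa_device_ops example_netdev_ops = {
 *              .name            = "example",
 *              .needed_headroom = 4,
 *      };
 *
 * and dsa_tag_protocol_overhead() would report 4, the extra room that the
 * master and slave MTU calculations must accommodate.
 */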

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
                                                       int device, int port)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch_tree *dst = cpu_dp->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds->index == device && dp->index == port &&
                    dp->type == DSA_PORT_TYPE_USER)
                        return dp->slave;

        return NULL;
}
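
/* Typical use (an illustrative sketch, not lifted from any specific tagger):
 * a tagging driver's rcv hook, after decoding the source switch and port from
 * the tag, reassigns the frame to the matching slave interface:
 *
 *      skb->dev = dsa_master_find_slave(dev, source_device, source_port);
 *      if (!skb->dev)
 *              return NULL;
 *
 * where 'source_device' and 'source_port' stand for whatever fields the
 * tagging protocol carries.
 */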

/* port.c */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack);
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack);
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_add(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_del(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(struct dsa_port *dp,
                          struct switchdev_brport_flags flags,
                          struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;

static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
                                                 const struct net_device *dev)
{
        return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
                                            const struct net_device *bridge_dev)
{
        /* The port is connected to a bridge, and the event was emitted for
         * that bridge.
         */
        return dp->bridge_dev == bridge_dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
                                                 const struct net_device *dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge_port(dp, dev))
                        return true;

        return false;
}

/* Returns true if any port of this tree offloads the given bridge */
static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
                                            const struct net_device *bridge_dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge(dp, bridge_dev))
                        return true;

        return false;
}

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
int dsa_slave_manage_vlan_filtering(struct net_device *dev,
                                    bool vlan_filtering);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);

        return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dp->cpu_dp->master;
}
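
/* Illustrative sketch of how a tagger's xmit hook typically uses the helpers
 * above (the function name is made up for the example):
 *
 *      static struct sk_buff *example_xmit(struct sk_buff *skb,
 *                                          struct net_device *dev)
 *      {
 *              struct dsa_port *dp = dsa_slave_to_port(dev);
 *
 *              ... prepend the tag, encoding dp->index as the destination
 *                  port of the switch ...
 *
 *              return skb;
 *      }
 */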

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
        struct dsa_port *dp = dsa_slave_to_port(skb->dev);
        struct net_device *br = dp->bridge_dev;
        struct net_device *dev = skb->dev;
        struct net_device *upper_dev;
        u16 vid, pvid, proto;
        int err;

        if (!br || br_vlan_enabled(br))
                return skb;

        err = br_vlan_get_proto(br, &proto);
        if (err)
                return skb;

        /* Move VLAN tag from data to hwaccel */
        if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
                skb = skb_vlan_untag(skb);
                if (!skb)
                        return NULL;
        }

        if (!skb_vlan_tag_present(skb))
                return skb;

        vid = skb_vlan_tag_get_id(skb);

        /* We already run under an RCU read-side critical section since
         * we are called from netif_receive_skb_list_internal().
         */
        err = br_vlan_get_pvid_rcu(dev, &pvid);
        if (err)
                return skb;

        if (vid != pvid)
                return skb;

        /* The sad part about attempting to untag from DSA is that we
         * don't know, unless we check, if the skb will end up in
         * the bridge's data path - br_allowed_ingress() - or not.
         * For example, there might be an 8021q upper for the
         * default_pvid of the bridge, which will steal VLAN-tagged traffic
         * from the bridge's data path. This is a configuration that DSA
         * supports because vlan_filtering is 0. In that case, we should
         * definitely keep the tag, to make sure it keeps working.
         */
        upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
        if (upper_dev)
                return skb;

        __vlan_hwaccel_clear_tag(skb);

        return skb;
}
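
/* Illustrative sketch (hypothetical tagger): a driver whose switch cannot
 * untag the bridge pvid in hardware would typically finish its rcv hook with
 *
 *      skb->dev = dsa_master_find_slave(dev, source_device, source_port);
 *      if (!skb->dev)
 *              return NULL;
 *
 *      return dsa_untag_bridge_pvid(skb);
 */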

/* For switches without hardware support for DSA tagging, to be able to
 * support termination through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
        struct dsa_port *cpu_dp = master->dsa_ptr;
        struct dsa_switch_tree *dst = cpu_dp->dst;
        struct bridge_vlan_info vinfo;
        struct net_device *slave;
        struct dsa_port *dp;
        int err;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->type != DSA_PORT_TYPE_USER)
                        continue;

                if (!dp->bridge_dev)
                        continue;

                if (dp->stp_state != BR_STATE_LEARNING &&
                    dp->stp_state != BR_STATE_FORWARDING)
                        continue;

                /* Since the bridge might learn this packet, keep the CPU port
                 * affinity with the port that will be used for the reply on
                 * xmit.
                 */
                if (dp->cpu_dp != cpu_dp)
                        continue;

                slave = dp->slave;

                err = br_vlan_get_info_rcu(slave, vid, &vinfo);
                if (err)
                        continue;

                return slave;
        }

        return NULL;
}

/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge_dev
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
        struct dsa_port *dp = dsa_slave_to_port(skb->dev);

        skb->offload_fwd_mark = !!(dp->bridge_dev);
}
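
/* Illustrative sketch: a tagger whose hardware cannot report whether a frame
 * was already forwarded in the switch typically just calls this from its rcv
 * hook once skb->dev has been set to the slave interface:
 *
 *      dsa_default_offload_fwd_mark(skb);
 */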

/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *       >>>>>>>   v
 *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *       >>>>>>>   +-----------------------+-----------------------+-------+
 *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                                                                         ^
 *                                                                         |
 *                                                                 skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
        memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
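
/* Illustrative RX sequence for an EtherType-based tagger (hypothetical
 * example; 'len' stands for that protocol's tag length):
 *
 *      if (unlikely(!pskb_may_pull(skb, len)))
 *              return NULL;
 *
 *      ... parse the tag at dsa_etype_header_pos_rx(skb), defined below ...
 *
 *      skb_pull_rcsum(skb, len);
 *      dsa_strip_etype_header(skb, len);
 */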

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
        memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}

/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
        return skb->data - 2;
}

/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
        return skb->data + 2 * ETH_ALEN;
}
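
/* Illustrative TX sequence for an EtherType-based tagger (hypothetical
 * example): make room for the tag, shift the MAC addresses towards the head
 * of the packet, then write the tag where the EtherType used to be:
 *
 *      u8 *tag;
 *
 *      skb_push(skb, len);
 *      dsa_alloc_etype_header(skb, len);
 *      tag = dsa_etype_header_pos_tx(skb);
 *      ... fill in the 'len' bytes at 'tag' ...
 */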

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops);
int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num);

/* tag_8021q.c */
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
                              struct dsa_notifier_bridge_info *info);
int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
                               struct dsa_notifier_bridge_info *info);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
                                  struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
                                  struct dsa_notifier_tag_8021q_vlan_info *info);

extern struct list_head dsa_tree_list;

#endif