linux/net/dsa/tag_dsa.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Regular and Ethertype DSA tagging
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * Regular DSA
 * -----------
 *
 * For untagged (in 802.1Q terms) packets, the switch will splice in
 * the tag between the SA and the ethertype of the original
 * packet. Tagged frames will instead have their outermost .1Q tag
 * converted to a DSA tag. It expects the same layout when receiving
 * packets from the CPU.
 *
 * Example:
 *
 *     .----.----.----.---------
 * Pu: | DA | SA | ET | Payload ...
 *     '----'----'----'---------
 *       6    6    2       N
 *     .----.----.--------.-----.----.---------
 * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
 *     '----'----'--------'-----'----'---------
 *       6    6       2      2    2       N
 *     .----.----.-----.----.---------
 * Pd: | DA | SA | DSA | ET | Payload ...
 *     '----'----'-----'----'---------
 *       6    6     4    2       N
 *
 * No matter if a packet is received untagged (Pu) or tagged (Pt),
 * they will both have the same layout (Pd) when they are sent to the
 * CPU. This is done by ignoring 802.3, replacing the ethertype field
 * with more metadata, among which is a bit to signal if the original
 * packet was tagged or not.
 *
 * Ethertype DSA
 * -------------
 * Uses the exact same tag format as regular DSA, but also includes a
 * proper ethertype field (which the mv88e6xxx driver sets to
 * ETH_P_EDSA/0xdada) followed by two zero bytes:
 *
 * .----.----.--------.--------.-----.----.---------
 * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
 * '----'----'--------'--------'-----'----'---------
 *   6    6       2        2      4    2       N
 */
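
/*
 * A rough sketch of the 4-byte DSA tag itself, as far as it can be
 * inferred from the bit operations in this file (not from a
 * datasheet), for the commands handled here:
 *
 *  byte 0: [7:6] command, [5] 'tagged' flag, [4:0] device (switch) index
 *  byte 1: [7:3] port, [2] trunk flag (FORWARD) or, together with
 *          bit 1, code[2:1] (TO_CPU), [0] CFI
 *  byte 2: [7:5] PCP (tagged frames), [4] code[0] (TO_CPU),
 *          [3:0] VID[11:8]
 *  byte 3: [7:0] VID[7:0]
 */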

#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "dsa_priv.h"

#define DSA_HLEN        4

/**
 * enum dsa_cmd - DSA Command
 * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
 *     the CPU port. This is needed to implement control protocols,
 *     e.g. STP and LLDP, that must not allow those control packets to
 *     be switched according to the normal rules.
 * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
 *     port, ignoring all the barriers that the switch normally
 *     enforces (VLANs, STP port states etc.). No source address
 *     learning takes place. "sudo send packet"
 * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
 *     user configured ingress or egress monitor criteria. These are
 *     forwarded by the switch tree to the user configured ingress or
 *     egress monitor port, which can be set to the CPU port or a
 *     regular port. If the destination is a regular port, the tag
 *     will be removed before egressing the port. If the destination
 *     is the CPU port, the tag will not be removed.
 * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
 *     through the switch tree, including the flows that are directed
 *     towards the CPU. Its device/port tuple encodes the original
 *     source port on which the packet ingressed. It can also be used
 *     on transmit by the CPU to defer the forwarding decision to the
 *     hardware, based on the current config of PVT/VTU/ATU
 *     etc. Source address learning takes place if enabled on the
 *     receiving DSA/CPU port.
 */
enum dsa_cmd {
        DSA_CMD_TO_CPU     = 0,
        DSA_CMD_FROM_CPU   = 1,
        DSA_CMD_TO_SNIFFER = 2,
        DSA_CMD_FORWARD    = 3
};
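
/*
 * Note that this tagger only ever emits FROM_CPU and FORWARD in
 * dsa_xmit_ll(), and only accepts FORWARD and TO_CPU in dsa_rcv_ll();
 * frames carrying any other command are dropped on receive.
 */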

/**
 * enum dsa_code - TO_CPU Code
 *
 * @DSA_CODE_MGMT_TRAP: DA was classified as a management
 *     address. Typical examples include STP BPDUs and LLDP.
 * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
 * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
 * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
 *     the device. Typical examples are matching on DA/SA/VID and DHCP
 *     snooping.
 * @DSA_CODE_ARP_MIRROR: The name says it all really.
 * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
 *     particular policy was set to trigger a mirror instead of a
 *     trap.
 * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
 * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
 *
 * A 3-bit code is used to relay why a particular frame was sent to
 * the CPU. We only use this to determine if the packet was mirrored
 * or trapped, i.e. whether the packet has been forwarded by hardware
 * or not.
 *
 * This is the superset of all possible codes. Any particular device
 * may only implement a subset.
 */
enum dsa_code {
        DSA_CODE_MGMT_TRAP     = 0,
        DSA_CODE_FRAME2REG     = 1,
        DSA_CODE_IGMP_MLD_TRAP = 2,
        DSA_CODE_POLICY_TRAP   = 3,
        DSA_CODE_ARP_MIRROR    = 4,
        DSA_CODE_POLICY_MIRROR = 5,
        DSA_CODE_RESERVED_6    = 6,
        DSA_CODE_RESERVED_7    = 7
};
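
/*
 * Note that the 3-bit code is not stored contiguously in the tag: as
 * reconstructed in dsa_rcv_ll() below, bits 2:1 sit in bits 2:1 of the
 * second tag byte, while bit 0 is carried in bit 4 of the third byte.
 */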

static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                                   u8 extra)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u8 tag_dev, tag_port;
        enum dsa_cmd cmd;
        u8 *dsa_header;

        if (skb->offload_fwd_mark) {
                struct dsa_switch_tree *dst = dp->ds->dst;

                cmd = DSA_CMD_FORWARD;

                /* When offloading forwarding for a bridge, inject FORWARD
                 * packets on behalf of a virtual switch device with an index
                 * past the physical switches.
                 */
                tag_dev = dst->last_switch + 1 + dp->bridge_num;
                tag_port = 0;
        } else {
                cmd = DSA_CMD_FROM_CPU;
                tag_dev = dp->ds->index;
                tag_port = dp->index;
        }

        if (skb->protocol == htons(ETH_P_8021Q)) {
                if (extra) {
                        skb_push(skb, extra);
                        dsa_alloc_etype_header(skb, extra);
                }

                /* Construct tagged DSA tag from 802.1Q tag. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;
                dsa_header[0] = (cmd << 6) | 0x20 | tag_dev;
                dsa_header[1] = tag_port << 3;

                /* Move CFI field from byte 2 to byte 1. */
                if (dsa_header[2] & 0x10) {
                        dsa_header[1] |= 0x01;
                        dsa_header[2] &= ~0x10;
                }
        } else {
                struct net_device *br = dp->bridge_dev;
                u16 vid;

                vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;

                skb_push(skb, DSA_HLEN + extra);
                dsa_alloc_etype_header(skb, DSA_HLEN + extra);

                /* Construct DSA header from untagged frame. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;

                dsa_header[0] = (cmd << 6) | tag_dev;
                dsa_header[1] = tag_port << 3;
                dsa_header[2] = vid >> 8;
                dsa_header[3] = vid & 0xff;
        }

        return skb;
}
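
/*
 * Illustrative example only: an untagged skb sent by the CPU out of
 * standalone port 3 on switch 0 would leave dsa_xmit_ll() with:
 *
 *   dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | 0 = 0x40
 *   dsa_header[1] = 3 << 3                      = 0x18
 *   dsa_header[2..3] = MV88E6XXX_VID_STANDALONE, split as VID[11:8]
 *                      and VID[7:0]
 */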

static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
                                  u8 extra)
{
        bool trap = false, trunk = false;
        int source_device, source_port;
        enum dsa_code code;
        enum dsa_cmd cmd;
        u8 *dsa_header;

        /* The ethertype field is part of the DSA header. */
        dsa_header = dsa_etype_header_pos_rx(skb);

        cmd = dsa_header[0] >> 6;
        switch (cmd) {
        case DSA_CMD_FORWARD:
                trunk = !!(dsa_header[1] & 4);
                break;

        case DSA_CMD_TO_CPU:
                code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);

                switch (code) {
                case DSA_CODE_FRAME2REG:
                        /* Remote management is not implemented yet,
                         * drop.
                         */
                        return NULL;
                case DSA_CODE_ARP_MIRROR:
                case DSA_CODE_POLICY_MIRROR:
                        /* Mark mirrored packets to notify any upper
                         * device (like a bridge) that forwarding has
                         * already been done by hardware.
                         */
                        break;
                case DSA_CODE_MGMT_TRAP:
                case DSA_CODE_IGMP_MLD_TRAP:
                case DSA_CODE_POLICY_TRAP:
                        /* Traps have, by definition, not been
                         * forwarded by hardware, so don't mark them.
                         */
                        trap = true;
                        break;
                default:
                        /* Reserved code, this could be anything. Drop
                         * seems like the safest option.
                         */
                        return NULL;
                }

                break;

        default:
                return NULL;
        }

        source_device = dsa_header[0] & 0x1f;
        source_port = (dsa_header[1] >> 3) & 0x1f;

        if (trunk) {
                struct dsa_port *cpu_dp = dev->dsa_ptr;

                /* The exact source port is not available in the tag,
                 * so we inject the frame directly on the upper
                 * team/bond.
                 */
                skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
        } else {
                skb->dev = dsa_master_find_slave(dev, source_device,
                                                 source_port);
        }

        if (!skb->dev)
                return NULL;

        /* When using LAG offload, skb->dev is not a DSA slave interface,
         * so we cannot call dsa_default_offload_fwd_mark and we need to
         * special-case it.
         */
        if (trunk)
                skb->offload_fwd_mark = true;
        else if (!trap)
                dsa_default_offload_fwd_mark(skb);

        /* If the 'tagged' bit is set, convert the DSA tag to an 802.1Q
         * tag, and delete the ethertype (extra) if applicable. If the
         * 'tagged' bit is cleared, delete the DSA tag, and the ethertype
         * if applicable.
         */
        if (dsa_header[0] & 0x20) {
                u8 new_header[4];

                /* Insert 802.1Q ethertype and copy the VLAN-related
                 * fields, but clear the bit that will hold CFI (since
                 * DSA uses that bit location for another purpose).
                 */
                new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
                new_header[1] = ETH_P_8021Q & 0xff;
                new_header[2] = dsa_header[2] & ~0x10;
                new_header[3] = dsa_header[3];

                /* Move CFI bit from its place in the DSA header to
                 * its 802.1Q-designated place.
                 */
                if (dsa_header[1] & 0x01)
                        new_header[2] |= 0x10;

                /* Update packet checksum if skb is CHECKSUM_COMPLETE. */
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        __wsum c = skb->csum;
                        c = csum_add(c, csum_partial(new_header + 2, 2, 0));
                        c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
                        skb->csum = c;
                }

                memcpy(dsa_header, new_header, DSA_HLEN);

                if (extra)
                        dsa_strip_etype_header(skb, extra);
        } else {
                skb_pull_rcsum(skb, DSA_HLEN);
                dsa_strip_etype_header(skb, DSA_HLEN + extra);
        }

        return skb;
}

#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)

static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
        return dsa_xmit_ll(skb, dev, 0);
}

static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
        if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
                return NULL;

        return dsa_rcv_ll(skb, dev, 0);
}

static const struct dsa_device_ops dsa_netdev_ops = {
        .name     = "dsa",
        .proto    = DSA_TAG_PROTO_DSA,
        .xmit     = dsa_xmit,
        .rcv      = dsa_rcv,
        .needed_headroom = DSA_HLEN,
};

DSA_TAG_DRIVER(dsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
#endif  /* CONFIG_NET_DSA_TAG_DSA */

#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)

#define EDSA_HLEN 8

static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u8 *edsa_header;

        skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
        if (!skb)
                return NULL;

        edsa_header = dsa_etype_header_pos_tx(skb);
        edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
        edsa_header[1] = ETH_P_EDSA & 0xff;
        edsa_header[2] = 0x00;
        edsa_header[3] = 0x00;
        return skb;
}

static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev)
{
        if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
                return NULL;

        skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);

        return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
}

static const struct dsa_device_ops edsa_netdev_ops = {
        .name     = "edsa",
        .proto    = DSA_TAG_PROTO_EDSA,
        .xmit     = edsa_xmit,
        .rcv      = edsa_rcv,
        .needed_headroom = EDSA_HLEN,
};

DSA_TAG_DRIVER(edsa_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
#endif  /* CONFIG_NET_DSA_TAG_EDSA */

static struct dsa_tag_driver *dsa_tag_drivers[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
        &DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
        &DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
#endif
};

module_dsa_tag_drivers(dsa_tag_drivers);

MODULE_LICENSE("GPL");