linux/net/hsr/hsr_device.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *      2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR or PRP devices.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"

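/* is_admin_up() checks the administrative state (IFF_UP); is_slave_up() also
 * requires the link to be operationally up.
 */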
static bool is_admin_up(struct net_device *dev)
{
        return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
        return dev && is_admin_up(dev) && netif_oper_up(dev);
}

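/* Update the master device's operstate, notifying userspace only if the
 * state actually changes.
 */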
static void __hsr_set_operstate(struct net_device *dev, int transition)
{
        write_lock_bh(&dev_base_lock);
        if (dev->operstate != transition) {
                dev->operstate = transition;
                write_unlock_bh(&dev_base_lock);
                netdev_state_change(dev);
        } else {
                write_unlock_bh(&dev_base_lock);
        }
}

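/* Derive the master's operstate from its administrative state and the
 * carrier state reported by the slaves.
 */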
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
        if (!is_admin_up(master->dev)) {
                __hsr_set_operstate(master->dev, IF_OPER_DOWN);
                return;
        }

        if (has_carrier)
                __hsr_set_operstate(master->dev, IF_OPER_UP);
        else
                __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
}

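/* The master has carrier as long as at least one slave interface is up */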
static bool hsr_check_carrier(struct hsr_port *master)
{
        struct hsr_port *port;

        ASSERT_RTNL();

        hsr_for_each_port(master->hsr, port) {
                if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
                        netif_carrier_on(master->dev);
                        return true;
                }
        }

        netif_carrier_off(master->dev);

        return false;
}

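/* (Re)start the announce timer when the master goes operationally up, and
 * stop it when the master goes down.
 */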
static void hsr_check_announce(struct net_device *hsr_dev,
                               unsigned char old_operstate)
{
        struct hsr_priv *hsr;

        hsr = netdev_priv(hsr_dev);

        if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
                /* Went up */
                hsr->announce_count = 0;
                mod_timer(&hsr->announce_timer,
                          jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
        }

        if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
                /* Went down */
                del_timer(&hsr->announce_timer);
}

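/* Recompute carrier and operstate for the master and update the announce
 * timer accordingly.
 */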
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
        struct hsr_port *master;
        unsigned char old_operstate;
        bool has_carrier;

        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        /* netif_stacked_transfer_operstate() cannot be used here since
         * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
         */
        old_operstate = master->dev->operstate;
        has_carrier = hsr_check_carrier(master);
        hsr_set_operstate(master, has_carrier);
        hsr_check_announce(master->dev, old_operstate);
}

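/* Largest MTU the master device can use: the smallest slave MTU minus the
 * room needed for the HSR tag.
 */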
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
        unsigned int mtu_max;
        struct hsr_port *port;

        mtu_max = ETH_DATA_LEN;
        hsr_for_each_port(hsr, port)
                if (port->type != HSR_PT_MASTER)
                        mtu_max = min(port->dev->mtu, mtu_max);

        if (mtu_max < HSR_HLEN)
                return 0;
        return mtu_max - HSR_HLEN;
}

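/* ndo_change_mtu: reject MTUs that exceed what the slaves can carry once the
 * HSR tag is added.
 */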
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
        struct hsr_priv *hsr;

        hsr = netdev_priv(dev);

        if (new_mtu > hsr_get_max_mtu(hsr)) {
                netdev_info(dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
                            HSR_HLEN);
                return -EINVAL;
        }

        dev->mtu = new_mtu;

        return 0;
}

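/* ndo_open: warn if any configured slave is not up yet */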
static int hsr_dev_open(struct net_device *dev)
{
        struct hsr_priv *hsr;
        struct hsr_port *port;
        char designation;

        hsr = netdev_priv(dev);
        designation = '\0';

        hsr_for_each_port(hsr, port) {
                if (port->type == HSR_PT_MASTER)
                        continue;
                switch (port->type) {
                case HSR_PT_SLAVE_A:
                        designation = 'A';
                        break;
                case HSR_PT_SLAVE_B:
                        designation = 'B';
                        break;
                default:
                        designation = '?';
                }
                if (!is_slave_up(port->dev))
                        netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
                                    designation, port->dev->name);
        }

        if (designation == '\0')
                netdev_warn(dev, "No slave devices configured\n");

        return 0;
}

static int hsr_dev_close(struct net_device *dev)
{
        /* Nothing to do here. */
        return 0;
}

static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
                                                netdev_features_t features)
{
        netdev_features_t mask;
        struct hsr_port *port;

        mask = features;

        /* Mask out all features that, if supported by one device, should be
         * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
         *
         * Anything that's off in mask will not be enabled - so only things
         * that were in features originally, and that are also in
         * NETIF_F_ONE_FOR_ALL, may become enabled.
         */
        features &= ~NETIF_F_ONE_FOR_ALL;
        hsr_for_each_port(hsr, port)
                features = netdev_increment_features(features,
                                                     port->dev->features,
                                                     mask);

        return features;
}

static netdev_features_t hsr_fix_features(struct net_device *dev,
                                          netdev_features_t features)
{
        struct hsr_priv *hsr = netdev_priv(dev);

        return hsr_features_recompute(hsr, features);
}

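/* ndo_start_xmit for the master device: hand the frame to hsr_forward_skb(),
 * which tags it and transmits it on the slave ports.
 */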
static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct hsr_priv *hsr = netdev_priv(dev);
        struct hsr_port *master;

        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        if (master) {
                skb->dev = master->dev;
                skb_reset_mac_header(skb);
                skb_reset_mac_len(skb);
                hsr_forward_skb(skb, master);
        } else {
                atomic_long_inc(&dev->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static const struct header_ops hsr_header_ops = {
        .create  = eth_header,
        .parse   = eth_header_parse,
};

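/* Allocate an skb for a supervision frame and build its Ethernet header,
 * addressed to the configured supervision multicast address.
 */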
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
        struct hsr_priv *hsr = master->hsr;
        struct sk_buff *skb;
        int hlen, tlen;

        hlen = LL_RESERVED_SPACE(master->dev);
        tlen = master->dev->needed_tailroom;
        /* The skb size is the same for PRP and HSR frames; the only
         * difference is that PRP adds a trailer where HSR adds a header
         */
        skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
                            sizeof(struct hsr_sup_payload) + hlen + tlen);

        if (!skb)
                return skb;

        skb_reserve(skb, hlen);
        skb->dev = master->dev;
        skb->priority = TC_PRIO_CONTROL;

        if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
                            hsr->sup_multicast_addr,
                            skb->dev->dev_addr, skb->len) <= 0)
                goto out;

        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        return skb;
out:
        kfree_skb(skb);

        return NULL;
}

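/* Build and send an HSR supervision frame: a life-check frame, or an announce
 * frame for the first few transmissions on HSRv0.
 */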
static void send_hsr_supervision_frame(struct hsr_port *master,
                                       unsigned long *interval)
{
        struct hsr_priv *hsr = master->hsr;
        __u8 type = HSR_TLV_LIFE_CHECK;
        struct hsr_sup_payload *hsr_sp;
        struct hsr_sup_tag *hsr_stag;
        unsigned long irqflags;
        struct sk_buff *skb;

        *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
        if (hsr->announce_count < 3 && hsr->prot_version == 0) {
                type = HSR_TLV_ANNOUNCE;
                *interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
                hsr->announce_count++;
        }

        skb = hsr_init_skb(master);
        if (!skb) {
                WARN_ONCE(1, "HSR: Could not send supervision frame\n");
                return;
        }

        hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
        set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
        set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);

        /* From HSRv1 on we have separate supervision sequence numbers. */
        spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
        if (hsr->prot_version > 0) {
                hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
                hsr->sup_sequence_nr++;
        } else {
                hsr_stag->sequence_nr = htons(hsr->sequence_nr);
                hsr->sequence_nr++;
        }
        spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

        hsr_stag->HSR_TLV_type = type;
        /* TODO: Why 12 in HSRv0? */
        hsr_stag->HSR_TLV_length = hsr->prot_version ?
                                sizeof(struct hsr_sup_payload) : 12;

        /* Payload: MacAddressA */
        hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
        ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

        if (skb_put_padto(skb, ETH_ZLEN))
                return;

        hsr_forward_skb(skb, master);

        return;
}

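/* Build and send a PRP supervision frame (Duplicate Discard life check) */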
static void send_prp_supervision_frame(struct hsr_port *master,
                                       unsigned long *interval)
{
        struct hsr_priv *hsr = master->hsr;
        struct hsr_sup_payload *hsr_sp;
        struct hsr_sup_tag *hsr_stag;
        unsigned long irqflags;
        struct sk_buff *skb;

        skb = hsr_init_skb(master);
        if (!skb) {
                WARN_ONCE(1, "PRP: Could not send supervision frame\n");
                return;
        }

        *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
        hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
        set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
        set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));

        /* From HSRv1 on we have separate supervision sequence numbers. */
        spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
        hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
        hsr->sup_sequence_nr++;
        hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
        hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload);

        /* Payload: MacAddressA */
        hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
        ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

        if (skb_put_padto(skb, ETH_ZLEN)) {
                spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
                return;
        }

        spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

        hsr_forward_skb(skb, master);
}

/* Announce (supervision frame) timer function */
static void hsr_announce(struct timer_list *t)
{
        struct hsr_priv *hsr;
        struct hsr_port *master;
        unsigned long interval;

        hsr = from_timer(hsr, t, announce_timer);

        rcu_read_lock();
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        hsr->proto_ops->send_sv_frame(master, &interval);

        if (is_admin_up(master->dev))
                mod_timer(&hsr->announce_timer, jiffies + interval);

        rcu_read_unlock();
}

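/* Remove both slave ports and then the master port itself */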
void hsr_del_ports(struct hsr_priv *hsr)
{
        struct hsr_port *port;

        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port)
                hsr_del_port(port);

        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
        if (port)
                hsr_del_port(port);

        port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        if (port)
                hsr_del_port(port);
}

static const struct net_device_ops hsr_device_ops = {
        .ndo_change_mtu = hsr_dev_change_mtu,
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
        .ndo_fix_features = hsr_fix_features,
};

static struct device_type hsr_type = {
        .name = "hsr",
};

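/* Protocol-specific operations for HSR and PRP devices */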
static struct hsr_proto_ops hsr_ops = {
        .send_sv_frame = send_hsr_supervision_frame,
        .create_tagged_frame = hsr_create_tagged_frame,
        .get_untagged_frame = hsr_get_untagged_frame,
        .drop_frame = hsr_drop_frame,
        .fill_frame_info = hsr_fill_frame_info,
        .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};

static struct hsr_proto_ops prp_ops = {
        .send_sv_frame = send_prp_supervision_frame,
        .create_tagged_frame = prp_create_tagged_frame,
        .get_untagged_frame = prp_get_untagged_frame,
        .drop_frame = prp_drop_frame,
        .fill_frame_info = prp_fill_frame_info,
        .handle_san_frame = prp_handle_san_frame,
        .update_san_info = prp_update_san_info,
};

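/* Initialize the virtual HSR/PRP master device: random MAC address, Ethernet
 * defaults and the feature flags the device can support.
 */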
void hsr_dev_setup(struct net_device *dev)
{
        eth_hw_addr_random(dev);

        ether_setup(dev);
        dev->min_mtu = 0;
        dev->header_ops = &hsr_header_ops;
        dev->netdev_ops = &hsr_device_ops;
        SET_NETDEV_DEVTYPE(dev, &hsr_type);
        dev->priv_flags |= IFF_NO_QUEUE;

        dev->needs_free_netdev = true;

        dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
                           NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_CTAG_TX;

        dev->features = dev->hw_features;

        /* Prevent recursive tx locking */
        dev->features |= NETIF_F_LLTX;
        /* VLAN on top of HSR needs testing and probably some work on
         * hsr_header_create() etc.
         */
        dev->features |= NETIF_F_VLAN_CHALLENGED;
        /* Not sure about this. Taken from bridge code. netdev_features.h says
         * it means "Does not change network namespaces".
         */
        dev->features |= NETIF_F_NETNS_LOCAL;
}

/* Return true if dev is an HSR master; return false otherwise. */
bool is_hsr_master(struct net_device *dev)
{
        return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);

/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
        0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};

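/* Attach the two slave devices to a newly created HSR/PRP master, register
 * the master and start the node prune timer.
 */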
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
                     struct netlink_ext_ack *extack)
{
        bool unregister = false;
        struct hsr_priv *hsr;
        int res;

        hsr = netdev_priv(hsr_dev);
        INIT_LIST_HEAD(&hsr->ports);
        INIT_LIST_HEAD(&hsr->node_db);
        INIT_LIST_HEAD(&hsr->self_node_db);
        spin_lock_init(&hsr->list_lock);

        ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);

        /* initialize protocol specific functions */
        if (protocol_version == PRP_V1) {
                /* For PRP, the most significant 3 bits of lan_id hold the
                 * net_id of PRP_LAN_ID
                 */
                hsr->net_id = PRP_LAN_ID << 1;
                hsr->proto_ops = &prp_ops;
        } else {
                hsr->proto_ops = &hsr_ops;
        }

        /* Make sure we recognize frames from ourselves in hsr_rcv() */
        res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
                                   slave[1]->dev_addr);
        if (res < 0)
                return res;

        spin_lock_init(&hsr->seqnr_lock);
        /* Overflow soon to make bugs easier to find: */
        hsr->sequence_nr = HSR_SEQNR_START;
        hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

        timer_setup(&hsr->announce_timer, hsr_announce, 0);
        timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);

        ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
        hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

        hsr->prot_version = protocol_version;

        /* Make sure the 1st call to netif_carrier_on() gets through */
        netif_carrier_off(hsr_dev);

        res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
        if (res)
                goto err_add_master;

        res = register_netdevice(hsr_dev);
        if (res)
                goto err_unregister;

        unregister = true;

        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
        if (res)
                goto err_unregister;

        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
        if (res)
                goto err_unregister;

        hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

        return 0;

err_unregister:
        hsr_del_ports(hsr);
err_add_master:
        hsr_del_self_node(hsr);

        if (unregister)
                unregister_netdevice(hsr_dev);
        return res;
}