linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

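/* The fixup handlers below run in the datapath proper, so they update
 * lockless per-CPU counters. u64_stats_update_begin()/end() wrap the
 * writes so that 64-bit counters can be read without tearing on 32-bit
 * SMP systems; the reader side of the pattern is rmnet_get_stats64().
 */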
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

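/* priv->real_dev is only set once rmnet_vnd_newlink() has bound this
 * device to a physical netdev; anything transmitted before that point is
 * dropped. Note that tx_drops is bumped with this_cpu_inc() outside a
 * syncp section, which is why rmnet_get_stats64() sums it separately.
 */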
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

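/* Reached via dev_set_mtu(), e.g. from userspace with (illustrative;
 * "rmnet0" is an assumed interface name):
 *
 *	ip link set dev rmnet0 mtu 1400
 *
 * Values outside 0..RMNET_MAX_PACKET_SIZE are rejected with -EINVAL.
 */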
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

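/* Reporting the real device's ifindex here is what lets tooling display
 * the binding, e.g. "ip link" printing something like "rmnet0@wwan0"
 * (interface names are illustrative).
 */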
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

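/* ndo_init/ndo_uninit pair with register_netdevice()/unregister_netdevice():
 * the core calls rmnet_vnd_init() during registration and
 * rmnet_vnd_uninit() on teardown, so the per-CPU stats and GRO cells
 * allocated here are always released on the unregister path.
 */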
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

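/* Reader half of the u64_stats seqcount pattern: if a writer updates the
 * counters mid-read, the fetch/retry loop restarts so a torn 64-bit value
 * is never returned (the helpers compile away on 64-bit kernels).
 * tx_drops sits outside the loop because it is updated with
 * this_cpu_inc() and is not covered by the syncp.
 */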
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

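/* These strings must stay in the same order as the fields of struct
 * rmnet_priv_stats: rmnet_get_ethtool_stats() memcpy()s that struct
 * straight into the u64 array handed to userspace, e.g. via:
 *
 *	ethtool -S rmnet0
 *
 * ("rmnet0" is an assumed interface name.)
 */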
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets
 * MTU, flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	rmnet_dev->features |= NETIF_F_LLTX;

	/* This perm addr will be used as the interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

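/* rmnet_vnd_setup() is wired up as the rtnl_link_ops .setup callback (see
 * rmnet_link_ops in rmnet_config.c), so the core runs it when the netdev
 * is allocated, before ndo_init. Raw IP mode means frames carry no
 * link-layer header: header_ops is NULL, hard_header_len is 0, and the
 * device type is ARPHRD_RAWIP.
 */
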
/* Exposed API */

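/* Typically exercised from userspace via the rmnet rtnl link type, per
 * Documentation/networking/rmnet.rst (device names are illustrative):
 *
 *	ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 *
 * The mux_id becomes the MAP header MUX ID used to demultiplex traffic
 * arriving on the real device.
 */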
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

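/* Driven by MAP flow control command packets from the modem:
 * rmnet_map_command() decodes the enable/disable command and calls this
 * helper to wake or stop the VND's TX queue (see rmnet_map_command.c).
 */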
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for disable: it is more latency-sensitive
	 * than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}
