linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

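/* The upper nibble of the first payload byte is the IP version field:
 * 0x4x for IPv4 and 0x6x for IPv6. A typical IPv4 header starts with
 * 0x45 (version 4, IHL 5), so data[0] & 0xF0 matches RMNET_IP_VERSION_4.
 * Anything else is left as a raw MAP frame (ETH_P_MAP).
 */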
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
        switch (skb->data[0] & 0xF0) {
        case RMNET_IP_VERSION_4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case RMNET_IP_VERSION_6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                skb->protocol = htons(ETH_P_MAP);
                break;
        }
}

/* Generic handler */

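/* Deliver a decapsulated IP packet to the stack on its rmnet virtual
 * device. gro_cells_receive() queues the skb to a per-CPU GRO cell so
 * TCP segments can be coalesced before netif_receive_skb().
 */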
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
        struct rmnet_priv *priv = netdev_priv(skb->dev);

        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb, skb->dev);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);
        gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

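/* Each MAP (Multiplexing and Aggregation Protocol) frame carries a small
 * header in front of the IP payload. A sketch of the fields behind the
 * RMNET_MAP_GET_* accessors (the authoritative definition is
 * struct rmnet_map_header in rmnet_map.h):
 *
 *   pad_len  (6 bits) - trailing padding bytes included in pkt_len
 *   reserved (1 bit)
 *   cd_bit   (1 bit)  - 1 = command frame, 0 = data frame
 *   mux_id   (1 byte) - logical endpoint the payload belongs to
 *   pkt_len  (2 bytes, big endian) - payload length, padding included
 */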
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
                            struct rmnet_port *port)
{
        struct rmnet_endpoint *ep;
        u16 len, pad;
        u8 mux_id;

        /* Command frames are handled only when the configuration asks
         * for them; otherwise they are dropped.
         */
        if (RMNET_MAP_GET_CD_BIT(skb)) {
                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        return rmnet_map_command(skb, port);

                goto free_skb;
        }

        mux_id = RMNET_MAP_GET_MUX_ID(skb);
        pad = RMNET_MAP_GET_PAD(skb);
        len = RMNET_MAP_GET_LENGTH(skb) - pad;

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto free_skb;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto free_skb;

        /* Deliver on the virtual device mapped to this mux_id */
        skb->dev = ep->egress_dev;

        /* Subtract MAP header */
        skb_pull(skb, sizeof(struct rmnet_map_header));
        rmnet_set_skb_proto(skb);

        if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
                /* The DL checksum trailer sits after the padded payload,
                 * hence len + pad; a zero return means the hardware has
                 * already verified the checksum.
                 */
                if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        /* Drop the trailing padding bytes */
        skb_trim(skb, len);
        rmnet_deliver_skb(skb);
        return;

free_skb:
        kfree_skb(skb);
}

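/* With RMNET_FLAGS_INGRESS_DEAGGREGATION set, the physical device may
 * deliver several MAP frames in a single skb. rmnet_map_deaggregate()
 * peels one frame off the front per call, returning it as a fresh skb,
 * and returns NULL once the buffer is exhausted, after which the parent
 * skb is consumed.
 */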
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
                          struct rmnet_port *port)
{
        struct sk_buff *skbn;

        if (skb->dev->type == ARPHRD_ETHER) {
                if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, ETH_HLEN);
        }

        if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
                while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
                        __rmnet_map_ingress_handler(skbn, port);

                consume_skb(skb);
        } else {
                __rmnet_map_ingress_handler(skb, port);
        }
}

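/* On egress, MAP framing is pushed in front of the IP payload. With
 * RMNET_FLAGS_EGRESS_MAP_CKSUMV4 enabled the outgoing frame is roughly:
 *
 *   [ rmnet_map_header | rmnet_map_ul_csum_header | IP packet ]
 *
 * rmnet_map_add_map_header() fills in the length; only the mux_id is
 * set here.
 */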
static int rmnet_map_egress_handler(struct sk_buff *skb,
                                    struct rmnet_port *port, u8 mux_id,
                                    struct net_device *orig_dev)
{
        int required_headroom, additional_header_len;
        struct rmnet_map_header *map_header;

        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);

        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
                additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
                required_headroom += additional_header_len;
        }

        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        /* The checksum header is pushed first so that it lands between
         * the MAP header and the payload.
         */
        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
                rmnet_map_checksum_uplink_packet(skb, orig_dev);

        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
        if (!map_header)
                return -ENOMEM;

        map_header->mux_id = mux_id;

        skb->protocol = htons(ETH_P_MAP);

        return 0;
}

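/* In bridge mode, frames are not demultiplexed at all; they are forwarded
 * as-is to the peer device recorded as port->bridge_ep when the bridge
 * was configured (see rmnet_config.c).
 */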
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
        if (bridge_dev) {
                skb->dev = bridge_dev;
                dev_queue_xmit(skb);
        }
}

/* Ingress / Egress Entry Points */

/* Processes a packet according to the ingress data format of the receiving
 * device. The logical endpoint is determined by packet inspection, and the
 * packet is then delivered to the egress device listed in the logical
 * endpoint configuration.
 */
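/* rmnet_rx_handler() is attached to the physical device via
 * netdev_rx_handler_register() when the first rmnet link is created (see
 * rmnet_config.c), so every frame received on that device passes through
 * here before the normal protocol handlers run.
 */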
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct rmnet_port *port;
        struct net_device *dev;

        if (!skb)
                goto done;

        if (skb->pkt_type == PACKET_LOOPBACK)
                return RX_HANDLER_PASS;

        dev = skb->dev;
        port = rmnet_get_port(dev);

        switch (port->rmnet_mode) {
        case RMNET_EPMODE_VND:
                rmnet_map_ingress_handler(skb, port);
                break;
        case RMNET_EPMODE_BRIDGE:
                rmnet_bridge_handler(skb, port->bridge_ep);
                break;
        }

done:
        return RX_HANDLER_CONSUMED;
}

/* Modifies the packet according to the logical endpoint configuration and
 * the egress data format of the egress device configured there, then
 * transmits it on that device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
        struct net_device *orig_dev;
        struct rmnet_port *port;
        struct rmnet_priv *priv;
        u8 mux_id;

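        /* Relax TCP small-queues pacing for this socket: shift 8 instead
         * of the default 10 allows up to sk_pacing_rate >> 8 bytes queued,
         * so the modem can aggregate larger uplink bursts.
         */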
        sk_pacing_shift_update(skb->sk, 8);

        orig_dev = skb->dev;
        priv = netdev_priv(orig_dev);
        skb->dev = priv->real_dev;      /* transmit on the physical device */
        mux_id = priv->mux_id;

        port = rmnet_get_port(skb->dev);
        if (!port)
                goto drop;

        if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
                goto drop;

        rmnet_vnd_tx_fixup(skb, orig_dev);

        dev_queue_xmit(skb);
        return;

drop:
        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
        kfree_skb(skb);
}