/* linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
   3 *
   4 * RMNET Data MAP protocol
   5 */
   6
   7#include <linux/netdevice.h>
   8#include <linux/ip.h>
   9#include <linux/ipv6.h>
  10#include <net/ip6_checksum.h>
  11#include "rmnet_config.h"
  12#include "rmnet_map.h"
  13#include "rmnet_private.h"
  14
  15#define RMNET_MAP_DEAGGR_SPACING  64
  16#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
  17
  18static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
  19                                         const void *txporthdr)
  20{
  21        __sum16 *check = NULL;
  22
  23        switch (protocol) {
  24        case IPPROTO_TCP:
  25                check = &(((struct tcphdr *)txporthdr)->check);
  26                break;
  27
  28        case IPPROTO_UDP:
  29                check = &(((struct udphdr *)txporthdr)->check);
  30                break;
  31
  32        default:
  33                check = NULL;
  34                break;
  35        }
  36
  37        return check;
  38}
  39
/* Validate the transport (TCP/UDP) checksum of an IPv4 packet against the
 * 1's-complement checksum that the hardware computed over the entire IP
 * packet and stored in the MAP downlink trailer.
 *
 * Returns 0 if the checksum is valid or legitimately skipped (UDP zero
 * checksum), -EOPNOTSUPP for fragments, -EPROTONOSUPPORT for unsupported
 * transport protocols and -EINVAL on mismatch.  @priv->stats is updated
 * to reflect the outcome in every case.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
	/* Fragments cannot be validated: only the first fragment carries
	 * the transport header, and the trailer checksum covers a single
	 * fragment's worth of data.
	 */
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Transport header follows the (variable length) IPv4 header */
	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The trailer checksum covers the whole IP packet; subtract the
	 * IP header checksum to isolate the checksum of the IP payload.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	/* Fold in the TCP/UDP pseudo-header checksum ... */
	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	/* ... then back out the sender's own checksum field so the result
	 * can be compared directly against that field.
	 */
	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
 111
 112#if IS_ENABLED(CONFIG_IPV6)
/* Validate the transport (TCP/UDP) checksum of an IPv6 packet against the
 * 1's-complement checksum stored by hardware in the MAP downlink trailer.
 *
 * NOTE(review): only a single ipv6hdr is assumed before the transport
 * header — extension headers would land in txporthdr here; confirm the
 * hardware never delivers such packets on this path.
 *
 * Returns 0 on success, -EPROTONOSUPPORT for unsupported transport
 * protocols and -EINVAL on mismatch, updating @priv->stats accordingly.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	/* Transport header directly follows the fixed-size IPv6 header */
	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The trailer checksum covers the whole IP packet; subtract the
	 * checksum of the IPv6 header to isolate the payload checksum.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	/* For UDP the pseudo-header length comes from the UDP header;
	 * for TCP use the IPv6 payload length.
	 */
	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			     length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	/* Back out the sender's own checksum field so the result can be
	 * compared directly against that field.
	 */
	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
 179#endif
 180
 181static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
 182{
 183        struct iphdr *ip4h = (struct iphdr *)iphdr;
 184        void *txphdr;
 185        u16 *csum;
 186
 187        txphdr = iphdr + ip4h->ihl * 4;
 188
 189        if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
 190                csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
 191                *csum = ~(*csum);
 192        }
 193}
 194
 195static void
 196rmnet_map_ipv4_ul_csum_header(void *iphdr,
 197                              struct rmnet_map_ul_csum_header *ul_header,
 198                              struct sk_buff *skb)
 199{
 200        struct iphdr *ip4h = iphdr;
 201        u16 val;
 202
 203        val = MAP_CSUM_UL_ENABLED_FLAG;
 204        if (ip4h->protocol == IPPROTO_UDP)
 205                val |= MAP_CSUM_UL_UDP_FLAG;
 206        val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
 207
 208        ul_header->csum_start_offset = htons(skb_network_header_len(skb));
 209        ul_header->csum_info = htons(val);
 210
 211        skb->ip_summed = CHECKSUM_NONE;
 212
 213        rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
 214}
 215
 216#if IS_ENABLED(CONFIG_IPV6)
 217static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
 218{
 219        struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
 220        void *txphdr;
 221        u16 *csum;
 222
 223        txphdr = ip6hdr + sizeof(struct ipv6hdr);
 224
 225        if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
 226                csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
 227                *csum = ~(*csum);
 228        }
 229}
 230
 231static void
 232rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
 233                              struct rmnet_map_ul_csum_header *ul_header,
 234                              struct sk_buff *skb)
 235{
 236        struct ipv6hdr *ip6h = ip6hdr;
 237        u16 val;
 238
 239        val = MAP_CSUM_UL_ENABLED_FLAG;
 240        if (ip6h->nexthdr == IPPROTO_UDP)
 241                val |= MAP_CSUM_UL_UDP_FLAG;
 242        val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
 243
 244        ul_header->csum_start_offset = htons(skb_network_header_len(skb));
 245        ul_header->csum_info = htons(val);
 246
 247        skb->ip_summed = CHECKSUM_NONE;
 248
 249        rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
 250}
 251#endif
 252
 253/* Adds MAP header to front of skb->data
 254 * Padding is calculated and set appropriately in MAP header. Mux ID is
 255 * initialized to 0.
 256 */
 257struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 258                                                  int hdrlen, int pad)
 259{
 260        struct rmnet_map_header *map_header;
 261        u32 padding, map_datalen;
 262        u8 *padbytes;
 263
 264        map_datalen = skb->len - hdrlen;
 265        map_header = (struct rmnet_map_header *)
 266                        skb_push(skb, sizeof(struct rmnet_map_header));
 267        memset(map_header, 0, sizeof(struct rmnet_map_header));
 268
 269        if (pad == RMNET_MAP_NO_PAD_BYTES) {
 270                map_header->pkt_len = htons(map_datalen);
 271                return map_header;
 272        }
 273
 274        BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
 275        padding = ALIGN(map_datalen, 4) - map_datalen;
 276
 277        if (padding == 0)
 278                goto done;
 279
 280        if (skb_tailroom(skb) < padding)
 281                return NULL;
 282
 283        padbytes = (u8 *)skb_put(skb, padding);
 284        memset(padbytes, 0, padding);
 285
 286done:
 287        map_header->pkt_len = htons(map_datalen + padding);
 288        /* This is a data packet, so the CMD bit is 0 */
 289        map_header->flags = padding & MAP_PAD_LEN_MASK;
 290
 291        return map_header;
 292}
 293
 294/* Deaggregates a single packet
 295 * A whole new buffer is allocated for each portion of an aggregated frame.
 296 * Caller should keep calling deaggregate() on the source skb until 0 is
 297 * returned, indicating that there are no more packets to deaggregate. Caller
 298 * is responsible for freeing the original skb.
 299 */
 300struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 301                                      struct rmnet_port *port)
 302{
 303        struct rmnet_map_header *maph;
 304        struct sk_buff *skbn;
 305        u32 packet_len;
 306
 307        if (skb->len == 0)
 308                return NULL;
 309
 310        maph = (struct rmnet_map_header *)skb->data;
 311        packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
 312
 313        if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 314                packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
 315
 316        if (((int)skb->len - (int)packet_len) < 0)
 317                return NULL;
 318
 319        /* Some hardware can send us empty frames. Catch them */
 320        if (!maph->pkt_len)
 321                return NULL;
 322
 323        skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
 324        if (!skbn)
 325                return NULL;
 326
 327        skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
 328        skb_put(skbn, packet_len);
 329        memcpy(skbn->data, skb->data, packet_len);
 330        skb_pull(skb, packet_len);
 331
 332        return skbn;
 333}
 334
 335/* Validates packet checksums. Function takes a pointer to
 336 * the beginning of a buffer which contains the IP payload +
 337 * padding + checksum trailer.
 338 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 339 * Fragmented or tunneled packets are not supported.
 340 */
 341int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 342{
 343        struct rmnet_priv *priv = netdev_priv(skb->dev);
 344        struct rmnet_map_dl_csum_trailer *csum_trailer;
 345
 346        if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
 347                priv->stats.csum_sw++;
 348                return -EOPNOTSUPP;
 349        }
 350
 351        csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
 352
 353        if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
 354                priv->stats.csum_valid_unset++;
 355                return -EINVAL;
 356        }
 357
 358        if (skb->protocol == htons(ETH_P_IP)) {
 359                return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
 360        } else if (skb->protocol == htons(ETH_P_IPV6)) {
 361#if IS_ENABLED(CONFIG_IPV6)
 362                return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
 363#else
 364                priv->stats.csum_err_invalid_ip_version++;
 365                return -EPROTONOSUPPORT;
 366#endif
 367        } else {
 368                priv->stats.csum_err_invalid_ip_version++;
 369                return -EPROTONOSUPPORT;
 370        }
 371
 372        return 0;
 373}
 374
 375/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 376 * packets that are supported for UL checksum offload.
 377 */
 378void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 379                                      struct net_device *orig_dev)
 380{
 381        struct rmnet_priv *priv = netdev_priv(orig_dev);
 382        struct rmnet_map_ul_csum_header *ul_header;
 383        void *iphdr;
 384
 385        ul_header = (struct rmnet_map_ul_csum_header *)
 386                    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
 387
 388        if (unlikely(!(orig_dev->features &
 389                     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
 390                goto sw_csum;
 391
 392        if (skb->ip_summed == CHECKSUM_PARTIAL) {
 393                iphdr = (char *)ul_header +
 394                        sizeof(struct rmnet_map_ul_csum_header);
 395
 396                if (skb->protocol == htons(ETH_P_IP)) {
 397                        rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
 398                        return;
 399                } else if (skb->protocol == htons(ETH_P_IPV6)) {
 400#if IS_ENABLED(CONFIG_IPV6)
 401                        rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
 402                        return;
 403#else
 404                        priv->stats.csum_err_invalid_ip_version++;
 405                        goto sw_csum;
 406#endif
 407                } else {
 408                        priv->stats.csum_err_invalid_ip_version++;
 409                }
 410        }
 411
 412sw_csum:
 413        memset(ul_header, 0, sizeof(*ul_header));
 414
 415        priv->stats.csum_sw++;
 416}
 417