qemu/hw/net/net_tx_pkt.c
/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci_device.h"

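/*
 * Every packet is kept as an iovec array: fragment 0 holds the
 * virtio-net header, fragment 1 the L2 header, fragment 2 the L3
 * header, and payload data starts at NET_TX_PKT_PL_START_FRAG.
 */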
enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    union {
        struct ip_header ip;
        struct ip6_header ip6;
        uint8_t octets[ETH_MAX_IP_DGRAM_LEN];
    } l3_hdr;

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;
};

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);

    p->raw = g_new(struct iovec, max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}
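
/*
 * Illustrative lifecycle sketch (not part of this file; device models
 * such as e1000e follow roughly this sequence, details vary):
 *
 *   struct NetTxPkt *pkt;
 *   net_tx_pkt_init(&pkt, pci_dev, max_frags);
 *   ...for each packet...
 *   net_tx_pkt_add_raw_fragment(pkt, desc_addr, desc_len);
 *   if (net_tx_pkt_parse(pkt)) {
 *       net_tx_pkt_build_vheader(pkt, tso_enable, csum_enable, gso_size);
 *       net_tx_pkt_send(pkt, nc);
 *   }
 *   net_tx_pkt_reset(pkt, pci_dev);
 *   ...on teardown...
 *   net_tx_pkt_uninit(pkt);
 */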

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}

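/*
 * Refresh the IPv4 total length and header checksum after the payload
 * or header lengths have changed.
 */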
void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    assert(pkt);

    pkt->l3_hdr.ip.ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    pkt->l3_hdr.ip.ip_sum = 0;
    csum = net_raw_checksum(pkt->l3_hdr.octets,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    pkt->l3_hdr.ip.ip_sum = cpu_to_be16(csum);
}

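/*
 * For GSO packets, recompute the IPv4 header checksum (v4 only) and
 * store the pseudo-header checksum seed in the L4 checksum field, as
 * expected by consumers of VIRTIO_NET_HDR_F_NEEDS_CSUM.
 */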
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    assert(pkt);
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
                                            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}

static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

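/*
 * Copy the L2 and L3 headers out of the raw scatter list into the
 * packet's own header buffers and classify the packet (packet type,
 * L4 protocol).
 */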
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data, if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}

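/*
 * Point the payload fragments at the raw DMA mapping, skipping the
 * bytes consumed by the parsed L2/L3 headers.
 */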
static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                pkt->max_payload_frags,
                                pkt->raw, pkt->raw_frags,
                                pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                          bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

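/*
 * Populate the virtio-net header: GSO type and size, plus
 * csum_start/csum_offset when the L4 checksum is to be deferred.
 */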
bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    size_t bytes_read;
    assert(pkt);

    /* TSO requires checksum offload to be enabled as well. */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
        if (bytes_read < sizeof(l4hdr) ||
            l4hdr.th_off * sizeof(uint32_t) < sizeof(l4hdr)) {
            return false;
        }

        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            if (pkt->payload_len < sizeof(struct tcp_hdr)) {
                return false;
            }
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            if (pkt->payload_len < sizeof(struct udp_hdr)) {
                return false;
            }
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }

    return true;
}

void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;
    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;
    assert(pkt);

    if (pkt->raw_frags >= pkt->max_raw_frags) {
        return false;
    }

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %zu, "
        "l3hdr_len: %zu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

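/*
 * Unmap any DMA-mapped raw fragments and return the packet to its
 * post-init state; the owning PCI device may change across resets.
 */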
void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    if (pkt->max_raw_frags > 0) {
        assert(pkt->raw);
        for (i = 0; i < pkt->raw_frags; i++) {
            assert(pkt->raw[i].iov_base);
            pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
                          pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
        }
    }
    pkt->pci_dev = pci_dev;
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}

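/*
 * Software L4 checksum: zero the checksum field, sum the pseudo
 * header and csl bytes of data starting at csum_start, then store the
 * folded result back into the packet.
 */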
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt,
                                  struct iovec *iov, uint32_t iov_len,
                                  uint16_t csl)
{
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    /* offset of the L4 checksum field within the packet */
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
    uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len);

    /* Put zero to checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csum_cntr = 0;
    cso = 0;
    /* add pseudo header to csum */
    if (l3_proto == ETH_P_IP) {
        csum_cntr = eth_calc_ip4_pseudo_hdr_csum(
                pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                csl, &cso);
    } else if (l3_proto == ETH_P_IPV6) {
        csum_cntr = eth_calc_ip6_pseudo_hdr_csum(
                pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                csl, pkt->l4proto, &cso);
    }

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}

#define NET_MAX_FRAG_SG_LIST (64)

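/*
 * Gather up to src_len payload bytes into dst by copying iovec
 * descriptors (not data), advancing *src_idx/*src_offset so repeated
 * calls walk the payload sequentially. Returns the number of bytes
 * described by the appended entries.
 */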
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, size_t src_len,
    struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    while (fetched < src_len) {

        /* no more room in the fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in the source iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            src_len - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}

static void net_tx_pkt_sendv(
    void *opaque, const struct iovec *iov, int iov_cnt,
    const struct iovec *virt_iov, int virt_iov_cnt)
{
    NetClientState *nc = opaque;

    if (qemu_get_using_vnet_hdr(nc->peer)) {
        qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

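/*
 * Linearize the TCP header into a private scratch buffer so that
 * per-fragment fixups (sequence number, flags) can be applied without
 * touching the guest's payload; FIN/PSH are cleared in the copy so
 * the generated segments do not carry them.
 */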
static bool net_tx_pkt_tcp_fragment_init(struct NetTxPkt *pkt,
                                         struct iovec *fragment,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx,
                                         size_t *src_offset,
                                         size_t *src_len)
{
    struct iovec *l4 = fragment + NET_TX_PKT_PL_START_FRAG;
    size_t bytes_read = 0;
    struct tcp_hdr *th;

    if (!pkt->payload_frags) {
        return false;
    }

    l4->iov_len = pkt->virt_hdr.hdr_len - pkt->hdr_len;
    l4->iov_base = g_malloc(l4->iov_len);

    *src_idx = NET_TX_PKT_PL_START_FRAG;
    while (pkt->vec[*src_idx].iov_len < l4->iov_len - bytes_read) {
        memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
               pkt->vec[*src_idx].iov_len);

        bytes_read += pkt->vec[*src_idx].iov_len;

        (*src_idx)++;
        if (*src_idx >= pkt->payload_frags + NET_TX_PKT_PL_START_FRAG) {
            g_free(l4->iov_base);
            return false;
        }
    }

    *src_offset = l4->iov_len - bytes_read;
    memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
           *src_offset);

    th = l4->iov_base;
    th->th_flags &= ~(TH_FIN | TH_PUSH);

    *pl_idx = NET_TX_PKT_PL_START_FRAG + 1;
    *l4hdr_len = l4->iov_len;
    *src_len = pkt->virt_hdr.gso_size;

    return true;
}

static void net_tx_pkt_tcp_fragment_deinit(struct iovec *fragment)
{
    g_free(fragment[NET_TX_PKT_PL_START_FRAG].iov_base);
}

static void net_tx_pkt_tcp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_len,
                                        uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct ip6_header *ip6 = l3hdr->iov_base;
    size_t len = l3hdr->iov_len + l4hdr->iov_len + fragment_len;

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
        ip->ip_len = cpu_to_be16(len);
        eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV6:
        len -= sizeof(struct ip6_header);
        ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = cpu_to_be16(len);
        break;
    }
}

static void net_tx_pkt_tcp_fragment_advance(struct NetTxPkt *pkt,
                                            struct iovec *fragment,
                                            size_t fragment_len,
                                            uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct tcp_hdr *th = l4hdr->iov_base;

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4) {
        ip->ip_id = cpu_to_be16(be16_to_cpu(ip->ip_id) + 1);
    }

    th->th_seq = cpu_to_be32(be32_to_cpu(th->th_seq) + fragment_len);
    th->th_flags &= ~TH_CWR;
}

static void net_tx_pkt_udp_fragment_init(struct NetTxPkt *pkt,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx, size_t *src_offset,
                                         size_t *src_len)
{
    *pl_idx = NET_TX_PKT_PL_START_FRAG;
    *l4hdr_len = 0;
    *src_idx = NET_TX_PKT_PL_START_FRAG;
    *src_offset = 0;
    *src_len = IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size);
}

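/*
 * Update the IPv4 fragment offset, more-fragments bit and total
 * length for one UDP fragment, then refresh the header checksum.
 */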
static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_offset,
                                        size_t fragment_len)
{
    bool more_frags = fragment_offset + fragment_len < pkt->payload_len;
    uint16_t orig_flags;
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    uint16_t frag_off_units = fragment_offset / IP_FRAG_UNIT_SIZE;
    uint16_t new_ip_off;

    assert(fragment_offset % IP_FRAG_UNIT_SIZE == 0);
    assert((frag_off_units & ~IP_OFFMASK) == 0);

    orig_flags = be16_to_cpu(ip->ip_off) & ~(IP_OFFMASK | IP_MF);
    new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
    ip->ip_off = cpu_to_be16(new_ip_off);
    ip->ip_len = cpu_to_be16(l3hdr->iov_len + fragment_len);

    eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
}

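/*
 * Software GSO fallback: build a fragment template that shares the
 * packet's L2/L3 headers, then repeatedly gather the next chunk of
 * payload (sized from gso_size), fix up the headers and checksum, and
 * hand each fragment to the callback.
 */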
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
                                           NetTxPktCallback callback,
                                           void *context)
{
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;

    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len;
    size_t l4hdr_len;
    size_t src_len;

    int src_idx, dst_idx, pl_idx;
    size_t src_offset;
    size_t fragment_offset = 0;
    struct virtio_net_hdr virt_hdr = {
        .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ?
                 VIRTIO_NET_HDR_F_DATA_VALID : 0
    };

    /* Copy headers */
    fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr;
    fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr);
    fragment[NET_TX_PKT_L2HDR_FRAG] = pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    fragment[NET_TX_PKT_L3HDR_FRAG] = pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        if (!net_tx_pkt_tcp_fragment_init(pkt, fragment, &pl_idx, &l4hdr_len,
                                          &src_idx, &src_offset, &src_len)) {
            return false;
        }
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                              pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                              pkt->payload_len);
        net_tx_pkt_udp_fragment_init(pkt, &pl_idx, &l4hdr_len,
                                     &src_idx, &src_offset, &src_len);
        break;

    default:
        abort();
    }

    /* Put as much data as possible and send */
    while (true) {
        dst_idx = pl_idx;
        fragment_len = net_tx_pkt_fetch_fragment(pkt,
            &src_idx, &src_offset, src_len, fragment, &dst_idx);
        if (!fragment_len) {
            break;
        }

        switch (gso_type) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_TCPV6:
            net_tx_pkt_tcp_fragment_fix(pkt, fragment, fragment_len, gso_type);
            net_tx_pkt_do_sw_csum(pkt, fragment + NET_TX_PKT_L2HDR_FRAG,
                                  dst_idx - NET_TX_PKT_L2HDR_FRAG,
                                  l4hdr_len + fragment_len);
            break;

        case VIRTIO_NET_HDR_GSO_UDP:
            net_tx_pkt_udp_fragment_fix(pkt, fragment, fragment_offset,
                                        fragment_len);
            break;
        }

        callback(context,
                 fragment + NET_TX_PKT_L2HDR_FRAG, dst_idx - NET_TX_PKT_L2HDR_FRAG,
                 fragment + NET_TX_PKT_VHDR_FRAG, dst_idx - NET_TX_PKT_VHDR_FRAG);

        if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
            gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
            net_tx_pkt_tcp_fragment_advance(pkt, fragment, fragment_len,
                                            gso_type);
        }

        fragment_offset += fragment_len;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        net_tx_pkt_tcp_fragment_deinit(fragment);
    }

    return true;
}

bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool offload = qemu_get_using_vnet_hdr(nc->peer);
    return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
}

bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
                            NetTxPktCallback callback, void *context)
{
    assert(pkt);

    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;

    /*
     * The underlying infrastructure does not support IP datagrams
     * longer than 64K, so drop such packets without trying to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
            net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                                  pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                                  pkt->payload_len);
        }

        net_tx_pkt_fix_ip6_payload_len(pkt);
        callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_L2HDR_FRAG,
                 pkt->vec + NET_TX_PKT_VHDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_VHDR_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, callback, context);
}

void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
{
    struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
        /*
         * TODO: if QEMU ever supports >64K packets, add a jumbo option
         * check here, something like:
         * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
         */
        if (pkt->l3_hdr.ip6.ip6_plen == 0) {
            if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
                pkt->l3_hdr.ip6.ip6_plen = htons(pkt->payload_len);
            }
            /*
             * TODO: if QEMU ever supports >64K packets, add a jumbo
             * option for packets greater than 65,535 bytes.
             */
        }
    }
}