linux/drivers/net/ethernet/sun/sunvnet_common.c
   1/* sunvnet_common.c: Sun LDOM Virtual Network Driver.
   2 *
   3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
   4 * Copyright (C) 2016 Oracle. All rights reserved.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/kernel.h>
   9#include <linux/types.h>
  10#include <linux/slab.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/netdevice.h>
  14#include <linux/ethtool.h>
  15#include <linux/etherdevice.h>
  16#include <linux/mutex.h>
  17#include <linux/highmem.h>
  18#include <linux/if_vlan.h>
  19#define CREATE_TRACE_POINTS
  20#include <trace/events/sunvnet.h>
  21
  22#if IS_ENABLED(CONFIG_IPV6)
  23#include <linux/icmpv6.h>
  24#endif
  25
  26#include <net/ip.h>
  27#include <net/icmp.h>
  28#include <net/route.h>
  29
  30#include <asm/vio.h>
  31#include <asm/ldc.h>
  32
  33#include "sunvnet_common.h"
  34
   35/* Heuristic for the number of times to exponentially back off and
   36 * retry sending an LDC trigger when EAGAIN is encountered.
   37 */
  38#define VNET_MAX_RETRIES        10
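     /* With the retry loops below starting from a 1 usec delay and doubling
      * up to a 128 usec cap, ten retries bound each busy-wait to well under
      * a millisecond.
      */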
  39
  40static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
  41static void vnet_port_reset(struct vnet_port *port);
  42
  43static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
  44{
  45        return vio_dring_avail(dr, VNET_TX_RING_SIZE);
  46}
  47
  48static int vnet_handle_unknown(struct vnet_port *port, void *arg)
  49{
  50        struct vio_msg_tag *pkt = arg;
  51
  52        pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
  53               pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
  54        pr_err("Resetting connection\n");
  55
  56        ldc_disconnect(port->vio.lp);
  57
  58        return -ECONNRESET;
  59}
  60
  61static int vnet_port_alloc_tx_ring(struct vnet_port *port);
  62
  63int sunvnet_send_attr_common(struct vio_driver_state *vio)
  64{
  65        struct vnet_port *port = to_vnet_port(vio);
  66        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
  67        struct vio_net_attr_info pkt;
  68        int framelen = ETH_FRAME_LEN;
  69        int i, err;
  70
  71        err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
  72        if (err)
  73                return err;
  74
  75        memset(&pkt, 0, sizeof(pkt));
  76        pkt.tag.type = VIO_TYPE_CTRL;
  77        pkt.tag.stype = VIO_SUBTYPE_INFO;
  78        pkt.tag.stype_env = VIO_ATTR_INFO;
  79        pkt.tag.sid = vio_send_sid(vio);
  80        if (vio_version_before(vio, 1, 2))
  81                pkt.xfer_mode = VIO_DRING_MODE;
  82        else
  83                pkt.xfer_mode = VIO_NEW_DRING_MODE;
  84        pkt.addr_type = VNET_ADDR_ETHERMAC;
  85        pkt.ack_freq = 0;
  86        for (i = 0; i < 6; i++)
  87                pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
  88        if (vio_version_after(vio, 1, 3)) {
  89                if (port->rmtu) {
  90                        port->rmtu = min(VNET_MAXPACKET, port->rmtu);
  91                        pkt.mtu = port->rmtu;
  92                } else {
  93                        port->rmtu = VNET_MAXPACKET;
  94                        pkt.mtu = port->rmtu;
  95                }
  96                if (vio_version_after_eq(vio, 1, 6))
  97                        pkt.options = VIO_TX_DRING;
  98        } else if (vio_version_before(vio, 1, 3)) {
  99                pkt.mtu = framelen;
 100        } else { /* v1.3 */
 101                pkt.mtu = framelen + VLAN_HLEN;
 102        }
 103
 104        pkt.cflags = 0;
 105        if (vio_version_after_eq(vio, 1, 7) && port->tso) {
 106                pkt.cflags |= VNET_LSO_IPV4_CAPAB;
 107                if (!port->tsolen)
 108                        port->tsolen = VNET_MAXTSO;
 109                pkt.ipv4_lso_maxlen = port->tsolen;
 110        }
 111
 112        pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
 113
 114        viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
 115               "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
 116               "cflags[0x%04x] lso_max[%u]\n",
 117               pkt.xfer_mode, pkt.addr_type,
 118               (unsigned long long)pkt.addr,
 119               pkt.ack_freq, pkt.plnk_updt, pkt.options,
 120               (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
 121
 122        return vio_ldc_send(vio, &pkt, sizeof(pkt));
 123}
 124EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);
 125
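     /* Handle the peer's ATTR INFO packet: negotiate the MTU and LSO
      * parameters according to the per-version rules below, then ACK with
      * the agreed values, or NACK and reset the connection on a mismatch.
      */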
 126static int handle_attr_info(struct vio_driver_state *vio,
 127                            struct vio_net_attr_info *pkt)
 128{
 129        struct vnet_port *port = to_vnet_port(vio);
 130        u64     localmtu;
 131        u8      xfer_mode;
 132
 133        viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
 134               "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
 135               " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
 136               pkt->xfer_mode, pkt->addr_type,
 137               (unsigned long long)pkt->addr,
 138               pkt->ack_freq, pkt->plnk_updt, pkt->options,
 139               (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
 140               pkt->ipv4_lso_maxlen);
 141
 142        pkt->tag.sid = vio_send_sid(vio);
 143
 144        xfer_mode = pkt->xfer_mode;
 145        /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
 146        if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
 147                xfer_mode = VIO_NEW_DRING_MODE;
 148
 149        /* MTU negotiation:
 150         *      < v1.3 - ETH_FRAME_LEN exactly
 151         *      > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
 152         *                      pkt->mtu for ACK
 153         *      = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
 154         */
 155        if (vio_version_before(vio, 1, 3)) {
 156                localmtu = ETH_FRAME_LEN;
 157        } else if (vio_version_after(vio, 1, 3)) {
 158                localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
 159                localmtu = min(pkt->mtu, localmtu);
 160                pkt->mtu = localmtu;
 161        } else { /* v1.3 */
 162                localmtu = ETH_FRAME_LEN + VLAN_HLEN;
 163        }
 164        port->rmtu = localmtu;
 165
 166        /* LSO negotiation */
 167        if (vio_version_after_eq(vio, 1, 7))
 168                port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
 169        else
 170                port->tso = false;
 171        if (port->tso) {
 172                if (!port->tsolen)
 173                        port->tsolen = VNET_MAXTSO;
 174                port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
 175                if (port->tsolen < VNET_MINTSO) {
 176                        port->tso = false;
 177                        port->tsolen = 0;
 178                        pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
 179                }
 180                pkt->ipv4_lso_maxlen = port->tsolen;
 181        } else {
 182                pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
 183                pkt->ipv4_lso_maxlen = 0;
 184        }
 185
  186        /* for version >= 1.6, ACK with the packet mode we support */
 187        if (vio_version_after_eq(vio, 1, 6)) {
 188                pkt->xfer_mode = VIO_NEW_DRING_MODE;
 189                pkt->options = VIO_TX_DRING;
 190        }
 191
  192        if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
 193            pkt->addr_type != VNET_ADDR_ETHERMAC ||
 194            pkt->mtu != localmtu) {
 195                viodbg(HS, "SEND NET ATTR NACK\n");
 196
 197                pkt->tag.stype = VIO_SUBTYPE_NACK;
 198
 199                (void)vio_ldc_send(vio, pkt, sizeof(*pkt));
 200
 201                return -ECONNRESET;
 202        }
 203
 204        viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
 205               "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
 206               "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
 207               pkt->xfer_mode, pkt->addr_type,
 208               (unsigned long long)pkt->addr,
 209               pkt->ack_freq, pkt->plnk_updt, pkt->options,
 210               (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
 211               pkt->ipv4_lso_maxlen);
 212
 213        pkt->tag.stype = VIO_SUBTYPE_ACK;
 214
 215        return vio_ldc_send(vio, pkt, sizeof(*pkt));
 216}
 217
 218static int handle_attr_ack(struct vio_driver_state *vio,
 219                           struct vio_net_attr_info *pkt)
 220{
 221        viodbg(HS, "GOT NET ATTR ACK\n");
 222
 223        return 0;
 224}
 225
 226static int handle_attr_nack(struct vio_driver_state *vio,
 227                            struct vio_net_attr_info *pkt)
 228{
 229        viodbg(HS, "GOT NET ATTR NACK\n");
 230
 231        return -ECONNRESET;
 232}
 233
 234int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
 235{
 236        struct vio_net_attr_info *pkt = arg;
 237
 238        switch (pkt->tag.stype) {
 239        case VIO_SUBTYPE_INFO:
 240                return handle_attr_info(vio, pkt);
 241
 242        case VIO_SUBTYPE_ACK:
 243                return handle_attr_ack(vio, pkt);
 244
 245        case VIO_SUBTYPE_NACK:
 246                return handle_attr_nack(vio, pkt);
 247
 248        default:
 249                return -ECONNRESET;
 250        }
 251}
 252EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);
 253
 254void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
 255{
 256        struct vio_dring_state *dr;
 257
 258        dr = &vio->drings[VIO_DRIVER_RX_RING];
 259        dr->rcv_nxt = 1;
 260        dr->snd_nxt = 1;
 261
 262        dr = &vio->drings[VIO_DRIVER_TX_RING];
 263        dr->rcv_nxt = 1;
 264        dr->snd_nxt = 1;
 265}
 266EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);
 267
 268/* The hypervisor interface that implements copying to/from imported
 269 * memory from another domain requires that copies are done to 8-byte
 270 * aligned buffers, and that the lengths of such copies are also 8-byte
 271 * multiples.
 272 *
 273 * So we align skb->data to an 8-byte multiple and pad-out the data
 274 * area so we can round the copy length up to the next multiple of
 275 * 8 for the copy.
 276 *
 277 * The transmitter puts the actual start of the packet 6 bytes into
 278 * the buffer it sends over, so that the IP headers after the ethernet
 279 * header are aligned properly.  These 6 bytes are not in the descriptor
 280 * length, they are simply implied.  This offset is represented using
 281 * the VNET_PACKET_SKIP macro.
 282 */
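     /* For example, a 64-byte frame is described by the peer with size 64:
      * the receive path allocates 64 + VNET_PACKET_SKIP + 8 + 8 bytes,
      * aligns skb->data up to the next 8-byte boundary, copies in
      * (64 + VNET_PACKET_SKIP + 7) & ~7 = 72 bytes from the remote cookies,
      * and then pulls the skip and trims the skb back down to 64 bytes.
      */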
 283static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
 284                                           unsigned int len)
 285{
 286        struct sk_buff *skb;
 287        unsigned long addr, off;
 288
 289        skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
 290        if (unlikely(!skb))
 291                return NULL;
 292
 293        addr = (unsigned long)skb->data;
 294        off = ((addr + 7UL) & ~7UL) - addr;
 295        if (off)
 296                skb_reserve(skb, off);
 297
 298        return skb;
 299}
 300
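     /* Compute a full IPv4 TCP/UDP checksum in software.  Used on receive
      * when the peer flags a frame for full-checksum handling, and on
      * transmit to resolve CHECKSUM_PARTIAL before handing the buffer to
      * the peer.
      */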
 301static inline void vnet_fullcsum(struct sk_buff *skb)
 302{
 303        struct iphdr *iph = ip_hdr(skb);
 304        int offset = skb_transport_offset(skb);
 305
 306        if (skb->protocol != htons(ETH_P_IP))
 307                return;
 308        if (iph->protocol != IPPROTO_TCP &&
 309            iph->protocol != IPPROTO_UDP)
 310                return;
 311        skb->ip_summed = CHECKSUM_NONE;
 312        skb->csum_level = 1;
 313        skb->csum = 0;
 314        if (iph->protocol == IPPROTO_TCP) {
 315                struct tcphdr *ptcp = tcp_hdr(skb);
 316
 317                ptcp->check = 0;
 318                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
 319                ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
 320                                                skb->len - offset, IPPROTO_TCP,
 321                                                skb->csum);
 322        } else if (iph->protocol == IPPROTO_UDP) {
 323                struct udphdr *pudp = udp_hdr(skb);
 324
 325                pudp->check = 0;
 326                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
 327                pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
 328                                                skb->len - offset, IPPROTO_UDP,
 329                                                skb->csum);
 330        }
 331}
 332
 333static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
 334{
 335        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
 336        unsigned int len = desc->size;
 337        unsigned int copy_len;
 338        struct sk_buff *skb;
 339        int maxlen;
 340        int err;
 341
 342        err = -EMSGSIZE;
 343        if (port->tso && port->tsolen > port->rmtu)
 344                maxlen = port->tsolen;
 345        else
 346                maxlen = port->rmtu;
 347        if (unlikely(len < ETH_ZLEN || len > maxlen)) {
 348                dev->stats.rx_length_errors++;
 349                goto out_dropped;
 350        }
 351
 352        skb = alloc_and_align_skb(dev, len);
 353        err = -ENOMEM;
 354        if (unlikely(!skb)) {
 355                dev->stats.rx_missed_errors++;
 356                goto out_dropped;
 357        }
 358
 359        copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
 360        skb_put(skb, copy_len);
 361        err = ldc_copy(port->vio.lp, LDC_COPY_IN,
 362                       skb->data, copy_len, 0,
 363                       desc->cookies, desc->ncookies);
 364        if (unlikely(err < 0)) {
 365                dev->stats.rx_frame_errors++;
 366                goto out_free_skb;
 367        }
 368
 369        skb_pull(skb, VNET_PACKET_SKIP);
 370        skb_trim(skb, len);
 371        skb->protocol = eth_type_trans(skb, dev);
 372
 373        if (vio_version_after_eq(&port->vio, 1, 8)) {
 374                struct vio_net_dext *dext = vio_net_ext(desc);
 375
 376                skb_reset_network_header(skb);
 377
 378                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
  379                        if (skb->protocol == htons(ETH_P_IP)) {
 380                                struct iphdr *iph = ip_hdr(skb);
 381
 382                                iph->check = 0;
 383                                ip_send_check(iph);
 384                        }
 385                }
 386                if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
 387                    skb->ip_summed == CHECKSUM_NONE) {
 388                        if (skb->protocol == htons(ETH_P_IP)) {
 389                                struct iphdr *iph = ip_hdr(skb);
 390                                int ihl = iph->ihl * 4;
 391
 392                                skb_reset_transport_header(skb);
 393                                skb_set_transport_header(skb, ihl);
 394                                vnet_fullcsum(skb);
 395                        }
 396                }
 397                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
 398                        skb->ip_summed = CHECKSUM_PARTIAL;
 399                        skb->csum_level = 0;
 400                        if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
 401                                skb->csum_level = 1;
 402                }
 403        }
 404
 405        skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
 406
 407        dev->stats.rx_packets++;
 408        dev->stats.rx_bytes += len;
 409        napi_gro_receive(&port->napi, skb);
 410        return 0;
 411
 412out_free_skb:
 413        kfree_skb(skb);
 414
 415out_dropped:
 416        dev->stats.rx_dropped++;
 417        return err;
 418}
 419
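     /* ACK descriptors [start, end] of the RX dring with the given dring
      * state, retrying the LDC send with exponential backoff on -EAGAIN.
      * A STOPPED ack that could not be sent is remembered in port->stop_rx
      * so that __vnet_tx_trigger() can resend it before the next "start"
      * trigger.
      */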
 420static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 421                         u32 start, u32 end, u8 vio_dring_state)
 422{
 423        struct vio_dring_data hdr = {
 424                .tag = {
 425                        .type           = VIO_TYPE_DATA,
 426                        .stype          = VIO_SUBTYPE_ACK,
 427                        .stype_env      = VIO_DRING_DATA,
 428                        .sid            = vio_send_sid(&port->vio),
 429                },
 430                .dring_ident            = dr->ident,
 431                .start_idx              = start,
 432                .end_idx                = end,
 433                .state                  = vio_dring_state,
 434        };
 435        int err, delay;
 436        int retries = 0;
 437
 438        hdr.seq = dr->snd_nxt;
 439        delay = 1;
 440        do {
 441                err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
 442                if (err > 0) {
 443                        dr->snd_nxt++;
 444                        break;
 445                }
 446                udelay(delay);
 447                if ((delay <<= 1) > 128)
 448                        delay = 128;
 449                if (retries++ > VNET_MAX_RETRIES) {
 450                        pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
 451                                port->raddr[0], port->raddr[1],
 452                                port->raddr[2], port->raddr[3],
 453                                port->raddr[4], port->raddr[5]);
 454                        break;
 455                }
 456        } while (err == -EAGAIN);
 457
 458        if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
 459                port->stop_rx_idx = end;
 460                port->stop_rx = true;
 461        } else {
 462                port->stop_rx_idx = 0;
 463                port->stop_rx = false;
 464        }
 465
 466        return err;
 467}
 468
 469static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
 470                                        struct vio_dring_state *dr,
 471                                        u32 index)
 472{
 473        struct vio_net_desc *desc = port->vio.desc_buf;
 474        int err;
 475
 476        err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
 477                                  (index * dr->entry_size),
 478                                  dr->cookies, dr->ncookies);
 479        if (err < 0)
 480                return ERR_PTR(err);
 481
 482        return desc;
 483}
 484
 485static int put_rx_desc(struct vnet_port *port,
 486                       struct vio_dring_state *dr,
 487                       struct vio_net_desc *desc,
 488                       u32 index)
 489{
 490        int err;
 491
 492        err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
 493                                  (index * dr->entry_size),
 494                                  dr->cookies, dr->ncookies);
 495        if (err < 0)
 496                return err;
 497
 498        return 0;
 499}
 500
 501static int vnet_walk_rx_one(struct vnet_port *port,
 502                            struct vio_dring_state *dr,
 503                            u32 index, int *needs_ack)
 504{
 505        struct vio_net_desc *desc = get_rx_desc(port, dr, index);
 506        struct vio_driver_state *vio = &port->vio;
 507        int err;
 508
 509        BUG_ON(!desc);
 510        if (IS_ERR(desc))
 511                return PTR_ERR(desc);
 512
 513        if (desc->hdr.state != VIO_DESC_READY)
 514                return 1;
 515
 516        dma_rmb();
 517
 518        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
 519               desc->hdr.state, desc->hdr.ack,
 520               desc->size, desc->ncookies,
 521               desc->cookies[0].cookie_addr,
 522               desc->cookies[0].cookie_size);
 523
 524        err = vnet_rx_one(port, desc);
 525        if (err == -ECONNRESET)
 526                return err;
 527        trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
 528                          index, desc->hdr.ack);
 529        desc->hdr.state = VIO_DESC_DONE;
 530        err = put_rx_desc(port, dr, desc, index);
 531        if (err < 0)
 532                return err;
 533        *needs_ack = desc->hdr.ack;
 534        return 0;
 535}
 536
 537static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 538                        u32 start, u32 end, int *npkts, int budget)
 539{
 540        struct vio_driver_state *vio = &port->vio;
 541        int ack_start = -1, ack_end = -1;
 542        bool send_ack = true;
 543
 544        end = (end == (u32)-1) ? vio_dring_prev(dr, start)
 545                               : vio_dring_next(dr, end);
 546
 547        viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
 548
 549        while (start != end) {
 550                int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
 551
 552                if (err == -ECONNRESET)
 553                        return err;
 554                if (err != 0)
 555                        break;
 556                (*npkts)++;
 557                if (ack_start == -1)
 558                        ack_start = start;
 559                ack_end = start;
 560                start = vio_dring_next(dr, start);
 561                if (ack && start != end) {
 562                        err = vnet_send_ack(port, dr, ack_start, ack_end,
 563                                            VIO_DRING_ACTIVE);
 564                        if (err == -ECONNRESET)
 565                                return err;
 566                        ack_start = -1;
 567                }
 568                if ((*npkts) >= budget) {
 569                        send_ack = false;
 570                        break;
 571                }
 572        }
 573        if (unlikely(ack_start == -1)) {
 574                ack_end = vio_dring_prev(dr, start);
 575                ack_start = ack_end;
 576        }
 577        if (send_ack) {
 578                port->napi_resume = false;
 579                trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
 580                                               port->vio._peer_sid,
 581                                               ack_end, *npkts);
 582                return vnet_send_ack(port, dr, ack_start, ack_end,
 583                                     VIO_DRING_STOPPED);
  584        } else {
 585                trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
 586                                                port->vio._peer_sid,
 587                                                ack_end, *npkts);
 588                port->napi_resume = true;
 589                port->napi_stop_idx = ack_end;
 590                return 1;
 591        }
 592}
 593
 594static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
 595                   int budget)
 596{
 597        struct vio_dring_data *pkt = msgbuf;
 598        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
 599        struct vio_driver_state *vio = &port->vio;
 600
 601        viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
 602               pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
 603
 604        if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 605                return 0;
 606        if (unlikely(pkt->seq != dr->rcv_nxt)) {
 607                pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
 608                       pkt->seq, dr->rcv_nxt);
 609                return 0;
 610        }
 611
 612        if (!port->napi_resume)
 613                dr->rcv_nxt++;
 614
 615        /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
 616
 617        return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
 618                            npkts, budget);
 619}
 620
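     /* Return 1 if @end still lies within the outstanding window of the TX
      * dring, i.e. between the consumer and producer indices.
      */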
 621static int idx_is_pending(struct vio_dring_state *dr, u32 end)
 622{
 623        u32 idx = dr->cons;
 624        int found = 0;
 625
 626        while (idx != dr->prod) {
 627                if (idx == end) {
 628                        found = 1;
 629                        break;
 630                }
 631                idx = vio_dring_next(dr, idx);
 632        }
 633        return found;
 634}
 635
 636static int vnet_ack(struct vnet_port *port, void *msgbuf)
 637{
 638        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 639        struct vio_dring_data *pkt = msgbuf;
 640        struct net_device *dev;
 641        u32 end;
 642        struct vio_net_desc *desc;
 643        struct netdev_queue *txq;
 644
 645        if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 646                return 0;
 647
 648        end = pkt->end_idx;
 649        dev = VNET_PORT_TO_NET_DEVICE(port);
 650        netif_tx_lock(dev);
 651        if (unlikely(!idx_is_pending(dr, end))) {
 652                netif_tx_unlock(dev);
 653                return 0;
 654        }
 655
 656        /* sync for race conditions with vnet_start_xmit() and tell xmit it
 657         * is time to send a trigger.
 658         */
 659        trace_vnet_rx_stopped_ack(port->vio._local_sid,
 660                                  port->vio._peer_sid, end);
 661        dr->cons = vio_dring_next(dr, end);
 662        desc = vio_dring_entry(dr, dr->cons);
 663        if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
 664                /* vnet_start_xmit() just populated this dring but missed
 665                 * sending the "start" LDC message to the consumer.
 666                 * Send a "start" trigger on its behalf.
 667                 */
 668                if (__vnet_tx_trigger(port, dr->cons) > 0)
 669                        port->start_cons = false;
 670                else
 671                        port->start_cons = true;
 672        } else {
 673                port->start_cons = true;
 674        }
 675        netif_tx_unlock(dev);
 676
 677        txq = netdev_get_tx_queue(dev, port->q_index);
 678        if (unlikely(netif_tx_queue_stopped(txq) &&
 679                     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
 680                return 1;
 681
 682        return 0;
 683}
 684
 685static int vnet_nack(struct vnet_port *port, void *msgbuf)
 686{
 687        /* XXX just reset or similar XXX */
 688        return 0;
 689}
 690
 691static int handle_mcast(struct vnet_port *port, void *msgbuf)
 692{
 693        struct vio_net_mcast_info *pkt = msgbuf;
 694        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
 695
 696        if (pkt->tag.stype != VIO_SUBTYPE_ACK)
 697                pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
 698                       dev->name,
 699                       pkt->tag.type,
 700                       pkt->tag.stype,
 701                       pkt->tag.stype_env,
 702                       pkt->tag.sid);
 703
 704        return 0;
 705}
 706
 707/* Got back a STOPPED LDC message on port. If the queue is stopped,
 708 * wake it up so that we'll send out another START message at the
 709 * next TX.
 710 */
 711static void maybe_tx_wakeup(struct vnet_port *port)
 712{
 713        struct netdev_queue *txq;
 714
 715        txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
 716                                  port->q_index);
 717        __netif_tx_lock(txq, smp_processor_id());
 718        if (likely(netif_tx_queue_stopped(txq))) {
 719                struct vio_dring_state *dr;
 720
 721                dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 722                netif_tx_wake_queue(txq);
 723        }
 724        __netif_tx_unlock(txq);
 725}
 726
 727bool sunvnet_port_is_up_common(struct vnet_port *vnet)
 728{
 729        struct vio_driver_state *vio = &vnet->vio;
 730
 731        return !!(vio->hs_state & VIO_HS_COMPLETE);
 732}
 733EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);
 734
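     /* NAPI worker for one port: handle any pending LDC RESET/UP control
      * events, then drain data messages from the channel until the budget
      * is exhausted.  Returns the number of packets delivered up the stack.
      */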
 735static int vnet_event_napi(struct vnet_port *port, int budget)
 736{
 737        struct vio_driver_state *vio = &port->vio;
 738        int tx_wakeup, err;
 739        int npkts = 0;
 740        int event = (port->rx_event & LDC_EVENT_RESET);
 741
 742ldc_ctrl:
 743        if (unlikely(event == LDC_EVENT_RESET ||
 744                     event == LDC_EVENT_UP)) {
 745                vio_link_state_change(vio, event);
 746
 747                if (event == LDC_EVENT_RESET) {
 748                        vnet_port_reset(port);
 749                        vio_port_up(vio);
 750                }
 751                port->rx_event = 0;
 752                return 0;
 753        }
 754        /* We may have multiple LDC events in rx_event. Unroll send_events() */
 755        event = (port->rx_event & LDC_EVENT_UP);
 756        port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
 757        if (event == LDC_EVENT_UP)
 758                goto ldc_ctrl;
 759        event = port->rx_event;
 760        if (!(event & LDC_EVENT_DATA_READY))
 761                return 0;
 762
  763        /* we don't expect any bits other than RESET, UP, DATA_READY */
 764        BUG_ON(event != LDC_EVENT_DATA_READY);
 765
 766        err = 0;
 767        tx_wakeup = 0;
 768        while (1) {
 769                union {
 770                        struct vio_msg_tag tag;
 771                        u64 raw[8];
 772                } msgbuf;
 773
 774                if (port->napi_resume) {
 775                        struct vio_dring_data *pkt =
 776                                (struct vio_dring_data *)&msgbuf;
 777                        struct vio_dring_state *dr =
 778                                &port->vio.drings[VIO_DRIVER_RX_RING];
 779
 780                        pkt->tag.type = VIO_TYPE_DATA;
 781                        pkt->tag.stype = VIO_SUBTYPE_INFO;
 782                        pkt->tag.stype_env = VIO_DRING_DATA;
 783                        pkt->seq = dr->rcv_nxt;
 784                        pkt->start_idx = vio_dring_next(dr,
 785                                                        port->napi_stop_idx);
 786                        pkt->end_idx = -1;
 787                        goto napi_resume;
 788                }
 789                err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
 790                if (unlikely(err < 0)) {
 791                        if (err == -ECONNRESET)
 792                                vio_conn_reset(vio);
 793                        break;
 794                }
 795                if (err == 0)
 796                        break;
 797                viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
 798                       msgbuf.tag.type,
 799                       msgbuf.tag.stype,
 800                       msgbuf.tag.stype_env,
 801                       msgbuf.tag.sid);
 802                err = vio_validate_sid(vio, &msgbuf.tag);
 803                if (err < 0)
 804                        break;
 805napi_resume:
 806                if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
 807                        if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
 808                                if (!sunvnet_port_is_up_common(port)) {
 809                                        /* failures like handshake_failure()
 810                                         * may have cleaned up dring, but
 811                                         * NAPI polling may bring us here.
 812                                         */
 813                                        err = -ECONNRESET;
 814                                        break;
 815                                }
 816                                err = vnet_rx(port, &msgbuf, &npkts, budget);
 817                                if (npkts >= budget)
 818                                        break;
 819                                if (npkts == 0)
 820                                        break;
 821                        } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
 822                                err = vnet_ack(port, &msgbuf);
 823                                if (err > 0)
 824                                        tx_wakeup |= err;
 825                        } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
 826                                err = vnet_nack(port, &msgbuf);
 827                        }
 828                } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
 829                        if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
 830                                err = handle_mcast(port, &msgbuf);
 831                        else
 832                                err = vio_control_pkt_engine(vio, &msgbuf);
 833                        if (err)
 834                                break;
 835                } else {
 836                        err = vnet_handle_unknown(port, &msgbuf);
 837                }
 838                if (err == -ECONNRESET)
 839                        break;
 840        }
 841        if (unlikely(tx_wakeup && err != -ECONNRESET))
 842                maybe_tx_wakeup(port);
 843        return npkts;
 844}
 845
 846int sunvnet_poll_common(struct napi_struct *napi, int budget)
 847{
 848        struct vnet_port *port = container_of(napi, struct vnet_port, napi);
 849        struct vio_driver_state *vio = &port->vio;
 850        int processed = vnet_event_napi(port, budget);
 851
 852        if (processed < budget) {
 853                napi_complete(napi);
 854                port->rx_event &= ~LDC_EVENT_DATA_READY;
 855                vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
 856        }
 857        return processed;
 858}
 859EXPORT_SYMBOL_GPL(sunvnet_poll_common);
 860
 861void sunvnet_event_common(void *arg, int event)
 862{
 863        struct vnet_port *port = arg;
 864        struct vio_driver_state *vio = &port->vio;
 865
 866        port->rx_event |= event;
 867        vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
 868        napi_schedule(&port->napi);
 869}
 870EXPORT_SYMBOL_GPL(sunvnet_event_common);
 871
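     /* Send a "start" (DRING_DATA INFO) trigger for the descriptors
      * beginning at @start.  Any deferred STOPPED ack recorded in
      * port->stop_rx is flushed first, and the LDC send is retried with
      * exponential backoff on -EAGAIN.
      */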
 872static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
 873{
 874        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 875        struct vio_dring_data hdr = {
 876                .tag = {
 877                        .type           = VIO_TYPE_DATA,
 878                        .stype          = VIO_SUBTYPE_INFO,
 879                        .stype_env      = VIO_DRING_DATA,
 880                        .sid            = vio_send_sid(&port->vio),
 881                },
 882                .dring_ident            = dr->ident,
 883                .start_idx              = start,
 884                .end_idx                = (u32)-1,
 885        };
 886        int err, delay;
 887        int retries = 0;
 888
 889        if (port->stop_rx) {
 890                trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
 891                                                  port->vio._peer_sid,
 892                                                  port->stop_rx_idx, -1);
 893                err = vnet_send_ack(port,
 894                                    &port->vio.drings[VIO_DRIVER_RX_RING],
 895                                    port->stop_rx_idx, -1,
 896                                    VIO_DRING_STOPPED);
 897                if (err <= 0)
 898                        return err;
 899        }
 900
 901        hdr.seq = dr->snd_nxt;
 902        delay = 1;
 903        do {
 904                err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
 905                if (err > 0) {
 906                        dr->snd_nxt++;
 907                        break;
 908                }
 909                udelay(delay);
 910                if ((delay <<= 1) > 128)
 911                        delay = 128;
 912                if (retries++ > VNET_MAX_RETRIES)
 913                        break;
 914        } while (err == -EAGAIN);
 915        trace_vnet_tx_trigger(port->vio._local_sid,
 916                              port->vio._peer_sid, start, err);
 917
 918        return err;
 919}
 920
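     /* Walk the TX ring backwards from the producer index, unmapping and
      * collecting the skbs of descriptors the peer has marked DONE (chained
      * via skb->next for the caller to free) and counting still-READY
      * descriptors in *pending.
      */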
 921static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
 922                                          unsigned *pending)
 923{
 924        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 925        struct sk_buff *skb = NULL;
 926        int i, txi;
 927
 928        *pending = 0;
 929
 930        txi = dr->prod;
 931        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
 932                struct vio_net_desc *d;
 933
 934                --txi;
 935                if (txi < 0)
 936                        txi = VNET_TX_RING_SIZE - 1;
 937
 938                d = vio_dring_entry(dr, txi);
 939
 940                if (d->hdr.state == VIO_DESC_READY) {
 941                        (*pending)++;
 942                        continue;
 943                }
 944                if (port->tx_bufs[txi].skb) {
 945                        if (d->hdr.state != VIO_DESC_DONE)
 946                                pr_notice("invalid ring buffer state %d\n",
 947                                          d->hdr.state);
 948                        BUG_ON(port->tx_bufs[txi].skb->next);
 949
 950                        port->tx_bufs[txi].skb->next = skb;
 951                        skb = port->tx_bufs[txi].skb;
 952                        port->tx_bufs[txi].skb = NULL;
 953
 954                        ldc_unmap(port->vio.lp,
 955                                  port->tx_bufs[txi].cookies,
 956                                  port->tx_bufs[txi].ncookies);
 957                } else if (d->hdr.state == VIO_DESC_FREE) {
 958                        break;
 959                }
 960                d->hdr.state = VIO_DESC_FREE;
 961        }
 962        return skb;
 963}
 964
 965static inline void vnet_free_skbs(struct sk_buff *skb)
 966{
 967        struct sk_buff *next;
 968
 969        while (skb) {
 970                next = skb->next;
 971                skb->next = NULL;
 972                dev_kfree_skb(skb);
 973                skb = next;
 974        }
 975}
 976
 977void sunvnet_clean_timer_expire_common(unsigned long port0)
 978{
 979        struct vnet_port *port = (struct vnet_port *)port0;
 980        struct sk_buff *freeskbs;
 981        unsigned pending;
 982
 983        netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
 984        freeskbs = vnet_clean_tx_ring(port, &pending);
 985        netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));
 986
 987        vnet_free_skbs(freeskbs);
 988
 989        if (pending)
 990                (void)mod_timer(&port->clean_timer,
 991                                jiffies + VNET_CLEAN_TIMEOUT);
  992        else
 993                del_timer(&port->clean_timer);
 994}
 995EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
 996
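     /* Map the skb head (padded to at least ETH_ZLEN and rounded up to an
      * 8-byte multiple) and each page fragment into LDC cookies.  Returns
      * the number of cookies used, or a negative errno with any partial
      * mapping undone.
      */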
 997static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
 998                               struct ldc_trans_cookie *cookies, int ncookies,
 999                               unsigned int map_perm)
1000{
1001        int i, nc, err, blen;
1002
1003        /* header */
1004        blen = skb_headlen(skb);
1005        if (blen < ETH_ZLEN)
1006                blen = ETH_ZLEN;
1007        blen += VNET_PACKET_SKIP;
1008        blen += 8 - (blen & 7);
1009
1010        err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
1011                             ncookies, map_perm);
1012        if (err < 0)
1013                return err;
1014        nc = err;
1015
1016        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1017                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1018                u8 *vaddr;
1019
1020                if (nc < ncookies) {
1021                        vaddr = kmap_atomic(skb_frag_page(f));
1022                        blen = skb_frag_size(f);
1023                        blen += 8 - (blen & 7);
1024                        err = ldc_map_single(lp, vaddr + f->page_offset,
1025                                             blen, cookies + nc, ncookies - nc,
1026                                             map_perm);
1027                        kunmap_atomic(vaddr);
1028                } else {
1029                        err = -EMSGSIZE;
1030                }
1031
1032                if (err < 0) {
1033                        ldc_unmap(lp, cookies, nc);
1034                        return err;
1035                }
1036                nc += err;
1037        }
1038        return nc;
1039}
1040
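     /* Check that the skb satisfies the LDC mapping constraints: skb->data
      * sitting VNET_PACKET_SKIP bytes past an 8-byte boundary, headroom for
      * that skip, tailroom for padding, 8-byte aligned fragment offsets, and
      * no more fragments than available cookies.  Otherwise copy the data
      * into a freshly aligned skb, recomputing any pending checksum along
      * the way.
      */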
1041static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1042{
1043        struct sk_buff *nskb;
1044        int i, len, pad, docopy;
1045
1046        len = skb->len;
1047        pad = 0;
1048        if (len < ETH_ZLEN) {
1049                pad += ETH_ZLEN - skb->len;
1050                len += pad;
1051        }
1052        len += VNET_PACKET_SKIP;
1053        pad += 8 - (len & 7);
1054
1055        /* make sure we have enough cookies and alignment in every frag */
1056        docopy = skb_shinfo(skb)->nr_frags >= ncookies;
1057        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1058                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1059
1060                docopy |= f->page_offset & 7;
1061        }
1062        if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
1063            skb_tailroom(skb) < pad ||
1064            skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
1065                int start = 0, offset;
1066                __wsum csum;
1067
1068                len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
1069                nskb = alloc_and_align_skb(skb->dev, len);
1070                if (!nskb) {
1071                        dev_kfree_skb(skb);
1072                        return NULL;
1073                }
1074                skb_reserve(nskb, VNET_PACKET_SKIP);
1075
1076                nskb->protocol = skb->protocol;
1077                offset = skb_mac_header(skb) - skb->data;
1078                skb_set_mac_header(nskb, offset);
1079                offset = skb_network_header(skb) - skb->data;
1080                skb_set_network_header(nskb, offset);
1081                offset = skb_transport_header(skb) - skb->data;
1082                skb_set_transport_header(nskb, offset);
1083
1084                offset = 0;
1085                nskb->csum_offset = skb->csum_offset;
1086                nskb->ip_summed = skb->ip_summed;
1087
1088                if (skb->ip_summed == CHECKSUM_PARTIAL)
1089                        start = skb_checksum_start_offset(skb);
1090                if (start) {
1091                        struct iphdr *iph = ip_hdr(nskb);
1092                        int offset = start + nskb->csum_offset;
1093
1094                        if (skb_copy_bits(skb, 0, nskb->data, start)) {
1095                                dev_kfree_skb(nskb);
1096                                dev_kfree_skb(skb);
1097                                return NULL;
1098                        }
1099                        *(__sum16 *)(skb->data + offset) = 0;
1100                        csum = skb_copy_and_csum_bits(skb, start,
1101                                                      nskb->data + start,
1102                                                      skb->len - start, 0);
1103                        if (iph->protocol == IPPROTO_TCP ||
1104                            iph->protocol == IPPROTO_UDP) {
1105                                csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
1106                                                         skb->len - start,
1107                                                         iph->protocol, csum);
1108                        }
1109                        *(__sum16 *)(nskb->data + offset) = csum;
1110
1111                        nskb->ip_summed = CHECKSUM_NONE;
1112                } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
1113                        dev_kfree_skb(nskb);
1114                        dev_kfree_skb(skb);
1115                        return NULL;
1116                }
1117                (void)skb_put(nskb, skb->len);
1118                if (skb_is_gso(skb)) {
1119                        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1120                        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1121                }
1122                nskb->queue_mapping = skb->queue_mapping;
1123                dev_kfree_skb(skb);
1124                skb = nskb;
1125        }
1126        return skb;
1127}
1128
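     /* Software GSO fallback: when the peer's advertised TSO limit
      * (port->tsolen) is smaller than the skb, re-segment the skb here and
      * push each resulting segment through sunvnet_start_xmit_common().
      */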
1129static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
1130                                struct vnet_port *(*vnet_tx_port)
1131                                (struct sk_buff *, struct net_device *))
1132{
1133        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
1134        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1135        struct sk_buff *segs;
1136        int maclen, datalen;
1137        int status;
1138        int gso_size, gso_type, gso_segs;
1139        int hlen = skb_transport_header(skb) - skb_mac_header(skb);
1140        int proto = IPPROTO_IP;
1141
1142        if (skb->protocol == htons(ETH_P_IP))
1143                proto = ip_hdr(skb)->protocol;
1144        else if (skb->protocol == htons(ETH_P_IPV6))
1145                proto = ipv6_hdr(skb)->nexthdr;
1146
1147        if (proto == IPPROTO_TCP) {
1148                hlen += tcp_hdr(skb)->doff * 4;
1149        } else if (proto == IPPROTO_UDP) {
1150                hlen += sizeof(struct udphdr);
1151        } else {
1152                pr_err("vnet_handle_offloads GSO with unknown transport "
1153                       "protocol %d tproto %d\n", skb->protocol, proto);
1154                hlen = 128; /* XXX */
1155        }
1156        datalen = port->tsolen - hlen;
1157
1158        gso_size = skb_shinfo(skb)->gso_size;
1159        gso_type = skb_shinfo(skb)->gso_type;
1160        gso_segs = skb_shinfo(skb)->gso_segs;
1161
1162        if (port->tso && gso_size < datalen)
1163                gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
1164
1165        if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
1166                struct netdev_queue *txq;
1167
1168                txq  = netdev_get_tx_queue(dev, port->q_index);
1169                netif_tx_stop_queue(txq);
1170                if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
1171                        return NETDEV_TX_BUSY;
1172                netif_tx_wake_queue(txq);
1173        }
1174
1175        maclen = skb_network_header(skb) - skb_mac_header(skb);
1176        skb_pull(skb, maclen);
1177
1178        if (port->tso && gso_size < datalen) {
1179                if (skb_unclone(skb, GFP_ATOMIC))
1180                        goto out_dropped;
1181
1182                /* segment to TSO size */
1183                skb_shinfo(skb)->gso_size = datalen;
1184                skb_shinfo(skb)->gso_segs = gso_segs;
1185        }
1186        segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
1187        if (IS_ERR(segs))
1188                goto out_dropped;
1189
1190        skb_push(skb, maclen);
1191        skb_reset_mac_header(skb);
1192
1193        status = 0;
1194        while (segs) {
1195                struct sk_buff *curr = segs;
1196
1197                segs = segs->next;
1198                curr->next = NULL;
1199                if (port->tso && curr->len > dev->mtu) {
1200                        skb_shinfo(curr)->gso_size = gso_size;
1201                        skb_shinfo(curr)->gso_type = gso_type;
1202                        skb_shinfo(curr)->gso_segs =
1203                                DIV_ROUND_UP(curr->len - hlen, gso_size);
1204                } else {
1205                        skb_shinfo(curr)->gso_size = 0;
1206                }
1207
1208                skb_push(curr, maclen);
1209                skb_reset_mac_header(curr);
1210                memcpy(skb_mac_header(curr), skb_mac_header(skb),
1211                       maclen);
1212                curr->csum_start = skb_transport_header(curr) - curr->head;
1213                if (ip_hdr(curr)->protocol == IPPROTO_TCP)
1214                        curr->csum_offset = offsetof(struct tcphdr, check);
1215                else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
1216                        curr->csum_offset = offsetof(struct udphdr, check);
1217
1218                if (!(status & NETDEV_TX_MASK))
1219                        status = sunvnet_start_xmit_common(curr, dev,
1220                                                           vnet_tx_port);
1221                if (status & NETDEV_TX_MASK)
1222                        dev_kfree_skb_any(curr);
1223        }
1224
1225        if (!(status & NETDEV_TX_MASK))
1226                dev_kfree_skb_any(skb);
1227        return status;
1228out_dropped:
1229        dev->stats.tx_dropped++;
1230        dev_kfree_skb_any(skb);
1231        return NETDEV_TX_OK;
1232}
1233
1234int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
1235                              struct vnet_port *(*vnet_tx_port)
1236                              (struct sk_buff *, struct net_device *))
1237{
1238        struct vnet_port *port = NULL;
1239        struct vio_dring_state *dr;
1240        struct vio_net_desc *d;
1241        unsigned int len;
1242        struct sk_buff *freeskbs = NULL;
1243        int i, err, txi;
1244        unsigned pending = 0;
1245        struct netdev_queue *txq;
1246
1247        rcu_read_lock();
1248        port = vnet_tx_port(skb, dev);
1249        if (unlikely(!port)) {
1250                rcu_read_unlock();
1251                goto out_dropped;
1252        }
1253
1254        if (skb_is_gso(skb) && skb->len > port->tsolen) {
1255                err = vnet_handle_offloads(port, skb, vnet_tx_port);
1256                rcu_read_unlock();
1257                return err;
1258        }
1259
1260        if (!skb_is_gso(skb) && skb->len > port->rmtu) {
1261                unsigned long localmtu = port->rmtu - ETH_HLEN;
1262
1263                if (vio_version_after_eq(&port->vio, 1, 3))
1264                        localmtu -= VLAN_HLEN;
1265
1266                if (skb->protocol == htons(ETH_P_IP)) {
1267                        struct flowi4 fl4;
1268                        struct rtable *rt = NULL;
1269
1270                        memset(&fl4, 0, sizeof(fl4));
1271                        fl4.flowi4_oif = dev->ifindex;
1272                        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
1273                        fl4.daddr = ip_hdr(skb)->daddr;
1274                        fl4.saddr = ip_hdr(skb)->saddr;
1275
1276                        rt = ip_route_output_key(dev_net(dev), &fl4);
1277                        rcu_read_unlock();
1278                        if (!IS_ERR(rt)) {
1279                                skb_dst_set(skb, &rt->dst);
1280                                icmp_send(skb, ICMP_DEST_UNREACH,
1281                                          ICMP_FRAG_NEEDED,
1282                                          htonl(localmtu));
1283                        }
1284                }
1285#if IS_ENABLED(CONFIG_IPV6)
1286                else if (skb->protocol == htons(ETH_P_IPV6))
1287                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
1288#endif
1289                goto out_dropped;
1290        }
1291
1292        skb = vnet_skb_shape(skb, 2);
1293
1294        if (unlikely(!skb))
1295                goto out_dropped;
1296
1297        if (skb->ip_summed == CHECKSUM_PARTIAL)
1298                vnet_fullcsum(skb);
1299
1300        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1301        i = skb_get_queue_mapping(skb);
1302        txq = netdev_get_tx_queue(dev, i);
1303        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1304                if (!netif_tx_queue_stopped(txq)) {
1305                        netif_tx_stop_queue(txq);
1306
1307                        /* This is a hard error, log it. */
1308                        netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1309                        dev->stats.tx_errors++;
1310                }
1311                rcu_read_unlock();
1312                return NETDEV_TX_BUSY;
1313        }
1314
1315        d = vio_dring_cur(dr);
1316
1317        txi = dr->prod;
1318
1319        freeskbs = vnet_clean_tx_ring(port, &pending);
1320
1321        BUG_ON(port->tx_bufs[txi].skb);
1322
1323        len = skb->len;
1324        if (len < ETH_ZLEN)
1325                len = ETH_ZLEN;
1326
1327        err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
1328                           (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
1329        if (err < 0) {
1330                netdev_info(dev, "tx buffer map error %d\n", err);
1331                goto out_dropped;
1332        }
1333
1334        port->tx_bufs[txi].skb = skb;
1335        skb = NULL;
1336        port->tx_bufs[txi].ncookies = err;
1337
1338        /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
1339         * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
1340         * the protocol itself does not require it as long as the peer
1341         * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
1342         *
1343         * An ACK for every packet in the ring is expensive as the
1344         * sending of LDC messages is slow and affects performance.
1345         */
1346        d->hdr.ack = VIO_ACK_DISABLE;
1347        d->size = len;
1348        d->ncookies = port->tx_bufs[txi].ncookies;
1349        for (i = 0; i < d->ncookies; i++)
1350                d->cookies[i] = port->tx_bufs[txi].cookies[i];
1351        if (vio_version_after_eq(&port->vio, 1, 7)) {
1352                struct vio_net_dext *dext = vio_net_ext(d);
1353
1354                memset(dext, 0, sizeof(*dext));
1355                if (skb_is_gso(port->tx_bufs[txi].skb)) {
1356                        dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
1357                                             ->gso_size;
1358                        dext->flags |= VNET_PKT_IPV4_LSO;
1359                }
1360                if (vio_version_after_eq(&port->vio, 1, 8) &&
1361                    !port->switch_port) {
1362                        dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
1363                        dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
1364                }
1365        }
1366
1367        /* This has to be a non-SMP write barrier because we are writing
1368         * to memory which is shared with the peer LDOM.
1369         */
1370        dma_wmb();
1371
1372        d->hdr.state = VIO_DESC_READY;
1373
1374        /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
1375         * to notify the consumer that some descriptors are READY.
1376         * After that "start" trigger, no additional triggers are needed until
1377         * a DRING_STOPPED is received from the consumer. The dr->cons field
1378         * (set up by vnet_ack()) has the value of the next dring index
1379         * that has not yet been ack-ed. We send a "start" trigger here
1380         * if, and only if, start_cons is true (reset it afterward). Conversely,
1381         * vnet_ack() should check if the dring corresponding to cons
1382         * is marked READY, but start_cons was false.
1383         * If so, vnet_ack() should send out the missed "start" trigger.
1384         *
1385         * Note that the dma_wmb() above makes sure the cookies et al. are
1386         * not globally visible before the VIO_DESC_READY, and that the
1387         * stores are ordered correctly by the compiler. The consumer will
 1388         * not proceed until the VIO_DESC_READY is visible, ensuring that
 1389         * the consumer does not observe anything related to descriptors
 1390         * out of order. The HV trap from the LDC start trigger is the
 1391         * producer-to-consumer announcement that work is available to
 1392         * the consumer.
1393         */
1394        if (!port->start_cons) { /* previous trigger suffices */
1395                trace_vnet_skip_tx_trigger(port->vio._local_sid,
1396                                           port->vio._peer_sid, dr->cons);
1397                goto ldc_start_done;
1398        }
1399
1400        err = __vnet_tx_trigger(port, dr->cons);
1401        if (unlikely(err < 0)) {
1402                netdev_info(dev, "TX trigger error %d\n", err);
1403                d->hdr.state = VIO_DESC_FREE;
1404                skb = port->tx_bufs[txi].skb;
1405                port->tx_bufs[txi].skb = NULL;
1406                dev->stats.tx_carrier_errors++;
1407                goto out_dropped;
1408        }
1409
1410ldc_start_done:
1411        port->start_cons = false;
1412
1413        dev->stats.tx_packets++;
1414        dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
1415
1416        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1417        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1418                netif_tx_stop_queue(txq);
1419                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
1420                        netif_tx_wake_queue(txq);
1421        }
1422
1423        (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
1424        rcu_read_unlock();
1425
1426        vnet_free_skbs(freeskbs);
1427
1428        return NETDEV_TX_OK;
1429
1430out_dropped:
1431        if (pending)
1432                (void)mod_timer(&port->clean_timer,
1433                                jiffies + VNET_CLEAN_TIMEOUT);
1434        else if (port)
1435                del_timer(&port->clean_timer);
1436        if (port)
1437                rcu_read_unlock();
1438        if (skb)
1439                dev_kfree_skb(skb);
1440        vnet_free_skbs(freeskbs);
1441        dev->stats.tx_dropped++;
1442        return NETDEV_TX_OK;
1443}
1444EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
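
/* A minimal wiring sketch (illustrative only, names hypothetical): a front
 * end such as sunvnet or ldmvsw is expected to call the helper above from
 * its ndo_start_xmit handler, supplying a callback that maps the skb to a
 * vnet_port:
 *
 *	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		return sunvnet_start_xmit_common(skb, dev, my_tx_port_find);
 *	}
 *
 * where my_tx_port_find() is of the form
 *	struct vnet_port *my_tx_port_find(struct sk_buff *skb,
 *					  struct net_device *dev);
 */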
1445
1446void sunvnet_tx_timeout_common(struct net_device *dev)
1447{
1448        /* XXX Implement me XXX */
1449}
1450EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);
1451
1452int sunvnet_open_common(struct net_device *dev)
1453{
1454        netif_carrier_on(dev);
1455        netif_tx_start_all_queues(dev);
1456
1457        return 0;
1458}
1459EXPORT_SYMBOL_GPL(sunvnet_open_common);
1460
1461int sunvnet_close_common(struct net_device *dev)
1462{
1463        netif_tx_stop_all_queues(dev);
1464        netif_carrier_off(dev);
1465
1466        return 0;
1467}
1468EXPORT_SYMBOL_GPL(sunvnet_close_common);
1469
1470static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
1471{
1472        struct vnet_mcast_entry *m;
1473
1474        for (m = vp->mcast_list; m; m = m->next) {
1475                if (ether_addr_equal(m->addr, addr))
1476                        return m;
1477        }
1478        return NULL;
1479}
1480
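/* Merge the net_device's current multicast list into vp->mcast_list:
 * addresses still present get ->hit set, and addresses not seen before are
 * added with ->sent clear so that __send_mc_list() will advertise them.
 */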
1481static void __update_mc_list(struct vnet *vp, struct net_device *dev)
1482{
1483        struct netdev_hw_addr *ha;
1484
1485        netdev_for_each_mc_addr(ha, dev) {
1486                struct vnet_mcast_entry *m;
1487
1488                m = __vnet_mc_find(vp, ha->addr);
1489                if (m) {
1490                        m->hit = 1;
1491                        continue;
1492                }
1493
1494                /* Not yet tracked: allocate a new entry and link it in. */
1495                m = kzalloc(sizeof(*m), GFP_ATOMIC);
1496                if (!m)
1497                        continue;
1498
1499                memcpy(m->addr, ha->addr, ETH_ALEN);
1500                m->hit = 1;
1501
1502                m->next = vp->mcast_list;
1503                vp->mcast_list = m;
1504        }
1505}
1506
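/* Push the multicast delta to the peer as VNET_MCAST_INFO control messages,
 * at most VNET_NUM_MCAST addresses per message: first a "set" pass for
 * entries not yet sent, then a "clear" pass that also unlinks and frees
 * entries whose ->hit flag was not refreshed by __update_mc_list().
 */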
1507static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
1508{
1509        struct vio_net_mcast_info info;
1510        struct vnet_mcast_entry *m, **pp;
1511        int n_addrs;
1512
1513        memset(&info, 0, sizeof(info));
1514
1515        info.tag.type = VIO_TYPE_CTRL;
1516        info.tag.stype = VIO_SUBTYPE_INFO;
1517        info.tag.stype_env = VNET_MCAST_INFO;
1518        info.tag.sid = vio_send_sid(&port->vio);
1519        info.set = 1;
1520
1521        n_addrs = 0;
1522        for (m = vp->mcast_list; m; m = m->next) {
1523                if (m->sent)
1524                        continue;
1525                m->sent = 1;
1526                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1527                       m->addr, ETH_ALEN);
1528                if (++n_addrs == VNET_NUM_MCAST) {
1529                        info.count = n_addrs;
1530
1531                        (void)vio_ldc_send(&port->vio, &info,
1532                                           sizeof(info));
1533                        n_addrs = 0;
1534                }
1535        }
1536        if (n_addrs) {
1537                info.count = n_addrs;
1538                (void)vio_ldc_send(&port->vio, &info, sizeof(info));
1539        }
1540
1541        info.set = 0;
1542
1543        n_addrs = 0;
1544        pp = &vp->mcast_list;
1545        while ((m = *pp) != NULL) {
1546                if (m->hit) {
1547                        m->hit = 0;
1548                        pp = &m->next;
1549                        continue;
1550                }
1551
1552                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1553                       m->addr, ETH_ALEN);
1554                if (++n_addrs == VNET_NUM_MCAST) {
1555                        info.count = n_addrs;
1556                        (void)vio_ldc_send(&port->vio, &info,
1557                                           sizeof(info));
1558                        n_addrs = 0;
1559                }
1560
1561                *pp = m->next;
1562                kfree(m);
1563        }
1564        if (n_addrs) {
1565                info.count = n_addrs;
1566                (void)vio_ldc_send(&port->vio, &info, sizeof(info));
1567        }
1568}
1569
1570void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
1571{
1572        struct vnet_port *port;
1573
1574        rcu_read_lock();
1575        list_for_each_entry_rcu(port, &vp->port_list, list) {
1576                if (port->switch_port) {
1577                        __update_mc_list(vp, dev);
1578                        __send_mc_list(vp, port);
1579                        break;
1580                }
1581        }
1582        rcu_read_unlock();
1583}
1584EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);
1585
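/* Clamp to the classic limits: 68 is the smallest MTU an IPv4 host must
 * accept (RFC 791) and 65535 is the largest possible IP datagram.
 */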
1586int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
1587{
1588        if (new_mtu < 68 || new_mtu > 65535)
1589                return -EINVAL;
1590
1591        dev->mtu = new_mtu;
1592        return 0;
1593}
1594EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);
1595
1596int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
1597{
1598        return -EINVAL;
1599}
1600EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);
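
/* Sketch of how the exported helpers are meant to be consumed (this is not
 * the in-tree sunvnet.c/ldmvsw.c ops table; wrapper names are hypothetical,
 * and wrappers are needed where the common helper takes extra arguments):
 *
 *	static const struct net_device_ops my_vnet_ops = {
 *		.ndo_open		= sunvnet_open_common,
 *		.ndo_stop		= sunvnet_close_common,
 *		.ndo_set_rx_mode	= my_set_rx_mode,  // wraps sunvnet_set_rx_mode_common()
 *		.ndo_set_mac_address	= sunvnet_set_mac_addr_common,
 *		.ndo_tx_timeout		= sunvnet_tx_timeout_common,
 *		.ndo_change_mtu		= sunvnet_change_mtu_common,
 *		.ndo_start_xmit		= my_start_xmit,   // wraps sunvnet_start_xmit_common()
 *	};
 */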
1601
1602void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
1603{
1604        struct vio_dring_state *dr;
1605        int i;
1606
1607        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1608
1609        if (!dr->base)
1610                return;
1611
1612        for (i = 0; i < VNET_TX_RING_SIZE; i++) {
1613                struct vio_net_desc *d;
1614                void *skb = port->tx_bufs[i].skb;
1615
1616                if (!skb)
1617                        continue;
1618
1619                d = vio_dring_entry(dr, i);
1620
1621                ldc_unmap(port->vio.lp,
1622                          port->tx_bufs[i].cookies,
1623                          port->tx_bufs[i].ncookies);
1624                dev_kfree_skb(skb);
1625                port->tx_bufs[i].skb = NULL;
1626                d->hdr.state = VIO_DESC_FREE;
1627        }
1628        ldc_free_exp_dring(port->vio.lp, dr->base,
1629                           (dr->entry_size * dr->num_entries),
1630                           dr->cookies, dr->ncookies);
1631        dr->base = NULL;
1632        dr->entry_size = 0;
1633        dr->num_entries = 0;
1634        dr->pending = 0;
1635        dr->ncookies = 0;
1636}
1637EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
1638
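/* Quiesce the port: stop the clean timer, release the TX ring and buffers,
 * and drop per-session negotiation state (remote MTU, TSO limits) so the
 * next handshake starts from a clean slate.
 */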
1639static void vnet_port_reset(struct vnet_port *port)
1640{
1641        del_timer(&port->clean_timer);
1642        sunvnet_port_free_tx_bufs_common(port);
1643        port->rmtu = 0;
1644        port->tso = true;
1645        port->tsolen = 0;
1646}
1647
1648static int vnet_port_alloc_tx_ring(struct vnet_port *port)
1649{
1650        struct vio_dring_state *dr;
1651        unsigned long len, elen;
1652        int i, err, ncookies;
1653        void *dring;
1654
1655        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1656
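        /* Each ring entry is a vio_net_desc plus room for up to two LDC
         * transfer cookies; protocol 1.7+ additionally appends the
         * vio_net_dext area used for LSO/checksum hints.
         */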
1657        elen = sizeof(struct vio_net_desc) +
1658               sizeof(struct ldc_trans_cookie) * 2;
1659        if (vio_version_after_eq(&port->vio, 1, 7))
1660                elen += sizeof(struct vio_net_dext);
1661        len = VNET_TX_RING_SIZE * elen;
1662
1663        ncookies = VIO_MAX_RING_COOKIES;
1664        dring = ldc_alloc_exp_dring(port->vio.lp, len,
1665                                    dr->cookies, &ncookies,
1666                                    (LDC_MAP_SHADOW |
1667                                     LDC_MAP_DIRECT |
1668                                     LDC_MAP_RW));
1669        if (IS_ERR(dring)) {
1670                err = PTR_ERR(dring);
1671                goto err_out;
1672        }
1673
1674        dr->base = dring;
1675        dr->entry_size = elen;
1676        dr->num_entries = VNET_TX_RING_SIZE;
1677        dr->prod = 0;
1678        dr->cons = 0;
1679        port->start_cons = true; /* need an initial trigger */
1680        dr->pending = VNET_TX_RING_SIZE;
1681        dr->ncookies = ncookies;
1682
1683        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
1684                struct vio_net_desc *d;
1685
1686                d = vio_dring_entry(dr, i);
1687                d->hdr.state = VIO_DESC_FREE;
1688        }
1689        return 0;
1690
1691err_out:
1692        sunvnet_port_free_tx_bufs_common(port);
1693
1694        return err;
1695}
1696
1697#ifdef CONFIG_NET_POLL_CONTROLLER
1698void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
1699{
1700        struct vnet_port *port;
1701        unsigned long flags;
1702
1703        spin_lock_irqsave(&vp->lock, flags);
1704        if (!list_empty(&vp->port_list)) {
1705                port = list_entry(vp->port_list.next, struct vnet_port, list);
1706                napi_schedule(&port->napi);
1707        }
1708        spin_unlock_irqrestore(&vp->lock, flags);
1709}
1710EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
1711#endif
1712
1713void sunvnet_port_add_txq_common(struct vnet_port *port)
1714{
1715        struct vnet *vp = port->vp;
1716        int n;
1717
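        /* Hand out TX queue indices round-robin; VNET_MAX_TXQS is a power
         * of two, so the mask below acts as a cheap modulo.
         */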
1718        n = vp->nports++;
1719        n = n & (VNET_MAX_TXQS - 1);
1720        port->q_index = n;
1721        netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
1722                                                port->q_index));
1723}
1724EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
1725
1726void sunvnet_port_rm_txq_common(struct vnet_port *port)
1727{
1728        port->vp->nports--;
1729        netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
1730                                                port->q_index));
1731}
1732EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
1733