linux/drivers/net/xen-netback/netback.c
/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots an skb can use. If a guest
 * sends an skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area.  If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);

static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8       st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
                                             u16      id,
                                             s8       st,
                                             u16      offset,
                                             u16      size,
                                             u16      flags);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                       u16 idx)
{
        return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                         u16 idx)
{
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
        (vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing queue's structure from a pointer into its
 * pending_tx_info array: ubuf is embedded in pending_tx_info[pending_idx],
 * so stepping back pending_idx entries yields pending_tx_info[0], whose
 * container is the queue itself.
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
                container_of(ubuf, struct pending_tx_info, callback_struct);
        return container_of(temp - pending_idx,
                            struct xenvif_queue,
                            pending_tx_info[0]);
}

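/* While a tx request is outstanding, its pending_idx is stashed in the
 * frag's page_offset field; the frag only gets its real page and offset
 * later, in xenvif_fill_frags(), once the grant map has succeeded.
 */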
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
        return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
        frag->page_offset = pending_idx;
}

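/* MAX_PENDING_REQS is a power of two, so masking with (MAX_PENDING_REQS - 1)
 * wraps a free-running index onto the pending ring.
 */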
static inline pending_ring_idx_t pending_index(unsigned i)
{
        return i & (MAX_PENDING_REQS-1);
}

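/* Returns true when the frontend has posted enough Rx requests for the
 * skb at the head of the internal queue. If not, arm req_event so the
 * frontend notifies us when more requests arrive, then re-check
 * req_prod to close the race with a concurrent producer.
 */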
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        struct sk_buff *skb;
        int needed;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return false;

        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
                needed++;

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        __skb_queue_tail(&queue->rx_queue, skb);

        queue->rx_queue_len += skb->len;
        if (queue->rx_queue_len > queue->rx_queue_max)
                netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb)
                queue->rx_queue_len -= skb->len;

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}

static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
        spin_lock_irq(&queue->rx_queue.lock);

        if (queue->rx_queue_len < queue->rx_queue_max)
                netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

        spin_unlock_irq(&queue->rx_queue.lock);
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
        }
}

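/* State for building a batch of grant copy operations for guest Rx.
 * copy/meta are filled (copy_prod, meta_prod) while operations are set
 * up for queued skbs and drained (copy_cons, meta_cons) when responses
 * are generated; copy_off and copy_gref track how much of the current
 * frontend buffer is used and which grant backs it.
 */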
struct netrx_pending_operations {
        unsigned copy_prod, copy_cons;
        unsigned meta_prod, meta_cons;
        struct gnttab_copy *copy;
        struct xenvif_rx_meta *meta;
        int copy_off;
        grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
                                                 struct netrx_pending_operations *npo)
{
        struct xenvif_rx_meta *meta;
        struct xen_netif_rx_request req;

        RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);

        meta = npo->meta + npo->meta_prod++;
        meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
        meta->gso_size = 0;
        meta->size = 0;
        meta->id = req.id;

        npo->copy_off = 0;
        npo->copy_gref = req.gref;

        return meta;
}

struct gop_frag_copy {
        struct xenvif_queue *queue;
        struct netrx_pending_operations *npo;
        struct xenvif_rx_meta *meta;
        int head;
        int gso_type;

        struct page *page;
};

static void xenvif_setup_copy_gop(unsigned long gfn,
                                  unsigned int offset,
                                  unsigned int *len,
                                  struct gop_frag_copy *info)
{
        struct gnttab_copy *copy_gop;
        struct xen_page_foreign *foreign;
        /* Convenient aliases */
        struct xenvif_queue *queue = info->queue;
        struct netrx_pending_operations *npo = info->npo;
        struct page *page = info->page;

        BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

        if (npo->copy_off == MAX_BUFFER_OFFSET)
                info->meta = get_next_rx_buffer(queue, npo);

        if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
                *len = MAX_BUFFER_OFFSET - npo->copy_off;

        copy_gop = npo->copy + npo->copy_prod++;
        copy_gop->flags = GNTCOPY_dest_gref;
        copy_gop->len = *len;

        foreign = xen_page_foreign(page);
        if (foreign) {
                copy_gop->source.domid = foreign->domid;
                copy_gop->source.u.ref = foreign->gref;
                copy_gop->flags |= GNTCOPY_source_gref;
        } else {
                copy_gop->source.domid = DOMID_SELF;
                copy_gop->source.u.gmfn = gfn;
        }
        copy_gop->source.offset = offset;

        copy_gop->dest.domid = queue->vif->domid;
        copy_gop->dest.offset = npo->copy_off;
        copy_gop->dest.u.ref = npo->copy_gref;

        npo->copy_off += *len;
        info->meta->size += *len;

        /* Leave a gap for the GSO descriptor. */
        if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
                queue->rx.req_cons++;

        info->head = 0; /* There must be something in this buffer now */
}

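/* xenvif_setup_copy_gop() may shrink *len to what fits in the current
 * destination buffer, so walk the range and emit one copy operation per
 * chunk the helper actually consumes.
 */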
static void xenvif_gop_frag_copy_grant(unsigned long gfn,
                                       unsigned offset,
                                       unsigned int len,
                                       void *data)
{
        unsigned int bytes;

        while (len) {
                bytes = len;
                xenvif_setup_copy_gop(gfn, offset, &bytes, data);
                offset += bytes;
                len -= bytes;
        }
}

/*
 * Set up the grant copy operations for this fragment, splitting it at
 * page and grant boundaries as needed.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
                                 unsigned long offset, int *head)
{
        struct gop_frag_copy info = {
                .queue = queue,
                .npo = npo,
                .head = *head,
                .gso_type = XEN_NETIF_GSO_TYPE_NONE,
        };
        unsigned long bytes;

        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
        }

        /* Data must not cross a page boundary. */
        BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

        info.meta = npo->meta + npo->meta_prod - 1;

        /* Skip unused frames from start of page */
        page += offset >> PAGE_SHIFT;
        offset &= ~PAGE_MASK;

        while (size > 0) {
                BUG_ON(offset >= PAGE_SIZE);

                bytes = PAGE_SIZE - offset;
                if (bytes > size)
                        bytes = size;

                info.page = page;
                gnttab_foreach_grant_in_range(page, offset, bytes,
                                              xenvif_gop_frag_copy_grant,
                                              &info);
                size -= bytes;
                offset = 0;

                /* Next page */
                if (size) {
                        BUG_ON(!PageCompound(page));
                        page++;
                }
        }

        *head = info.head;
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
                          struct netrx_pending_operations *npo,
                          struct xenvif_queue *queue)
{
        struct xenvif *vif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int i;
        struct xen_netif_rx_request req;
        struct xenvif_rx_meta *meta;
        unsigned char *data;
        int head = 1;
        int old_meta_prod;
        int gso_type;

        old_meta_prod = npo->meta_prod;

        gso_type = XEN_NETIF_GSO_TYPE_NONE;
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
        }

        /* Set up a GSO prefix descriptor, if necessary */
        if ((1 << gso_type) & vif->gso_prefix_mask) {
                RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
                meta = npo->meta + npo->meta_prod++;
                meta->gso_type = gso_type;
                meta->gso_size = skb_shinfo(skb)->gso_size;
                meta->size = 0;
                meta->id = req.id;
        }

        RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
        meta = npo->meta + npo->meta_prod++;

        if ((1 << gso_type) & vif->gso_mask) {
                meta->gso_type = gso_type;
                meta->gso_size = skb_shinfo(skb)->gso_size;
        } else {
                meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
                meta->gso_size = 0;
        }

        meta->size = 0;
        meta->id = req.id;
        npo->copy_off = 0;
        npo->copy_gref = req.gref;

        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
                unsigned int len = PAGE_SIZE - offset;

                if (data + len > skb_tail_pointer(skb))
                        len = skb_tail_pointer(skb) - data;

                xenvif_gop_frag_copy(queue, skb, npo,
                                     virt_to_page(data), len, offset, &head);
                data += len;
        }

        for (i = 0; i < nr_frags; i++) {
                xenvif_gop_frag_copy(queue, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
                                     &head);
        }

        return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
                            struct netrx_pending_operations *npo)
{
        struct gnttab_copy     *copy_op;
        int status = XEN_NETIF_RSP_OKAY;
        int i;

        for (i = 0; i < nr_meta_slots; i++) {
                copy_op = npo->copy + npo->copy_cons++;
                if (copy_op->status != GNTST_okay) {
                        netdev_dbg(vif->dev,
                                   "Bad status %d from copy to DOM%d.\n",
                                   copy_op->status, vif->domid);
                        status = XEN_NETIF_RSP_ERROR;
                }
        }

        return status;
}

static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
                                      struct xenvif_rx_meta *meta,
                                      int nr_meta_slots)
{
        int i;
        unsigned long offset;

        /* No fragments used */
        if (nr_meta_slots <= 1)
                return;

        nr_meta_slots--;

        for (i = 0; i < nr_meta_slots; i++) {
                int flags;

                if (i == nr_meta_slots - 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;

                offset = 0;
                make_rx_response(queue, meta[i].id, status, offset,
                                 meta[i].size, flags);
        }
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
        wake_up(&queue->wq);
}

static void xenvif_rx_action(struct xenvif_queue *queue)
{
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
        LIST_HEAD(notify);
        int ret;
        unsigned long offset;
        bool need_to_notify = false;

        struct netrx_pending_operations npo = {
                .copy  = queue->grant_copy_op,
                .meta  = queue->meta,
        };

        skb_queue_head_init(&rxq);

        while (xenvif_rx_ring_slots_available(queue)
               && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                queue->last_rx_time = jiffies;

                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);

                __skb_queue_tail(&rxq, skb);
        }

        BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

        if (!npo.copy_prod)
                goto done;

        BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
                    queue->vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&queue->rx,
                                                 queue->rx.rsp_prod_pvt++);

                        resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

                        resp->offset = queue->meta[npo.meta_cons].gso_size;
                        resp->id = queue->meta[npo.meta_cons].id;
                        resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

                        npo.meta_cons++;
                        XENVIF_RX_CB(skb)->meta_slots_used--;
                }

                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;

                status = xenvif_check_gop(queue->vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);

                if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;

                if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
                        flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        /* remote but checksummed. */
                        flags |= XEN_NETRXF_data_validated;

                offset = 0;
                resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
                                        status, offset,
                                        queue->meta[npo.meta_cons].size,
                                        flags);

                if ((1 << queue->meta[npo.meta_cons].gso_type) &
                    queue->vif->gso_mask) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
                                                  queue->rx.rsp_prod_pvt++);

                        resp->flags |= XEN_NETRXF_extra_info;

                        gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
                        gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;

                        gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                        gso->flags = 0;
                }

                xenvif_add_frag_responses(queue, status,
                                          queue->meta + npo.meta_cons + 1,
                                          XENVIF_RX_CB(skb)->meta_slots_used);

                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

                need_to_notify |= !!ret;

                npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
                dev_kfree_skb(skb);
        }

done:
        if (need_to_notify)
                notify_remote_via_irq(queue->rx_irq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
        int more_to_do;

        RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

        if (more_to_do)
                napi_schedule(&queue->napi);
}

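/* Top up a queue's transmit credit. One credit_bytes chunk is added per
 * credit window; remaining_credit is clamped to the burst limit so an
 * idle guest cannot accumulate an unbounded burst.
 */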
static void tx_add_credit(struct xenvif_queue *queue)
{
        unsigned long max_burst, max_credit;

        /*
         * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
         * Otherwise the interface can seize up due to insufficient credit.
         */
        max_burst = max(131072UL, queue->credit_bytes);

        /* Take care that adding a new chunk of credit doesn't wrap to zero. */
        max_credit = queue->remaining_credit + queue->credit_bytes;
        if (max_credit < queue->remaining_credit)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

        queue->remaining_credit = min(max_credit, max_burst);
}

void xenvif_tx_credit_callback(unsigned long data)
{
        struct xenvif_queue *queue = (struct xenvif_queue *)data;

        tx_add_credit(queue);
        xenvif_napi_schedule_or_enable_events(queue);
}

static void xenvif_tx_err(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *txp,
                          unsigned int extra_count, RING_IDX end)
{
        RING_IDX cons = queue->tx.req_cons;
        unsigned long flags;

        do {
                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
                push_tx_responses(queue);
                spin_unlock_irqrestore(&queue->response_lock, flags);
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
                extra_count = 0; /* only the first frag can have extras */
        } while (1);
        queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
        netdev_err(vif->dev, "fatal error; disabling device\n");
        vif->disabled = true;
        /* Disable the vif from queue 0's kthread */
        if (vif->queues)
                xenvif_kick_thread(&vif->queues[0]);
}

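/* Walk the chain of XEN_NETTXF_more_data slots that follow @first,
 * copying them into @txp. Returns the number of follow-on slots (0 for
 * a single-slot packet) or a negative errno. Protocol violations
 * disable the vif; merely oversized packets are consumed and dropped
 * via xenvif_tx_err() instead.
 */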
static int xenvif_count_requests(struct xenvif_queue *queue,
                                 struct xen_netif_tx_request *first,
                                 unsigned int extra_count,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
{
        RING_IDX cons = queue->tx.req_cons;
        int slots = 0;
        int drop_err = 0;
        int more_data;

        if (!(first->flags & XEN_NETTXF_more_data))
                return 0;

        do {
                struct xen_netif_tx_request dropped_tx = { 0 };

                if (slots >= work_to_do) {
                        netdev_err(queue->vif->dev,
                                   "Asked for %d slots but exceeds this limit\n",
                                   work_to_do);
                        xenvif_fatal_tx_err(queue->vif);
                        return -ENODATA;
                }

                /* This guest is really using too many slots and is
                 * considered malicious.
                 */
                if (unlikely(slots >= fatal_skb_slots)) {
                        netdev_err(queue->vif->dev,
                                   "Malicious frontend using %d slots, threshold %u\n",
                                   slots, fatal_skb_slots);
                        xenvif_fatal_tx_err(queue->vif);
                        return -E2BIG;
                }

                /* The Xen network protocol had an implicit dependency on
                 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
                 * the historical MAX_SKB_FRAGS value 18 to honor the
                 * same behavior as before. Any packet using more than
                 * 18 slots but fewer than fatal_skb_slots slots is
                 * dropped.
                 */
                if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
                        drop_err = -E2BIG;
                }

                if (drop_err)
                        txp = &dropped_tx;

                RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

                /* If the guest submitted a frame >= 64 KiB then
                 * first->size overflowed and following slots will
                 * appear to be larger than the frame.
                 *
                 * This cannot be a fatal error as there are buggy
                 * frontends that do this.
                 *
                 * Consume all slots and drop the packet.
                 */
                if (!drop_err && txp->size > first->size) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Invalid tx request, slot size %u > remaining size %u\n",
                                           txp->size, first->size);
                        drop_err = -EIO;
                }

                first->size -= txp->size;
                slots++;

                if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
                        netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
                                   txp->offset, txp->size);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                more_data = txp->flags & XEN_NETTXF_more_data;

                if (!drop_err)
                        txp++;

        } while (more_data);

        if (drop_err) {
                xenvif_tx_err(queue, first, extra_count, cons + slots);
                return drop_err;
        }

        return slots;
}

struct xenvif_tx_cb {
        u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           struct xen_netif_tx_request *txp,
                                           unsigned int extra_count,
                                           struct gnttab_map_grant_ref *mop)
{
        queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
        gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
                          GNTMAP_host_map | GNTMAP_readonly,
                          txp->gref, queue->vif->domid);

        memcpy(&queue->pending_tx_info[pending_idx].req, txp,
               sizeof(*txp));
        queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
        struct sk_buff *skb =
                alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                          GFP_ATOMIC | __GFP_NOWARN);

        if (unlikely(skb == NULL))
                return NULL;

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

        /* Initialize it here to avoid later surprises */
        skb_shinfo(skb)->destructor_arg = NULL;

        return skb;
}

static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
                                                        struct gnttab_map_grant_ref *gop,
                                                        unsigned int frag_overflow,
                                                        struct sk_buff *nskb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
        unsigned int nr_slots;

        nr_slots = shinfo->nr_frags;

        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

        for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
             shinfo->nr_frags++, txp++, gop++) {
                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
                xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
        }

        if (frag_overflow) {
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;

                for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
                     shinfo->nr_frags++, txp++, gop++) {
                        index = pending_index(queue->pending_cons++);
                        pending_idx = queue->pending_ring[index];
                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
                                                gop);
                        frag_set_pending_idx(&frags[shinfo->nr_frags],
                                             pending_idx);
                }

                skb_shinfo(skb)->frag_list = nskb;
        }

        return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           grant_handle_t handle)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] !=
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to overwrite active handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
                                             u16 pending_idx)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] ==
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to unmap invalid handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

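/* Check the result of the header grant copy and of every frag grant map
 * for this skb (and for its frag_list skb, if any). On the first error
 * the remaining slots of the packet are released; on subsequent errors
 * only unmapping is needed, since earlier frags have already been dealt
 * with. Returns 0 if all operations succeeded, otherwise a nonzero
 * grant status.
 */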
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct sk_buff *skb,
                               struct gnttab_map_grant_ref **gopp_map,
                               struct gnttab_copy **gopp_copy)
{
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        /* This always points to the shinfo of the skb being checked, which
         * could be either the first or the one on the frag_list
         */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        /* If this is non-NULL, we are currently checking the frag_list skb, and
         * this points to the shinfo of the first one
         */
        struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
        const bool sharedslot = nr_frags &&
                                frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;

        /* Check status of header. */
        err = (*gopp_copy)->status;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
                                   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
                /* The first frag might still have this slot mapped */
                if (!sharedslot)
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_ERROR);
        }
        (*gopp_copy)++;

check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
                int j, newerr;

                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

                /* Check error status: if okay then remember grant handle. */
                newerr = gop_map->status;

                if (likely(!newerr)) {
                        xenvif_grant_handle_set(queue,
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
                                /* If the mapping of the first frag was OK, but
                                 * the header's copy failed, and they are
                                 * sharing a slot, send an error
                                 */
                                if (i == 0 && sharedslot)
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_ERROR);
                                else
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_OKAY);
                        }
                        continue;
                }

                /* Error on this fragment: respond to client with an error. */
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
                                   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
                                   i,
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);

                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;

                /* First error: if the header hasn't shared a slot with the
                 * first frag, release it as well.
                 */
                if (!sharedslot)
                        xenvif_idx_release(queue,
                                           XENVIF_TX_CB(skb)->pending_idx,
                                           XEN_NETIF_RSP_OKAY);

                /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                /* And if we found the error while checking the frag_list, unmap
                 * the first skb's frags
                 */
                if (first_shinfo) {
                        for (j = 0; j < first_shinfo->nr_frags; j++) {
                                pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
                                xenvif_idx_unmap(queue, pending_idx);
                                xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_OKAY);
                        }
                }

                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }

        if (skb_has_frag_list(skb) && !first_shinfo) {
                first_shinfo = skb_shinfo(skb);
                shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;

                goto check_frags;
        }

        *gopp_map = gop_map;
        return err;
}

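/* Once the grant maps for an skb have succeeded, turn the stored
 * pending slots into real skb frags pointing at the mapped pages, and
 * chain the zerocopy callback structures so that a single completion
 * releases every slot.
 */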
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;
        u16 prev_pending_idx = INVALID_PENDING_IDX;

        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
                struct xen_netif_tx_request *txp;
                struct page *page;
                u16 pending_idx;

                pending_idx = frag_get_pending_idx(frag);

                /* If this is not the first frag, chain it to the previous one */
                if (prev_pending_idx == INVALID_PENDING_IDX)
                        skb_shinfo(skb)->destructor_arg =
                                &callback_param(queue, pending_idx);
                else
                        callback_param(queue, prev_pending_idx).ctx =
                                &callback_param(queue, pending_idx);

                callback_param(queue, pending_idx).ctx = NULL;
                prev_pending_idx = pending_idx;

                txp = &queue->pending_tx_info[pending_idx].req;
                page = virt_to_page(idx_to_kaddr(queue, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;

                /* Take an extra reference to offset the network stack's put_page */
                get_page(queue->mmap_pages[pending_idx]);
        }
}

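/* Consume the extra-info slots that follow a request with
 * XEN_NETTXF_extra_info set. Each extra occupies one ring slot and is
 * stored in extras[] indexed by its type; malformed extras are fatal.
 */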
static int xenvif_get_extras(struct xenvif_queue *queue,
                             struct xen_netif_extra_info *extras,
                             unsigned int *extra_count,
                             int work_to_do)
{
        struct xen_netif_extra_info extra;
        RING_IDX cons = queue->tx.req_cons;

        do {
                if (unlikely(work_to_do-- <= 0)) {
                        netdev_err(queue->vif->dev, "Missing extra info\n");
                        xenvif_fatal_tx_err(queue->vif);
                        return -EBADR;
                }

                RING_COPY_REQUEST(&queue->tx, cons, &extra);

                queue->tx.req_cons = ++cons;
                (*extra_count)++;

                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        netdev_err(queue->vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
                              struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                netdev_err(vif->dev, "GSO size must not be zero.\n");
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        switch (gso->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                break;
        case XEN_NETIF_GSO_TYPE_TCPV6:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                break;
        default:
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        /* gso_segs will be calculated later */

        return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                queue->stats.rx_gso_checksum_fixup++;
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}

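/* Decide whether a packet of @size bytes may be sent now under the
 * credit scheduler. The guest earns credit_bytes of credit every
 * credit_usec microseconds: e.g. with credit_bytes = 125000 and
 * credit_usec = 10000 (hypothetical values, configured via xenstore),
 * a queue is limited to roughly 100 Mbit/s. If there is not enough
 * credit, a timer is armed to retry when the window next refills.
 */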
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
        u64 now = get_jiffies_64();
        u64 next_credit = queue->credit_window_start +
                msecs_to_jiffies(queue->credit_usec / 1000);

        /* Timer could already be pending in rare cases. */
        if (timer_pending(&queue->credit_timeout))
                return true;

        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
                queue->credit_window_start = now;
                tx_add_credit(queue);
        }

        /* Still too big to send right now? Set a callback. */
        if (size > queue->remaining_credit) {
                queue->credit_timeout.data = (unsigned long)queue;
                mod_timer(&queue->credit_timeout, next_credit);
                queue->credit_window_start = next_credit;

                return true;
        }

        return false;
}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
                if (net_ratelimit())
                        netdev_err(vif->dev,
                                   "Too many multicast addresses\n");
                return -ENOSPC;
        }

        mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
        if (!mcast)
                return -ENOMEM;

        ether_addr_copy(mcast->addr, addr);
        list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
        vif->fe_mcast_count++;

        return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        --vif->fe_mcast_count;
                        list_del_rcu(&mcast->entry);
                        kfree_rcu(mcast, rcu);
                        break;
                }
        }
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        rcu_read_lock();
        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
        /* No need for locking or RCU here. NAPI poll and TX queue
         * are stopped.
         */
        while (!list_empty(&vif->fe_mcast_addr)) {
                struct xenvif_mcast_addr *mcast;

                mcast = list_first_entry(&vif->fe_mcast_addr,
                                         struct xenvif_mcast_addr,
                                         entry);
                --vif->fe_mcast_count;
                list_del(&mcast->entry);
                kfree(mcast);
        }
}

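/* Main guest Tx loop: pull requests off the shared ring and, for each
 * packet, build a grant copy op for the first XEN_NETBACK_TX_COPY_LEN
 * bytes (the skb's linear area) plus one grant map op per remaining
 * slot, to be submitted in a single batch by the caller.
 */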
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     int budget,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
{
        struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
        struct sk_buff *skb, *nskb;
        int ret;
        unsigned int frag_overflow;

        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                unsigned int extra_count;
                u16 pending_idx;
                RING_IDX idx;
                int work_to_do;
                unsigned int data_len;
                pending_ring_idx_t index;

                if (queue->tx.sring->req_prod - queue->tx.req_cons >
                    XEN_NETIF_TX_RING_SIZE) {
                        netdev_err(queue->vif->dev,
                                   "Impossible number of requests. "
                                   "req_prod %d, req_cons %d, size %ld\n",
                                   queue->tx.sring->req_prod, queue->tx.req_cons,
                                   XEN_NETIF_TX_RING_SIZE);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }

                work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
                if (!work_to_do)
                        break;

                idx = queue->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
                RING_COPY_REQUEST(&queue->tx, idx, &txreq);

                /* Credit-based scheduling. */
                if (txreq.size > queue->remaining_credit &&
                    tx_credit_exceeded(queue, txreq.size))
                        break;

                queue->remaining_credit -= txreq.size;

                work_to_do--;
                queue->tx.req_cons = ++idx;

                memset(extras, 0, sizeof(extras));
                extra_count = 0;
                if (txreq.flags & XEN_NETTXF_extra_info) {
                        work_to_do = xenvif_get_extras(queue, extras,
                                                       &extra_count,
                                                       work_to_do);
                        idx = queue->tx.req_cons;
                        if (unlikely(work_to_do < 0))
                                break;
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
                        struct xen_netif_extra_info *extra;

                        extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
                        ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

                        make_tx_response(queue, &txreq, extra_count,
                                         (ret == 0) ?
                                         XEN_NETIF_RSP_OKAY :
                                         XEN_NETIF_RSP_ERROR);
                        push_tx_responses(queue);
                        continue;
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
                        struct xen_netif_extra_info *extra;

                        extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
                        xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

                        make_tx_response(queue, &txreq, extra_count,
                                         XEN_NETIF_RSP_OKAY);
                        push_tx_responses(queue);
                        continue;
                }

                ret = xenvif_count_requests(queue, &txreq, extra_count,
                                            txfrags, work_to_do);
                if (unlikely(ret < 0))
                        break;

                idx += ret;

                if (unlikely(txreq.size < ETH_HLEN)) {
                        netdev_dbg(queue->vif->dev,
                                   "Bad packet size: %d\n", txreq.size);
                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                        break;
                }

                /* The payload mustn't cross a page boundary, as it cannot be fragmented. */
                if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
                        netdev_err(queue->vif->dev,
                                   "txreq.offset: %u, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }

                index = pending_index(queue->pending_cons);
                pending_idx = queue->pending_ring[index];

                data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                        XEN_NETBACK_TX_COPY_LEN : txreq.size;

                skb = xenvif_alloc_skb(data_len);
                if (unlikely(skb == NULL)) {
                        netdev_dbg(queue->vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
                        xenvif_tx_err(queue, &txreq, extra_count, idx);
                        break;
                }

                skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size)
                        skb_shinfo(skb)->nr_frags++;
                /* At this point shinfo->nr_frags is in fact the number of
                 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
                 */
                frag_overflow = 0;
                nskb = NULL;
                if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
                        frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
                        BUG_ON(frag_overflow > MAX_SKB_FRAGS);
                        skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
                        nskb = xenvif_alloc_skb(0);
                        if (unlikely(nskb == NULL)) {
                                kfree_skb(skb);
                                xenvif_tx_err(queue, &txreq, extra_count, idx);
                                if (net_ratelimit())
                                        netdev_err(queue->vif->dev,
                                                   "Can't allocate the frag_list skb.\n");
                                break;
                        }
                }

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;

                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
                                kfree_skb(nskb);
                                break;
                        }
                }

                XENVIF_TX_CB(skb)->pending_idx = pending_idx;

                __skb_put(skb, data_len);
                queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
                queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
                queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

                queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
                        virt_to_gfn(skb->data);
                queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
                queue->tx_copy_ops[*copy_ops].dest.offset =
                        offset_in_page(skb->data) & ~XEN_PAGE_MASK;

                queue->tx_copy_ops[*copy_ops].len = data_len;
                queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

                (*copy_ops)++;

                if (data_len < txreq.size) {
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1474                                             pending_idx);
1475                        xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1476                                                extra_count, gop);
1477                        gop++;
1478                } else {
1479                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1480                                             INVALID_PENDING_IDX);
1481                        memcpy(&queue->pending_tx_info[pending_idx].req,
1482                               &txreq, sizeof(txreq));
1483                        queue->pending_tx_info[pending_idx].extra_count =
1484                                extra_count;
1485                }
1486
1487                queue->pending_cons++;
1488
1489                gop = xenvif_get_requests(queue, skb, txfrags, gop,
1490                                          frag_overflow, nskb);
1491
1492                __skb_queue_tail(&queue->tx_queue, skb);
1493
1494                queue->tx.req_cons = idx;
1495
1496                if (((gop - queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1497                    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1498                        break;
1499        }
1500
1501        *map_ops = gop - queue->tx_map_ops;
1503}
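
/* Illustrative walk-through of the copy/map split built above (a sketch,
 * not compiled logic, assuming XEN_NETBACK_TX_COPY_LEN == 128 as defined
 * near the top of this file): a single-slot request with
 * txreq.size == 1000 results in
 *
 *   data_len = 128;         the first 128 bytes are grant-copied into
 *                           the linear skb area via tx_copy_ops[]
 *   data_len < txreq.size   so the remaining 872 bytes stay in the guest
 *                           page and a grant-map op is queued for frag[0]
 *
 * whereas a single-slot request with txreq.size <= 128 is copied in full
 * and creates no map operation, so small packets never pin guest pages.
 */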
1504
1505/* Consolidate skb with a frag_list into a brand new one with local pages on
1506 * frags. Returns 0 on success, or -ENOMEM if new pages cannot be allocated.
1507 */
1508static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1509{
1510        unsigned int offset = skb_headlen(skb);
1511        skb_frag_t frags[MAX_SKB_FRAGS];
1512        int i, f;
1513        struct ubuf_info *uarg;
1514        struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1515
1516        queue->stats.tx_zerocopy_sent += 2;
1517        queue->stats.tx_frag_overflow++;
1518
1519        xenvif_fill_frags(queue, nskb);
1520        /* Subtract the frags' size; it is corrected again below. */
1521        skb->truesize -= skb->data_len;
1522        skb->len += nskb->len;
1523        skb->data_len += nskb->len;
1524
1525        /* Create a brand new frags array and coalesce into it. */
1526        for (i = 0; offset < skb->len; i++) {
1527                struct page *page;
1528                unsigned int len;
1529
1530                BUG_ON(i >= MAX_SKB_FRAGS);
1531                page = alloc_page(GFP_ATOMIC);
1532                if (!page) {
1533                        int j;
1534                        skb->truesize += skb->data_len;
1535                        for (j = 0; j < i; j++)
1536                                put_page(frags[j].page.p);
1537                        return -ENOMEM;
1538                }
1539
1540                if (offset + PAGE_SIZE < skb->len)
1541                        len = PAGE_SIZE;
1542                else
1543                        len = skb->len - offset;
1544                if (skb_copy_bits(skb, offset, page_address(page), len))
1545                        BUG();
1546
1547                offset += len;
1548                frags[i].page.p = page;
1549                frags[i].page_offset = 0;
1550                skb_frag_size_set(&frags[i], len);
1551        }
1552
1553        /* Copied all the bits from the frag list -- free it. */
1554        skb_frag_list_init(skb);
1555        xenvif_skb_zerocopy_prepare(queue, nskb);
1556        kfree_skb(nskb);
1557
1558        /* Release all the original (foreign) frags. */
1559        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1560                skb_frag_unref(skb, f);
1561        uarg = skb_shinfo(skb)->destructor_arg;
1562        /* Increase the inflight counter to offset the decrement in the callback. */
1563        atomic_inc(&queue->inflight_packets);
1564        uarg->callback(uarg, true);
1565        skb_shinfo(skb)->destructor_arg = NULL;
1566
1567        /* Fill the skb with the new (local) frags. */
1568        memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1569        skb_shinfo(skb)->nr_frags = i;
1570        skb->truesize += i * PAGE_SIZE;
1571
1572        return 0;
1573}
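
/* Shape of the coalescing performed above, for orientation (illustrative
 * only):
 *
 *   before: skb [head | up to MAX_SKB_FRAGS foreign frags]
 *               -> frag_list -> nskb [overflow foreign frags]
 *   after:  skb [head | at most MAX_SKB_FRAGS local PAGE_SIZE pages]
 *
 * skb_copy_bits() walks head, frags and frag_list transparently, so one
 * linear copy loop suffices; the foreign grants can then be released and
 * the zerocopy callback fired before the skb leaves netback.
 */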
1574
1575static int xenvif_tx_submit(struct xenvif_queue *queue)
1576{
1577        struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1578        struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1579        struct sk_buff *skb;
1580        int work_done = 0;
1581
1582        while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1583                struct xen_netif_tx_request *txp;
1584                u16 pending_idx;
1585                unsigned int data_len;
1586
1587                pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1588                txp = &queue->pending_tx_info[pending_idx].req;
1589
1590                /* Check the remap error code. */
1591                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1592                        /* If there was an error, xenvif_tx_check_gop is
1593                         * expected to release all the frags which were mapped,
1594                         * so kfree_skb shouldn't do it again
1595                         */
1596                        skb_shinfo(skb)->nr_frags = 0;
1597                        if (skb_has_frag_list(skb)) {
1598                                struct sk_buff *nskb =
1599                                                skb_shinfo(skb)->frag_list;
1600                                skb_shinfo(nskb)->nr_frags = 0;
1601                        }
1602                        kfree_skb(skb);
1603                        continue;
1604                }
1605
1606                data_len = skb->len;
1607                callback_param(queue, pending_idx).ctx = NULL;
1608                if (data_len < txp->size) {
1609                        /* Append the packet payload as a fragment. */
1610                        txp->offset += data_len;
1611                        txp->size -= data_len;
1612                } else {
1613                        /* Schedule a response immediately. */
1614                        xenvif_idx_release(queue, pending_idx,
1615                                           XEN_NETIF_RSP_OKAY);
1616                }
1617
1618                if (txp->flags & XEN_NETTXF_csum_blank)
1619                        skb->ip_summed = CHECKSUM_PARTIAL;
1620                else if (txp->flags & XEN_NETTXF_data_validated)
1621                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1622
1623                xenvif_fill_frags(queue, skb);
1624
1625                if (unlikely(skb_has_frag_list(skb))) {
1626                        if (xenvif_handle_frag_list(queue, skb)) {
1627                                if (net_ratelimit())
1628                                        netdev_err(queue->vif->dev,
1629                                                   "Not enough memory to consolidate frag_list!\n");
1630                                xenvif_skb_zerocopy_prepare(queue, skb);
1631                                kfree_skb(skb);
1632                                continue;
1633                        }
1634                }
1635
1636                skb->dev      = queue->vif->dev;
1637                skb->protocol = eth_type_trans(skb, skb->dev);
1638                skb_reset_network_header(skb);
1639
1640                if (checksum_setup(queue, skb)) {
1641                        netdev_dbg(queue->vif->dev,
1642                                   "Can't setup checksum in net_tx_action\n");
1643                        /* We have to set this flag to trigger the callback */
1644                        if (skb_shinfo(skb)->destructor_arg)
1645                                xenvif_skb_zerocopy_prepare(queue, skb);
1646                        kfree_skb(skb);
1647                        continue;
1648                }
1649
1650                skb_probe_transport_header(skb, 0);
1651
1652                /* If the packet is GSO then we will have just set up the
1653                 * transport header offset in checksum_setup so it's now
1654                 * straightforward to calculate gso_segs.
1655                 */
1656                if (skb_is_gso(skb)) {
1657                        int mss = skb_shinfo(skb)->gso_size;
1658                        int hdrlen = skb_transport_header(skb) -
1659                                skb_mac_header(skb) +
1660                                tcp_hdrlen(skb);
1661
1662                        skb_shinfo(skb)->gso_segs =
1663                                DIV_ROUND_UP(skb->len - hdrlen, mss);
1664                }
1665
1666                queue->stats.rx_bytes += skb->len;
1667                queue->stats.rx_packets++;
1668
1669                work_done++;
1670
1671                /* Set this flag right before netif_receive_skb, otherwise
1672                 * someone might think this packet already left netback, and
1673                 * do a skb_copy_ubufs while we are still in control of the
1674                 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1675                 */
1676                if (skb_shinfo(skb)->destructor_arg) {
1677                        xenvif_skb_zerocopy_prepare(queue, skb);
1678                        queue->stats.tx_zerocopy_sent++;
1679                }
1680
1681                netif_receive_skb(skb);
1682        }
1683
1684        return work_done;
1685}
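
/* Worked example for the gso_segs computation in xenvif_tx_submit()
 * (numbers are illustrative, not taken from the code): for a 9054-byte
 * skb with a 14-byte Ethernet header, a 20-byte IPv4 header and a
 * 20-byte TCP header, with gso_size == 1448:
 *
 *   hdrlen   = (14 + 20) + 20 = 54
 *   gso_segs = DIV_ROUND_UP(9054 - 54, 1448) = DIV_ROUND_UP(9000, 1448)
 *            = 7
 *
 * i.e. the payload will be sent as seven on-the-wire segments.
 */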
1686
1687void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1688{
1689        unsigned long flags;
1690        pending_ring_idx_t index;
1691        struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1692
1693        /* This is the only place where we grab this lock, to protect callbacks
1694         * from each other.
1695         */
1696        spin_lock_irqsave(&queue->callback_lock, flags);
1697        do {
1698                u16 pending_idx = ubuf->desc;
1699                ubuf = (struct ubuf_info *) ubuf->ctx;
1700                BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1701                        MAX_PENDING_REQS);
1702                index = pending_index(queue->dealloc_prod);
1703                queue->dealloc_ring[index] = pending_idx;
1704                /* Sync with xenvif_tx_dealloc_action:
1705                 * insert idx then incr producer.
1706                 */
1707                smp_wmb();
1708                queue->dealloc_prod++;
1709        } while (ubuf);
1710        spin_unlock_irqrestore(&queue->callback_lock, flags);
1711
1712        if (likely(zerocopy_success))
1713                queue->stats.tx_zerocopy_success++;
1714        else
1715                queue->stats.tx_zerocopy_fail++;
1716        xenvif_skb_zerocopy_complete(queue);
1717}
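
/* Note on the loop above: the ubuf_info structures of one skb are
 * chained through ubuf->ctx (the chain is built when the frags are
 * filled in, in xenvif_fill_frags()), so a single callback invocation
 * pushes every pending_idx of that skb onto the dealloc ring.  The
 * smp_wmb() pairs with the smp_rmb() in xenvif_tx_dealloc_action():
 * the dealloc thread must see the ring entry before it sees the
 * advanced dealloc_prod.
 */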
1718
1719static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1720{
1721        struct gnttab_unmap_grant_ref *gop;
1722        pending_ring_idx_t dc, dp;
1723        u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1724        unsigned int i = 0;
1725
1726        dc = queue->dealloc_cons;
1727        gop = queue->tx_unmap_ops;
1728
1729        /* Free up any grants we have finished using */
1730        do {
1731                dp = queue->dealloc_prod;
1732
1733                /* Ensure we see all indices enqueued by every
1734                 * xenvif_zerocopy_callback() invocation.
1735                 */
1736                smp_rmb();
1737
1738                while (dc != dp) {
1739                        BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1740                        pending_idx =
1741                                queue->dealloc_ring[pending_index(dc++)];
1742
1743                        pending_idx_release[gop - queue->tx_unmap_ops] =
1744                                pending_idx;
1745                        queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1746                                queue->mmap_pages[pending_idx];
1747                        gnttab_set_unmap_op(gop,
1748                                            idx_to_kaddr(queue, pending_idx),
1749                                            GNTMAP_host_map,
1750                                            queue->grant_tx_handle[pending_idx]);
1751                        xenvif_grant_handle_reset(queue, pending_idx);
1752                        ++gop;
1753                }
1754
1755        } while (dp != queue->dealloc_prod);
1756
1757        queue->dealloc_cons = dc;
1758
1759        if (gop - queue->tx_unmap_ops > 0) {
1760                int ret;
1761                ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1762                                        NULL,
1763                                        queue->pages_to_unmap,
1764                                        gop - queue->tx_unmap_ops);
1765                if (ret) {
1766                        netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1767                                   gop - queue->tx_unmap_ops, ret);
1768                        for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1769                                if (gop[i].status != GNTST_okay)
1770                                        netdev_err(queue->vif->dev,
1771                                                   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1772                                                   gop[i].host_addr,
1773                                                   gop[i].handle,
1774                                                   gop[i].status);
1775                        }
1776                        BUG();
1777                }
1778        }
1779
1780        for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1781                xenvif_idx_release(queue, pending_idx_release[i],
1782                                   XEN_NETIF_RSP_OKAY);
1783}
1784
1785
1786/* Called after netfront has transmitted */
1787int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1788{
1789        unsigned int nr_mops, nr_cops = 0;
1790        int work_done, ret;
1791
1792        if (unlikely(!tx_work_todo(queue)))
1793                return 0;
1794
1795        xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1796
1797        if (nr_cops == 0)
1798                return 0;
1799
1800        gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1801        if (nr_mops != 0) {
1802                ret = gnttab_map_refs(queue->tx_map_ops,
1803                                      NULL,
1804                                      queue->pages_to_map,
1805                                      nr_mops);
1806                BUG_ON(ret);
1807        }
1808
1809        work_done = xenvif_tx_submit(queue);
1810
1811        return work_done;
1812}
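
/* For orientation, the per-poll Tx pipeline above is:
 *
 *   xenvif_tx_build_gops()  consume ring requests into tx_copy_ops[]
 *                           and tx_map_ops[]
 *   gnttab_batch_copy() /   execute the grant operations in batched
 *   gnttab_map_refs()       hypercalls
 *   xenvif_tx_submit()      turn the results into skbs and hand them
 *                           to the stack via netif_receive_skb()
 *
 * The budget argument bounds how many skbs one pass may queue.
 */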
1813
1814static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1815                               u8 status)
1816{
1817        struct pending_tx_info *pending_tx_info;
1818        pending_ring_idx_t index;
1819        unsigned long flags;
1820
1821        pending_tx_info = &queue->pending_tx_info[pending_idx];
1822
1823        spin_lock_irqsave(&queue->response_lock, flags);
1824
1825        make_tx_response(queue, &pending_tx_info->req,
1826                         pending_tx_info->extra_count, status);
1827
1828                /* Release the pending index before pushing the Tx response so
1829                 * it's available before a new Tx request is pushed by the
1830         * frontend.
1831         */
1832        index = pending_index(queue->pending_prod++);
1833        queue->pending_ring[index] = pending_idx;
1834
1835        push_tx_responses(queue);
1836
1837        spin_unlock_irqrestore(&queue->response_lock, flags);
1838}
1839
1840
1841static void make_tx_response(struct xenvif_queue *queue,
1842                             struct xen_netif_tx_request *txp,
1843                             unsigned int extra_count,
1844                             s8       st)
1845{
1846        RING_IDX i = queue->tx.rsp_prod_pvt;
1847        struct xen_netif_tx_response *resp;
1848
1849        resp = RING_GET_RESPONSE(&queue->tx, i);
1850        resp->id     = txp->id;
1851        resp->status = st;
1852
1853        while (extra_count-- != 0)
1854                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1855
1856        queue->tx.rsp_prod_pvt = ++i;
1857}
1858
1859static void push_tx_responses(struct xenvif_queue *queue)
1860{
1861        int notify;
1862
1863        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1864        if (notify)
1865                notify_remote_via_irq(queue->tx_irq);
1866}
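
/* Response protocol, in brief: every consumed request slot is answered
 * exactly once.  make_tx_response() writes the status for the request
 * itself and answers each accompanying extra-info slot with
 * XEN_NETIF_RSP_NULL, keeping the request and response rings in step.
 * push_tx_responses() then publishes rsp_prod and notifies the frontend
 * only if RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() says it asked for an
 * event.
 */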
1867
1868static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1869                                             u16      id,
1870                                             s8       st,
1871                                             u16      offset,
1872                                             u16      size,
1873                                             u16      flags)
1874{
1875        RING_IDX i = queue->rx.rsp_prod_pvt;
1876        struct xen_netif_rx_response *resp;
1877
1878        resp = RING_GET_RESPONSE(&queue->rx, i);
1879        resp->offset     = offset;
1880        resp->flags      = flags;
1881        resp->id         = id;
1882        resp->status     = (s16)size;
1883        if (st < 0)
1884                resp->status = (s16)st;
1885
1886        queue->rx.rsp_prod_pvt = ++i;
1887
1888        return resp;
1889}
1890
1891void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1892{
1893        int ret;
1894        struct gnttab_unmap_grant_ref tx_unmap_op;
1895
1896        gnttab_set_unmap_op(&tx_unmap_op,
1897                            idx_to_kaddr(queue, pending_idx),
1898                            GNTMAP_host_map,
1899                            queue->grant_tx_handle[pending_idx]);
1900        xenvif_grant_handle_reset(queue, pending_idx);
1901
1902        ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1903                                &queue->mmap_pages[pending_idx], 1);
1904        if (ret) {
1905                netdev_err(queue->vif->dev,
1906                           "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1907                           ret,
1908                           pending_idx,
1909                           tx_unmap_op.host_addr,
1910                           tx_unmap_op.handle,
1911                           tx_unmap_op.status);
1912                BUG();
1913        }
1914}
1915
1916static inline int tx_work_todo(struct xenvif_queue *queue)
1917{
1918        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1919                return 1;
1920
1921        return 0;
1922}
1923
1924static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1925{
1926        return queue->dealloc_cons != queue->dealloc_prod;
1927}
1928
1929void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1930{
1931        if (queue->tx.sring)
1932                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1933                                        queue->tx.sring);
1934        if (queue->rx.sring)
1935                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1936                                        queue->rx.sring);
1937}
1938
1939int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1940                              grant_ref_t tx_ring_ref,
1941                              grant_ref_t rx_ring_ref)
1942{
1943        void *addr;
1944        struct xen_netif_tx_sring *txs;
1945        struct xen_netif_rx_sring *rxs;
1946
1947        int err = -ENOMEM;
1948
1949        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1950                                     &tx_ring_ref, 1, &addr);
1951        if (err)
1952                goto err;
1953
1954        txs = (struct xen_netif_tx_sring *)addr;
1955        BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1956
1957        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1958                                     &rx_ring_ref, 1, &addr);
1959        if (err)
1960                goto err;
1961
1962        rxs = (struct xen_netif_rx_sring *)addr;
1963        BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1964
1965        return 0;
1966
1967err:
1968        xenvif_unmap_frontend_rings(queue);
1969        return err;
1970}
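
/* If mapping the Rx ring fails after the Tx ring has been mapped, the
 * err path still works: xenvif_unmap_frontend_rings() only unmaps a
 * ring whose sring pointer was set by BACK_RING_INIT(), so a non-zero
 * return from xenvif_map_frontend_rings() leaves nothing mapped.
 */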
1971
1972static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1973{
1974        struct xenvif *vif = queue->vif;
1975
1976        queue->stalled = true;
1977
1978        /* At least one queue has stalled? Disable the carrier. */
1979        spin_lock(&vif->lock);
1980        if (vif->stalled_queues++ == 0) {
1981                netdev_info(vif->dev, "Guest Rx stalled");
1982                netif_carrier_off(vif->dev);
1983        }
1984        spin_unlock(&vif->lock);
1985}
1986
1987static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1988{
1989        struct xenvif *vif = queue->vif;
1990
1991        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1992        queue->stalled = false;
1993
1994        /* All queues are ready? Enable the carrier. */
1995        spin_lock(&vif->lock);
1996        if (--vif->stalled_queues == 0) {
1997                netdev_info(vif->dev, "Guest Rx ready");
1998                netif_carrier_on(vif->dev);
1999        }
2000        spin_unlock(&vif->lock);
2001}
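
/* vif->stalled_queues counts, under vif->lock, how many of the
 * interface's queues are currently stalled: the carrier is dropped on
 * the 0 -> 1 transition and restored on the 1 -> 0 transition, so one
 * unresponsive queue is enough to take the whole interface down.
 */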
2002
2003static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2004{
2005        RING_IDX prod, cons;
2006
2007        prod = queue->rx.sring->req_prod;
2008        cons = queue->rx.req_cons;
2009
2010        return !queue->stalled && prod - cons < 1 &&
2011                time_after(jiffies,
2012                           queue->last_rx_time + queue->vif->stall_timeout);
2013}
2014
2015static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2016{
2017        RING_IDX prod, cons;
2018
2019        prod = queue->rx.sring->req_prod;
2020        cons = queue->rx.req_cons;
2021
2022        return queue->stalled && prod - cons >= 1;
2023}
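
/* The two predicates above are made mutually exclusive by
 * queue->stalled, giving simple hysteresis: a queue is declared stalled
 * only when the frontend has no unconsumed Rx requests and no Rx
 * progress has been made for a full stall_timeout, and declared ready
 * again as soon as at least one request appears, so each transition is
 * reported exactly once.
 */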
2024
2025static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2026{
2027        return xenvif_rx_ring_slots_available(queue) ||
2028                (queue->vif->stall_timeout &&
2029                 (xenvif_rx_queue_stalled(queue) ||
2030                  xenvif_rx_queue_ready(queue))) ||
2031                kthread_should_stop() ||
2032                queue->vif->disabled;
2033}
2034
2035static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2036{
2037        struct sk_buff *skb;
2038        long timeout;
2039
2040        skb = skb_peek(&queue->rx_queue);
2041        if (!skb)
2042                return MAX_SCHEDULE_TIMEOUT;
2043
2044        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2045        return timeout < 0 ? 0 : timeout;
2046}
2047
2048/* Wait until the guest Rx thread has work.
2049 *
2050 * The timeout needs to be adjusted based on the current head of the
2051 * queue (and not just the head at the beginning).  In particular, if
2052 * the queue is initially empty, an infinite timeout is used, and this
2053 * needs to be reduced when an skb is queued.
2054 *
2055 * This cannot be done with wait_event_timeout() because it only
2056 * calculates the timeout once.
2057 */
2058static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2059{
2060        DEFINE_WAIT(wait);
2061
2062        if (xenvif_have_rx_work(queue))
2063                return;
2064
2065        for (;;) {
2066                long ret;
2067
2068                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2069                if (xenvif_have_rx_work(queue))
2070                        break;
2071                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2072                if (!ret)
2073                        break;
2074        }
2075        finish_wait(&queue->wq, &wait);
2076}
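
/* A minimal sketch of what the open-coded loop above avoids (hypothetical
 * use of the standard macro, not a proposed change):
 *
 *   wait_event_timeout(queue->wq, xenvif_have_rx_work(queue),
 *                      xenvif_rx_queue_timeout(queue));
 *
 * would evaluate the timeout expression only once.  With an initially
 * empty rx_queue that is MAX_SCHEDULE_TIMEOUT, and an skb queued later
 * could never shorten it; the loop instead recomputes the timeout from
 * the current queue head on every wakeup.
 */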
2077
2078int xenvif_kthread_guest_rx(void *data)
2079{
2080        struct xenvif_queue *queue = data;
2081        struct xenvif *vif = queue->vif;
2082
2083        if (!vif->stall_timeout)
2084                xenvif_queue_carrier_on(queue);
2085
2086        for (;;) {
2087                xenvif_wait_for_rx_work(queue);
2088
2089                if (kthread_should_stop())
2090                        break;
2091
2092                /* This frontend has been found to be rogue; disable it
2093                 * in kthread context. Currently this is only set when
2094                 * netback finds that the frontend has sent a malformed
2095                 * packet, but we cannot disable the interface in softirq
2096                 * context, so we defer it to this thread if it is
2097                 * associated with queue 0.
2098                 */
2099                if (unlikely(vif->disabled && queue->id == 0)) {
2100                        xenvif_carrier_off(vif);
2101                        break;
2102                }
2103
2104                if (!skb_queue_empty(&queue->rx_queue))
2105                        xenvif_rx_action(queue);
2106
2107                /* If the guest hasn't provided any Rx slots for a
2108                 * while it's probably not responsive, drop the
2109                 * carrier so packets are dropped earlier.
2110                 */
2111                if (vif->stall_timeout) {
2112                        if (xenvif_rx_queue_stalled(queue))
2113                                xenvif_queue_carrier_off(queue);
2114                        else if (xenvif_rx_queue_ready(queue))
2115                                xenvif_queue_carrier_on(queue);
2116                }
2117
2118                /* Queued packets may have foreign pages from other
2119                 * domains.  These cannot be queued indefinitely as
2120                 * this would starve guests of grant refs and transmit
2121                 * slots.
2122                 */
2123                xenvif_rx_queue_drop_expired(queue);
2124
2125                xenvif_rx_queue_maybe_wake(queue);
2126
2127                cond_resched();
2128        }
2129
2130        /* Bin any remaining skbs */
2131        xenvif_rx_queue_purge(queue);
2132
2133        return 0;
2134}
2135
2136static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2137{
2138        /* The dealloc thread must remain running until all inflight
2139         * packets have completed.
2140         */
2141        return kthread_should_stop() &&
2142                !atomic_read(&queue->inflight_packets);
2143}
2144
2145int xenvif_dealloc_kthread(void *data)
2146{
2147        struct xenvif_queue *queue = data;
2148
2149        for (;;) {
2150                wait_event_interruptible(queue->dealloc_wq,
2151                                         tx_dealloc_work_todo(queue) ||
2152                                         xenvif_dealloc_kthread_should_stop(queue));
2153                if (xenvif_dealloc_kthread_should_stop(queue))
2154                        break;
2155
2156                xenvif_tx_dealloc_action(queue);
2157                cond_resched();
2158        }
2159
2160        /* Unmap anything remaining. */
2161        if (tx_dealloc_work_todo(queue))
2162                xenvif_tx_dealloc_action(queue);
2163
2164        return 0;
2165}
2166
2167static int __init netback_init(void)
2168{
2169        int rc = 0;
2170
2171        if (!xen_domain())
2172                return -ENODEV;
2173
2174        /* Allow as many queues as there are CPUs if the user has not
2175         * specified a value.
2176         */
2177        if (xenvif_max_queues == 0)
2178                xenvif_max_queues = num_online_cpus();
2179
2180        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2181                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2182                        fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2183                fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2184        }
2185
2186        rc = xenvif_xenbus_init();
2187        if (rc)
2188                goto failed_init;
2189
2190#ifdef CONFIG_DEBUG_FS
2191        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2192        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2193                pr_warn("Init of debugfs returned %ld!\n",
2194                        PTR_ERR(xen_netback_dbg_root));
2195#endif /* CONFIG_DEBUG_FS */
2196
2197        return 0;
2198
2199failed_init:
2200        return rc;
2201}
2202
2203module_init(netback_init);
2204
2205static void __exit netback_fini(void)
2206{
2207#ifdef CONFIG_DEBUG_FS
2208        if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2209                debugfs_remove_recursive(xen_netback_dbg_root);
2210#endif /* CONFIG_DEBUG_FS */
2211        xenvif_xenbus_fini();
2212}
2213module_exit(netback_fini);
2214
2215MODULE_LICENSE("Dual BSD/GPL");
2216MODULE_ALIAS("xen-backend:vif");
2217