linux/include/net/gro_cells.h
#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

/* One GRO engine per CPU: a private skb queue feeding a NAPI instance. */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};

/*
 * Queue an incoming skb on the current CPU's cell and kick its NAPI
 * instance so the packet is fed through GRO.  Falls back to netif_rx()
 * when GRO cannot help (no cells allocated, cloned skb, or GRO disabled
 * on the device).  The caller is expected to run on the RX softirq path,
 * where the same CPU both queues and drains skbs, so the lockless
 * __skb_queue_tail() is safe.
 */
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
		netif_rx(skb);
		return;
	}

	cell = this_cpu_ptr(gcells->cells);

	/* Mirror the backlog limit used by netif_rx(): drop once the
	 * per-cell queue grows past netdev_max_backlog.
	 */
	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	/* Schedule NAPI only on the 0 -> 1 transition; it stays scheduled
	 * until gro_cell_poll() drains the queue.
	 */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);
}

/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	/* Drain up to @budget skbs from the cell's queue into GRO. */
	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Queue emptied before the budget ran out: polling is complete. */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

/*
 * Allocate one cell per possible CPU and register its NAPI instance
 * with @dev.  Returns 0 on success or -ENOMEM.
 */
static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);
		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}

/*
 * Tear down all cells: unregister each NAPI instance, free any skbs
 * still queued, then release the per-CPU memory.
 */
static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
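
/*
 * Illustrative usage sketch (not part of this header): a minimal outline of
 * how a tunnel-style driver might wire up gro_cells.  The "foo" device,
 * struct foo_priv, and the foo_* functions below are hypothetical names
 * chosen for the example, not code from the kernel tree.
 */

#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct foo_priv {
	struct gro_cells	gro_cells;
};

/* ndo_init: allocate the per-CPU cells once the netdev exists. */
static int foo_dev_init(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	return gro_cells_init(&priv->gro_cells, dev);
}

/* RX path (softirq context): hand decapsulated skbs to GRO. */
static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct foo_priv *priv = netdev_priv(dev);

	skb->dev = dev;
	gro_cells_receive(&priv->gro_cells, skb);
}

/* ndo_uninit: release NAPI instances and any queued skbs. */
static void foo_dev_uninit(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}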