linux/include/net/gro_cells.h
#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

/* One cell per possible CPU: a queue of skbs awaiting GRO plus the
 * dummy NAPI context used to feed them to the GRO engine.
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};

/* Queue the skb on this CPU's cell and kick its NAPI instance.
 * Called from the receive (BH) path; falls back to netif_rx() when the
 * device has GRO disabled or the skb is cloned.
 */
static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
		return netif_rx(skb);

	cell = this_cpu_ptr(gcells->cells);

	/* Bound the per-cpu queue with the same knob as the normal backlog. */
	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	/* Schedule NAPI only on the empty -> non-empty transition. */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);
	return NET_RX_SUCCESS;
}

/* NAPI poll callback, called under BH context: drain the per-cpu queue
 * into GRO, respecting the NAPI budget.
 */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	/* One cell per possible CPU, each with its own NAPI instance. */
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);
		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}

static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
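
For context, the sketch below shows how a virtual device might wire these three entry points together. It is illustrative only: the "mytun" device, its private struct and the mytun_* helpers are hypothetical names and are not part of gro_cells.h; the call pattern mirrors the way tunnel drivers typically use the API (init at device setup, receive on the decapsulation path in BH context, destroy at teardown).

/* Hypothetical example driver glue -- not part of gro_cells.h. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>

struct mytun_priv {
	struct gro_cells	gro_cells;	/* per-cpu GRO state for this device */
};

static int mytun_dev_init(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	/* Allocate the per-cpu cells and register their NAPI contexts. */
	return gro_cells_init(&priv->gro_cells, dev);
}

static void mytun_dev_uninit(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}

/* Decapsulated receive path (BH context): feed skbs through GRO
 * instead of calling netif_rx() directly.
 */
static int mytun_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct mytun_priv *priv = netdev_priv(dev);

	skb->dev = dev;
	return gro_cells_receive(&priv->gro_cells, skb);
}

In a real driver the init/uninit helpers would be hooked up through net_device_ops and mytun_rx would be called from the encapsulation protocol's receive handler.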