linux/include/net/busy_poll.h
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <net/ip.h>

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

/* return values from ndo_busy_poll */
#define LL_FLUSH_FAILED		-1
#define LL_FLUSH_BUSY		-2

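/* Illustrative sketch, not part of this header: the contract a driver's
 * ndo_busy_poll callback is expected to follow with the values above.
 * The function and its helpers (my_ring_down(), my_trylock_poll(),
 * my_clean_rx(), my_unlock_poll()) are hypothetical, loosely modelled on
 * drivers such as ixgbe: return LL_FLUSH_FAILED on a permanent failure
 * (e.g. the device is going down), LL_FLUSH_BUSY when the ring is
 * contended with the regular NAPI poll, otherwise the number of packets
 * processed.
 *
 *	static int my_busy_poll(struct napi_struct *napi)
 *	{
 *		struct my_ring *ring = container_of(napi, struct my_ring, napi);
 *		int cleaned;
 *
 *		if (my_ring_down(ring))
 *			return LL_FLUSH_FAILED;
 *		if (!my_trylock_poll(ring))
 *			return LL_FLUSH_BUSY;
 *		cleaned = my_clean_rx(ring);
 *		my_unlock_poll(ring);
 *		return cleaned;
 *	}
 */
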
static inline bool net_busy_loop_on(void)
{
	return sysctl_net_busy_poll;
}

/* local_clock() returns nanoseconds; shifting right by 10 divides by 1024,
 * which is close enough to dividing by 1000 to express the busy-poll
 * budget in approximate microseconds.
 */
static inline u64 busy_loop_us_clock(void)
{
	return local_clock() >> 10;
}

static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline unsigned long busy_loop_end_time(void)
{
	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return sk->sk_ll_usec && sk->sk_napi_id &&
	       !need_resched() && !signal_pending(current);
}

static inline bool busy_loop_timeout(unsigned long end_time)
{
	unsigned long now = busy_loop_us_clock();

	return time_after(now, end_time);
}

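/* Illustrative sketch, not part of this header: how a poll/select style
 * loop can consume busy_loop_end_time() and busy_loop_timeout().  This is
 * loosely modelled on the pattern in fs/select.c (which additionally
 * tracks whether any polled socket is actually busy-poll capable);
 * do_one_poll_pass() is a hypothetical stand-in for walking the file
 * descriptors once.  Once the budget is exhausted the caller falls back
 * to sleeping as usual.
 *
 *	unsigned long busy_end = 0;
 *
 *	for (;;) {
 *		if (do_one_poll_pass())
 *			break;
 *
 *		if (net_busy_loop_on() && !need_resched()) {
 *			if (!busy_end) {
 *				busy_end = busy_loop_end_time();
 *				continue;
 *			}
 *			if (!busy_loop_timeout(busy_end))
 *				continue;
 *		}
 *		break;
 *	}
 */
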
/* When called from sock_poll(), nonblock is known at compile time to be
 * true, so the loop and end_time will be optimized out.
 */
static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = false;

	/*
	 * RCU read lock protects the NAPI hash lookup;
	 * BHs are disabled so we don't race with net_rx_action()
	 */
	rcu_read_lock_bh();

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_busy_poll)
		goto out;

	do {
		rc = ops->ndo_busy_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local BHs are disabled, so it is OK to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
		cpu_relax();

	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();
	return rc;
}
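
/* Illustrative sketch, not part of this header: how a protocol receive
 * path can use the helpers above, similar in spirit to what tcp_recvmsg()
 * does before it sleeps on an empty receive queue: if the socket is
 * eligible, busy poll first and only block when nothing arrives within
 * the budget.
 *
 *	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, nonblock);
 */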

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;
}

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;
}

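/* Illustrative sketch, not part of this header, assuming a hypothetical
 * driver whose rx_ring embeds its napi_struct.  In the driver's NAPI poll
 * routine, each received skb is tagged before it is handed to the stack:
 *
 *	skb_mark_napi_id(skb, &rx_ring->napi);
 *	napi_gro_receive(&rx_ring->napi, skb);
 *
 * In the protocol receive handler, once the skb has been matched to a
 * socket, the id is propagated so a later read can busy poll the right
 * queue:
 *
 *	sk_mark_napi_id(sk, skb);
 */
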
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline unsigned long busy_loop_end_time(void)
{
	return 0;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
}

static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
{
}

static inline bool busy_loop_timeout(unsigned long end_time)
{
	return true;
}

static inline bool sk_busy_loop(struct sock *sk, int nonblock)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */