linux/drivers/net/ethernet/sfc/efx.h
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include "net_driver.h"
#include "filter.h"

/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
#define EFX_MEM_VF_BAR 0
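
/* A minimal, illustrative sketch (not part of the driver) of how the BAR
 * indices above might be used to map the NIC's memory region.  Assumes
 * <linux/pci.h> and <linux/io.h> are visible; the real mapping is done in
 * efx.c during probe.
 */
static inline void __iomem *example_map_mem_bar(struct pci_dev *pci_dev,
                                                bool is_vf)
{
        unsigned int bar = is_vf ? EFX_MEM_VF_BAR : EFX_MEM_BAR;

        return ioremap(pci_resource_start(pci_dev, bar),
                       pci_resource_len(pci_dev, bar));
}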

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
                 struct tc_to_netdev *tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;

/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void efx_rx_slow_fill(unsigned long context);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
        if (channel->rx_pkt_n_frags)
                __efx_rx_packet(channel);
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
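
/* A minimal, illustrative sketch (not the driver's real event loop) of how
 * the RX entry points above fit together: efx_rx_packet() accepts one
 * completed descriptor (or set of scatter fragments), and
 * efx_rx_flush_packet() pushes any held packet up the stack.  The real
 * driver defers the flush to the end of the NAPI poll batch rather than
 * flushing per completion as this sketch does.
 */
static inline void example_handle_rx_completion(struct efx_rx_queue *rx_queue,
                                                unsigned int index,
                                                unsigned int n_frags,
                                                unsigned int len, u16 flags)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);

        efx_rx_packet(rx_queue, index, n_frags, len, flags);
        efx_rx_flush_packet(channel);
}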

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS        100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT         128U
#define EFX_TXQ_MIN_ENT(efx)    (2 * efx_tx_max_skb_descs(efx))

#define EFX_TXQ_MAX_ENT(efx)    (EFX_WORKAROUND_35388(efx) ? \
                                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
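
/* A minimal, illustrative sketch (not the driver's ethtool code) of how a
 * requested TX ring size might be validated against the bounds above.
 * Assumes <linux/log2.h> (for is_power_of_2()) and "workarounds.h" (for
 * EFX_WORKAROUND_35388()) are visible at the point of use.
 */
static inline bool example_txq_entries_ok(struct efx_nic *efx,
                                          unsigned int entries)
{
        return is_power_of_2(entries) &&
               entries >= EFX_TXQ_MIN_ENT(efx) &&
               entries <= EFX_TXQ_MAX_ENT(efx);
}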

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
        return efx->rss_spread > 1;
}

/* Filters */

void efx_mac_reconfigure(struct efx_nic *efx);

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *      existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
                                           struct efx_filter_spec *spec,
                                           bool replace_equal)
{
        return efx->type->filter_insert(efx, spec, replace_equal);
}

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
                                            enum efx_filter_priority priority,
                                            u32 filter_id)
{
        return efx->type->filter_remove_safe(efx, priority, filter_id);
}
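
/* A minimal, illustrative sketch (not part of the driver) of the
 * insert/remove pattern above: steer traffic for a local MAC address to a
 * given RX queue, then remove the filter by the returned ID.  Assumes the
 * spec helpers declared in filter.h (efx_filter_init_rx(),
 * efx_filter_set_eth_local()); error handling is abbreviated.
 */
static inline int example_steer_mac_to_rxq(struct efx_nic *efx,
                                           const u8 *addr, unsigned int rxq)
{
        struct efx_filter_spec spec;
        s32 filter_id;
        int rc;

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
        rc = efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
        if (rc)
                return rc;

        filter_id = efx_filter_insert_filter(efx, &spec, false);
        if (filter_id < 0)
                return filter_id;       /* e.g. -EPERM or -EEXIST */

        /* ... later, remove using the same priority and the returned ID ... */
        return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, filter_id);
}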

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
                           enum efx_filter_priority priority,
                           u32 filter_id, struct efx_filter_spec *spec)
{
        return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
                                           enum efx_filter_priority priority)
{
        return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
        return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        enum efx_filter_priority priority,
                                        u32 *buf, u32 size)
{
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
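
/* A minimal, illustrative sketch (not part of the driver) of enumerating
 * the installed RX filter IDs at a given priority, much as the ethtool code
 * does: size a buffer from efx_filter_count_rx_used() and fill it with
 * efx_filter_get_rx_ids().  Assumes <linux/slab.h>; per-ID handling is left
 * as a comment.
 */
static inline void example_walk_rx_filters(struct efx_nic *efx,
                                           enum efx_filter_priority priority)
{
        u32 count = efx_filter_count_rx_used(efx, priority);
        u32 *ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
        s32 n_ids, i;

        if (!ids)
                return;
        n_ids = efx_filter_get_rx_ids(efx, priority, ids, count);
        for (i = 0; i < n_ids; i++) {
                /* e.g. fetch each spec with efx_filter_get_filter_safe() */
        }
        kfree(ids);
}
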
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
{
        if (channel->rfs_filters_added >= 60 &&
            __efx_filter_rfs_expire(channel->efx, 100))
                channel->rfs_filters_added -= 60;
}
#define efx_filter_rfs_enabled() 1
#else
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);

/* Channels */
int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);

/* Ports */
int efx_reconfigure_port(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Reset handling */
int efx_reset(struct efx_nic *efx, enum reset_type method);
void efx_reset_down(struct efx_nic *efx, enum reset_type method);
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_try_recovery(struct efx_nic *efx);

/* Global */
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);
void efx_stop_eventq(struct efx_channel *channel);
void efx_start_eventq(struct efx_channel *channel);

/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
void efx_port_dummy_op_void(struct efx_nic *efx);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
                size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
        return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
        return 1 << efx->vi_scale;
}
#endif
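
/* Worked example for efx_vf_size() (illustrative value only): with
 * efx->vi_scale == 6, each VF gets 1 << 6 == 64 virtual interfaces.  The
 * actual scale depends on the SR-IOV configuration.
 */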

static inline void efx_schedule_channel(struct efx_channel *channel)
{
        netif_vdbg(channel->efx, intr, channel->efx->net_dev,
                   "channel %d scheduling NAPI poll on CPU%d\n",
                   channel->channel, raw_smp_processor_id());

        napi_schedule(&channel->napi_str);
}

static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
        channel->event_test_cpu = raw_smp_processor_id();
        efx_schedule_channel(channel);
}

void efx_link_status_changed(struct efx_nic *efx);
void efx_link_set_advertising(struct efx_nic *efx, u32);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
        struct net_device *dev = efx->net_dev;

        /* Lock/freeze all TX queues so that we can be sure the
         * TX scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */
        netif_tx_lock_bh(dev);
        netif_device_detach(dev);
        netif_tx_unlock_bh(dev);
}
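
/* A minimal, illustrative sketch (not the driver's reset path) of the
 * detach/attach pairing: quiesce TX with the helper above before touching
 * hardware state, then re-enable it with the stock netif_device_attach().
 */
static inline void example_quiesce_and_resume(struct efx_nic *efx)
{
        efx_device_detach_sync(efx);    /* TX scheduler now stopped */

        /* ... reconfigure or reset the hardware here ... */

        netif_device_attach(efx->net_dev);      /* allow TX again */
}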

#endif /* EFX_EFX_H */