#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
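
/* TX */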
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
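/* Queue an skb for transmission, dispatching to the NIC-type-specific
 * handler (ef100_enqueue_skb on EF100, __efx_enqueue_skb otherwise)
 * through an indirect call wrapper to avoid retpoline overhead.
 */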
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
                               ef100_enqueue_skb, __efx_enqueue_skb,
                               tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data);
extern unsigned int efx_piobuf_size;
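
/* RX */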
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags);
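/* If a partially-received packet is pending on the channel, hand it to the
 * NIC-type-specific rx_packet handler (__ef100_rx_packet on EF100,
 * __efx_rx_packet otherwise).
 */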
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
        if (channel->rx_pkt_n_frags)
                INDIRECT_CALL_2(channel->efx->type->rx_packet,
                                __ef100_rx_packet, __efx_rx_packet,
                                channel);
}
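/* Check whether the RX prefix contains a valid hash; NIC types that do not
 * provide a rx_buf_hash_valid method always report the hash as valid.
 */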
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
        if (efx->type->rx_buf_hash_valid)
                return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
                                       ef100_rx_buf_hash_valid,
                                       prefix);
        return true;
}
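
/* Maximum number of TCP segments we support for soft-TSO */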
#define EFX_TSO_MAX_SEGS        100
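
/* The smallest rxq_entries/txq_entries that the driver supports.  The RX
 * minimum is fairly arbitrary; the TX minimum must leave room for two skbs
 * that each need the maximum number of descriptors (worst-case TSO skbs).
 */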
#define EFX_RXQ_MIN_ENT         128U
#define EFX_TXQ_MIN_ENT(efx)    (2 * efx_tx_max_skb_descs(efx))
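
/* NICs needing the EF10 workaround can only use half of EFX_MAX_DMAQ_SIZE
 * for TX queue entries.
 */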
#define EFX_TXQ_MAX_ENT(efx)    (EFX_WORKAROUND_EF10(efx) ? \
                                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
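
/* RSS is considered enabled when receive traffic is spread over more than
 * one queue.
 */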
static inline bool efx_rss_enabled(struct efx_nic *efx)
{
        return efx->rss_spread > 1;
}
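
/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC on which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * Delegates to the NIC type's filter_insert method.  On success, returns
 * the filter ID; on failure, returns a negative error code.
 */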
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
                                           struct efx_filter_spec *spec,
                                           bool replace_equal)
{
        return efx->type->filter_insert(efx, spec, replace_equal);
}
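
/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of the filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of the filter, as returned by efx_filter_insert_filter()
 *
 * The NIC type's filter_remove_safe method range-checks @filter_id, so this
 * is safe to call with a value passed from userland.
 */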
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
                                            enum efx_filter_priority priority,
                                            u32 filter_id)
{
        return efx->type->filter_remove_safe(efx, priority, filter_id);
}
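
/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of the filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of the filter, as returned by efx_filter_insert_filter()
 * @spec: Buffer in which to store the filter specification
 *
 * The NIC type's filter_get_safe method range-checks @filter_id, so this is
 * safe to call with a value passed from userland.
 */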
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
                           enum efx_filter_priority priority,
                           u32 filter_id, struct efx_filter_spec *spec)
{
        return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
                                           enum efx_filter_priority priority)
{
        return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
        return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        enum efx_filter_priority priority,
                                        u32 *buf, u32 size)
{
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
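
/* RSS contexts */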
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
        return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
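
/* Ethtool support */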
extern const struct ethtool_ops efx_ethtool_ops;
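
/* Interrupt moderation */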
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);
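
/* Update the driver-maintained software statistics in the passed stats array */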
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
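
/* MTD */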
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
                size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
        return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
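/* Number of virtual interfaces (VIs) spanned by each VF; efx->vi_scale
 * holds the log2 of this value.
 */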
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
        return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
        struct net_device *dev = efx->net_dev;

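        /* Lock/freeze all TX queues so that we can be sure that the TX
         * scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */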
        netif_tx_lock_bh(dev);
        netif_device_detach(dev);
        netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
        if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
                netif_device_attach(efx->net_dev);
}
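
/* Check (and WARN if not) that an rw_semaphore is held for write: if a read
 * lock can still be taken, the semaphore is not write-locked.
 */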
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
        if (WARN_ON(down_read_trylock(sem))) {
                up_read(sem);
                return false;
        }
        return true;
}
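
/* Queue up to @n XDP frames from @xdpfs for transmission; if @flush is set,
 * push the queued descriptors to the NIC.
 */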
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
                       bool flush);

#endif