1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef ENA_H
34#define ENA_H
35
36#include <linux/bitops.h>
37#include <linux/dim.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/inetdevice.h>
41#include <linux/interrupt.h>
42#include <linux/netdevice.h>
43#include <linux/skbuff.h>
44
45#include "ena_com.h"
46#include "ena_eth_com.h"
47
/* Driver generation version, exposed through ethtool. */
#define DRV_MODULE_GEN_MAJOR 2
#define DRV_MODULE_GEN_MINOR 1
#define DRV_MODULE_GEN_SUBMINOR 0

#define DRV_MODULE_NAME "ena"

#define DEVICE_NAME "Elastic Network Adapter (ENA)"

/* 1 vector for admin/AENQ; the rest are IO vectors */
#define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))

/* The ENA buffer length fields are 16 bit long, so when PAGE_SIZE
 * is larger than 16kB the driver caps its page buffers at 16kB.
 */
#if PAGE_SIZE > SZ_16K
#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
#else
#define ENA_PAGE_SIZE PAGE_SIZE
#endif

/* Minimum usable MSI-X allocation: one management + one IO vector */
#define ENA_MIN_MSIX_VEC 2

/* PCI BARs used by the device */
#define ENA_REG_BAR 0
#define ENA_MEM_BAR 2
#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))

#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_RING_SIZE (256)

#define ENA_MIN_NUM_IO_QUEUES (1)

/* Wake a stopped TX queue once at least this many descriptors are free:
 * enough for a maximally fragmented skb plus metadata.
 */
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
/* RX packets up to this size are copied into a fresh skb header
 * instead of handing over the RX page.
 */
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)

#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600

#define ENA_MIN_MTU 128

#define ENA_NAME_MAX_LEN 20
#define ENA_IRQNAME_SIZE 40

/* Max number of hardware buffers a single packet may span */
#define ENA_PKT_MAX_BUFS 19

#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
100
101
102
103
/* Only a fraction (1/divider) of the NAPI budget is spent on TX
 * completion processing; the remainder goes to RX.
 */
#define ENA_TX_POLL_BUDGET_DIVIDER 4

/* Refill the RX queue when the number of required descriptors exceeds
 * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET.
 */
#define ENA_RX_REFILL_THRESH_DIVIDER 8
#define ENA_RX_REFILL_THRESH_PACKET 256

/* Number of TX queues checked for missing completions per timer tick */
#define ENA_MONITORED_TX_QUEUES 4

/* Max number of timed-out packets tolerated before a device reset */
#define MAX_NUM_OF_TIMEOUTED_PACKETS 128

/* Ring index helpers — ring sizes are powers of two, so wrap with a mask */
#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))

#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
	(((idx) + (n)) & ((ring_size) - 1))

/* TX and RX rings of one channel map to interleaved device queue indices */
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)

/* MSI-X vector 0 is management; IO vectors follow */
#define ENA_MGMNT_IRQ_IDX 0
#define ENA_IO_IRQ_FIRST_IDX 1
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))

#define ENA_ADMIN_POLL_DELAY_US 100

/* Reset the device if no keep-alive event arrives within this window */
#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3

#define ENA_MMIO_DISABLE_REG_READ BIT(0)

/* Largest MTU that still fits in an ENA page together with the
 * ethernet headers, XDP headroom and the skb_shared_info tail.
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
			 VLAN_HLEN - XDP_PACKET_HEADROOM - \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* XDP TX rings occupy the index range starting at adapter->xdp_first_ring */
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
154
/* Bookkeeping for a single MSI-X interrupt vector. */
struct ena_irq {
	irq_handler_t handler;
	void *data;			/* opaque argument passed to handler */
	int cpu;			/* CPU this vector is affinitized to */
	u32 vector;			/* interrupt vector number */
	cpumask_t affinity_hint_mask;
	char name[ENA_IRQNAME_SIZE];	/* name shown in /proc/interrupts */
};
163
/* Per-channel NAPI context, tying a NAPI instance to its rings. */
struct ena_napi {
	struct napi_struct napi ____cacheline_aligned;
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	struct ena_ring *xdp_ring;	/* only used when XDP is attached */
	/* NOTE(review): presumably set on the first interrupt of this
	 * vector and used for missing-interrupt detection — confirm in
	 * the .c file.
	 */
	bool first_interrupt;
	bool interrupts_masked;
	u32 qid;			/* queue/channel index */
	struct dim dim;			/* dynamic interrupt moderation state */
};
174
/* In/out context for computing supported queue depths and SGL sizes. */
struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	struct pci_dev *pdev;
	u32 tx_queue_size;	/* chosen TX queue depth */
	u32 rx_queue_size;	/* chosen RX queue depth */
	u32 max_tx_queue_size;	/* device-supported maximum */
	u32 max_rx_queue_size;	/* device-supported maximum */
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;
};
186
struct ena_tx_buffer {
	struct sk_buff *skb;
	/* num of ena desc for this specific skb
	 * (includes data desc and metadata desc)
	 */
	u32 tx_descs;
	/* num of buffers used by this skb */
	u32 num_of_bufs;

	/* XDP buffer structure which is used for sending packets in
	 * the xdp queues
	 */
	struct xdp_frame *xdpf;
	/* The rx page for the rx buffer that was received in rx and
	 * re-transmitted on xdp tx queues as a result of XDP_TX action.
	 * The page is freed once cleaning of the buffer completes.
	 */
	struct page *xdp_rx_page;

	/* Indicate if bufs[0] maps the linear data of the skb. */
	u8 map_linear_data;

	/* Used to limit the number of "missing tx completion" prints */
	u32 print_once;

	/* Timestamp used to detect missing tx packets.
	 *
	 * NOTE(review): presumably set to non-zero on transmit and
	 * cleared by napi/timer_service; a given packet is not expected
	 * to be handled by xmit and cleanup concurrently, so no lock is
	 * taken — confirm against the .c file.
	 */
	unsigned long last_jiffies;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
} ____cacheline_aligned;
224
/* Per-descriptor RX bookkeeping: the page handed to the device and,
 * after completion, the skb built from it.
 */
struct ena_rx_buffer {
	struct sk_buff *skb;
	struct page *page;
	u32 page_offset;	/* offset of the buffer within the page */
	struct ena_com_buf ena_buf;
} ____cacheline_aligned;
231
/* Per-TX-ring statistics, protected by ena_ring.syncp. */
struct ena_stats_tx {
	u64 cnt;		/* packets transmitted */
	u64 bytes;		/* bytes transmitted */
	u64 queue_stop;		/* times the queue was stopped (ring full) */
	u64 prepare_ctx_err;
	u64 queue_wakeup;	/* times a stopped queue was restarted */
	u64 dma_mapping_err;
	u64 linearize;		/* skbs that had to be linearized */
	u64 linearize_failed;
	u64 napi_comp;
	u64 tx_poll;
	u64 doorbells;
	u64 bad_req_id;
	u64 llq_buffer_copy;
	u64 missed_tx;		/* completions that never arrived */
	u64 unmask_interrupt;
};
249
/* Per-RX-ring statistics, protected by ena_ring.syncp. */
struct ena_stats_rx {
	u64 cnt;		/* packets received */
	u64 bytes;		/* bytes received */
	u64 rx_copybreak_pkt;	/* small packets copied into a fresh skb */
	u64 csum_good;
	u64 refil_partial;	/* buffer refills that could not complete */
	u64 bad_csum;
	u64 page_alloc_fail;
	u64 skb_alloc_fail;
	u64 dma_mapping_err;
	u64 bad_desc_num;
	u64 bad_req_id;
	u64 empty_rx_ring;
	u64 csum_unchecked;	/* packets the device did not checksum */
};
265
struct ena_ring {
	/* Holds the empty request ids for TX/RX
	 * out of order completions
	 */
	u16 *free_ids;

	/* TX and RX rings share this struct; only one member is live */
	union {
		struct ena_tx_buffer *tx_buffer_info;
		struct ena_rx_buffer *rx_buffer_info;
	};

	/* cached pointers to avoid dereferencing the adapter */
	struct device *dev;
	struct pci_dev *pdev;
	struct napi_struct *napi;
	struct net_device *netdev;
	struct ena_com_dev *ena_dev;
	struct ena_adapter *adapter;
	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;
	struct bpf_prog *xdp_bpf_prog;	/* attached XDP program, if any */
	struct xdp_rxq_info xdp_rxq;

	u16 next_to_use;
	u16 next_to_clean;
	u16 rx_copybreak;
	u16 rx_headroom;
	u16 qid;
	u16 mtu;
	u16 sgl_size;

	/* The maximum header length the device can handle */
	u8 tx_max_header_size;

	bool first_interrupt;
	bool disable_meta_caching;
	u16 no_interrupt_event_cnt;

	/* CPU the ring's interrupt is steered to */
	int cpu;

	/* number of tx/rx_buffer_info's entries */
	int ring_size;

	enum ena_admin_placement_policy_type tx_mem_queue_type;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
	u32 smoothed_interval;		/* interrupt moderation interval */
	u32 per_napi_packets;
	u16 non_empty_napi_events;
	struct u64_stats_sync syncp;	/* protects tx_stats/rx_stats */
	union {
		struct ena_stats_tx tx_stats;
		struct ena_stats_rx rx_stats;
	};

	u8 *push_buf_intermediate_buf;	/* bounce buffer for LLQ pushes */
	int empty_rx_queue;
} ____cacheline_aligned;
324
/* Adapter-wide statistics, protected by ena_adapter.syncp. */
struct ena_stats_dev {
	u64 tx_timeout;
	u64 suspend;
	u64 resume;
	u64 wd_expired;		/* keep-alive watchdog expirations */
	u64 interface_up;
	u64 interface_down;
	u64 admin_q_pause;
	u64 rx_drops;
	u64 tx_drops;
};
336
/* Bit positions for ena_adapter.flags (manipulated with set_bit et al.) */
enum ena_flags_t {
	ENA_FLAG_DEVICE_RUNNING,
	ENA_FLAG_DEV_UP,
	ENA_FLAG_LINK_UP,
	ENA_FLAG_MSIX_ENABLED,
	ENA_FLAG_TRIGGER_RESET,
	ENA_FLAG_ONGOING_RESET
};
345
346
/* adapter-specific private data structure */
struct ena_adapter {
	struct ena_com_dev *ena_dev;
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* rx packets shorter than this length are copied into the skb
	 * header
	 */
	u32 rx_copybreak;
	u32 max_mtu;

	u32 num_io_queues;	/* currently active IO queue pairs */
	u32 max_num_io_queues;	/* device-supported maximum */

	int msix_vecs;

	u32 missing_tx_completion_threshold;

	u32 requested_tx_ring_size;
	u32 requested_rx_ring_size;

	u32 max_tx_ring_size;
	u32 max_rx_ring_size;

	u32 msg_enable;		/* netif message level bitmap */

	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;

	u8 mac_addr[ETH_ALEN];

	unsigned long keep_alive_timeout;
	unsigned long missing_tx_completion_to;

	char name[ENA_NAME_MAX_LEN];

	unsigned long flags;	/* bitmask of enum ena_flags_t */
	/* TX */
	struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
		____cacheline_aligned_in_smp;

	/* RX */
	struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
		____cacheline_aligned_in_smp;

	struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES];

	struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)];

	/* timer service */
	struct work_struct reset_task;
	struct timer_list timer_service;

	bool wd_state;		/* keep-alive watchdog armed */
	bool dev_up_before_reset;
	bool disable_meta_caching;
	unsigned long last_keep_alive_jiffies;

	struct u64_stats_sync syncp;	/* protects dev_stats */
	struct ena_stats_dev dev_stats;

	/* last queue index that was checked for uncompleted tx packets */
	u32 last_monitored_tx_qid;

	enum ena_regs_reset_reason_types reset_reason;

	struct bpf_prog *xdp_bpf_prog;	/* attached XDP program, if any */
	u32 xdp_first_ring;	/* index of the first XDP TX ring */
	u32 xdp_num_queues;
};
418
/* Install the driver's ethtool operations on the net_device. */
void ena_set_ethtool_ops(struct net_device *netdev);

/* Dump driver statistics to the kernel log. */
void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);

/* Dump driver statistics into the caller-supplied buffer. */
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);

/* Change TX/RX ring depths. Returns 0 on success, negative errno on
 * failure. NOTE(review): implemented in the .c file — confirm whether
 * it restarts the interface.
 */
int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size);

/* Change the number of IO channels. Returns 0 or negative errno. */
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);

/* ethtool get_sset_count hook: number of strings for the given set. */
int ena_get_sset_count(struct net_device *netdev, int sset);
432
/* Result codes for ena_xdp_allowed() */
enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};
438
439static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
440{
441 return adapter->xdp_first_ring != 0;
442}
443
444static inline bool ena_xdp_present(struct ena_adapter *adapter)
445{
446 return !!adapter->xdp_bpf_prog;
447}
448
449static inline bool ena_xdp_present_ring(struct ena_ring *ring)
450{
451 return !!ring->xdp_bpf_prog;
452}
453
454static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
455 u32 queues)
456{
457 return 2 * queues <= adapter->max_num_io_queues;
458}
459
460static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
461{
462 enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
463
464 if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
465 rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
466 else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
467 rc = ENA_XDP_NO_ENOUGH_QUEUES;
468
469 return rc;
470}
471
472#endif
473