1
2
3
4
5
6
7
8
9
10
11#ifndef EFX_NIC_H
12#define EFX_NIC_H
13
14#include <linux/net_tstamp.h>
15#include <linux/i2c-algo-bit.h>
16#include "net_driver.h"
17#include "efx.h"
18#include "mcdi.h"
19
/* Hardware revision codes returned by efx_nic_rev() (i.e. the value of
 * efx->type->revision for each supported NIC generation).
 */
enum {
	EFX_REV_SIENA_A0 = 0,
	EFX_REV_HUNT_A0 = 1,
};
24
25static inline int efx_nic_rev(struct efx_nic *efx)
26{
27 return efx->type->revision;
28}
29
30u32 efx_farch_fpga_ver(struct efx_nic *efx);
31
32
33static inline efx_qword_t *efx_event(struct efx_channel *channel,
34 unsigned int index)
35{
36 return ((efx_qword_t *) (channel->eventq.buf.addr)) +
37 (index & channel->eventq_mask);
38}
39
40
41
42
43
44
45
46
47
48
49
50static inline int efx_event_present(efx_qword_t *event)
51{
52 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
53 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
54}
55
56
57
58
59static inline efx_qword_t *
60efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
61{
62 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
63}
64
65
66static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
67{
68 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
69 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
70 else
71 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
72}
73
74
75
76
/* Report whether the TX queue was empty as of the last empty-queue
 * snapshot, given @write_count as the caller's view of the descriptor
 * write count.
 *
 * empty_read_count is read exactly once (ACCESS_ONCE) because it may be
 * updated concurrently.  A value of zero means no valid snapshot has
 * been taken (a valid snapshot carries EFX_EMPTY_COUNT_VALID), so the
 * queue cannot be assumed empty.  Otherwise the queue is empty iff the
 * snapshot matches @write_count once the valid flag is masked off.
 */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
87
88
89
90
91
92
93
94static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
95{
96 struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
97 return tx_queue->piobuf &&
98 __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
99 __efx_nic_tx_is_empty(partner, partner->insert_count);
100}
101
102
103
104
105
106
107
108
109
/* Decide whether a newly written TX descriptor should be pushed to the
 * NIC directly: only when the queue was empty (per the last snapshot,
 * evaluated against @write_count) and exactly one descriptor has been
 * written since @write_count was sampled.
 *
 * The empty-queue snapshot is consumed here: empty_read_count is reset
 * to zero so a second push is not attempted against a stale snapshot.
 * The reset happens after the emptiness test on purpose — reordering
 * these statements would change the result.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}
118
119
120static inline efx_qword_t *
121efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
122{
123 return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
124}
125
/* PHY type identifiers.  Note the numbering has gaps (5 and 7 are not
 * listed); the values presumably match board/firmware PHY-type codes —
 * TODO confirm against the hardware documentation.
 */
enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};
137
138
/* Unit of allocation for descriptor rings / buffer-table entries (one
 * 4KB page); EFX_BUF_SIZE is an alias used for buffer-table sizing.
 */
#define EFX_PAGE_SIZE 4096

#define EFX_BUF_SIZE EFX_PAGE_SIZE
142
143
/* Driver-generic (software-maintained) statistics.  These occupy the
 * first GENERIC_STAT_COUNT slots of each NIC type's stats array; the
 * per-NIC enums below start their numbering at GENERIC_STAT_COUNT.
 */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};
149
/* Indices into siena_nic_data::stats.  Generic stats come first (the
 * enum starts at GENERIC_STAT_COUNT); the remainder are the Siena MAC
 * TX/RX counters, including per-size-bin packet counts.
 */
enum {
	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT
};
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/**
 * struct siena_nic_data - Siena NIC state
 * @efx: Pointer back to main interface structure
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @stats: Hardware statistics, indexed by SIENA_STAT_*
 * @vf: SR-IOV virtual-function state (CONFIG_SFC_SRIOV only)
 * @vfdi_channel: Channel used for VFDI communication with VFs
 * @vf_buftbl_base: Base of buffer-table region reserved for VFs
 * @vfdi_status: DMA buffer holding VFDI status shared with VFs
 * @local_addr_list: List of local addresses (SR-IOV peer tracking)
 * @local_page_list: List of local pages (SR-IOV peer tracking)
 * @local_lock: Mutex protecting @local_addr_list and @local_page_list
 *	(NOTE(review): lock coverage inferred from grouping — confirm)
 * @peer_work: Work item for peer-state updates
 */
struct siena_nic_data {
	struct efx_nic *efx;
	int wol_filter_id;
	u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
	struct siena_vf *vf;
	struct efx_channel *vfdi_channel;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif
};
240
/* Indices into efx_ef10_nic_data::stats.  Generic stats come first (the
 * enum starts at GENERIC_STAT_COUNT).  EF10_STAT_port_* entries are
 * per-port MAC/datapath counters; the trailing un-prefixed entries are
 * per-function (vadaptor) counters.
 */
enum {
	EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
	EF10_STAT_port_tx_packets,
	EF10_STAT_port_tx_pause,
	EF10_STAT_port_tx_control,
	EF10_STAT_port_tx_unicast,
	EF10_STAT_port_tx_multicast,
	EF10_STAT_port_tx_broadcast,
	EF10_STAT_port_tx_lt64,
	EF10_STAT_port_tx_64,
	EF10_STAT_port_tx_65_to_127,
	EF10_STAT_port_tx_128_to_255,
	EF10_STAT_port_tx_256_to_511,
	EF10_STAT_port_tx_512_to_1023,
	EF10_STAT_port_tx_1024_to_15xx,
	EF10_STAT_port_tx_15xx_to_jumbo,
	EF10_STAT_port_rx_bytes,
	EF10_STAT_port_rx_bytes_minus_good_bytes,
	EF10_STAT_port_rx_good_bytes,
	EF10_STAT_port_rx_bad_bytes,
	EF10_STAT_port_rx_packets,
	EF10_STAT_port_rx_good,
	EF10_STAT_port_rx_bad,
	EF10_STAT_port_rx_pause,
	EF10_STAT_port_rx_control,
	EF10_STAT_port_rx_unicast,
	EF10_STAT_port_rx_multicast,
	EF10_STAT_port_rx_broadcast,
	EF10_STAT_port_rx_lt64,
	EF10_STAT_port_rx_64,
	EF10_STAT_port_rx_65_to_127,
	EF10_STAT_port_rx_128_to_255,
	EF10_STAT_port_rx_256_to_511,
	EF10_STAT_port_rx_512_to_1023,
	EF10_STAT_port_rx_1024_to_15xx,
	EF10_STAT_port_rx_15xx_to_jumbo,
	EF10_STAT_port_rx_gtjumbo,
	EF10_STAT_port_rx_bad_gtjumbo,
	EF10_STAT_port_rx_overflow,
	EF10_STAT_port_rx_align_error,
	EF10_STAT_port_rx_length_error,
	EF10_STAT_port_rx_nodesc_drops,
	EF10_STAT_port_rx_pm_trunc_bb_overflow,
	EF10_STAT_port_rx_pm_discard_bb_overflow,
	EF10_STAT_port_rx_pm_trunc_vfifo_full,
	EF10_STAT_port_rx_pm_discard_vfifo_full,
	EF10_STAT_port_rx_pm_trunc_qbb,
	EF10_STAT_port_rx_pm_discard_qbb,
	EF10_STAT_port_rx_pm_discard_mapping,
	EF10_STAT_port_rx_dp_q_disabled_packets,
	EF10_STAT_port_rx_dp_di_dropped_packets,
	EF10_STAT_port_rx_dp_streaming_packets,
	EF10_STAT_port_rx_dp_hlb_fetch,
	EF10_STAT_port_rx_dp_hlb_wait,
	EF10_STAT_rx_unicast,
	EF10_STAT_rx_unicast_bytes,
	EF10_STAT_rx_multicast,
	EF10_STAT_rx_multicast_bytes,
	EF10_STAT_rx_broadcast,
	EF10_STAT_rx_broadcast_bytes,
	EF10_STAT_rx_bad,
	EF10_STAT_rx_bad_bytes,
	EF10_STAT_rx_overflow,
	EF10_STAT_tx_unicast,
	EF10_STAT_tx_unicast_bytes,
	EF10_STAT_tx_multicast,
	EF10_STAT_tx_multicast_bytes,
	EF10_STAT_tx_broadcast,
	EF10_STAT_tx_broadcast_bytes,
	EF10_STAT_tx_bad,
	EF10_STAT_tx_bad_bytes,
	EF10_STAT_tx_overflow,
	EF10_STAT_COUNT
};
315
316
317
318
319
320#define EF10_TX_PIOBUF_COUNT 16
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
/**
 * struct efx_ef10_nic_data - EF10 (Huntington-class) NIC state
 * @mcdi_buf: DMA buffer for MCDI requests/responses
 * @warm_boot_count: Last seen MC warm-boot count
 * @vi_base: Absolute index of the first VI assigned to this function
 * @n_allocated_vis: Number of VIs allocated
 * @must_realloc_vis: VIs must be reallocated after an MC reboot
 * @must_restore_filters: Filters must be reinserted after an MC reboot
 * @n_piobufs: Number of PIO buffers allocated
 * @wc_membase: Write-combining mapping of the PIO region
 * @pio_write_base: Base address for PIO writes within @wc_membase
 * @pio_write_vi_base: First VI usable for PIO writes
 * @piobuf_handle: MC handles of the allocated PIO buffers
 * @must_restore_piobufs: PIO buffers must be restored after an MC reboot
 * @rx_rss_context: MC handle of the RSS context in use
 * @rx_rss_context_exclusive: RSS context is exclusive to this function
 * @stats: Hardware statistics, indexed by EF10_STAT_*
 * @workaround_35388: Firmware workaround 35388 enabled
 * @workaround_26807: Firmware workaround 26807 enabled
 * @workaround_61265: Firmware workaround 61265 enabled
 * @must_check_datapath_caps: Datapath caps must be re-read after reboot
 * @datapath_caps: First word of datapath firmware capabilities
 * @datapath_caps2: Second word of datapath firmware capabilities
 * @rx_dpcpu_fw_id: RX datapath firmware variant id
 * @tx_dpcpu_fw_id: TX datapath firmware variant id
 * @vport_id: MC handle of this function's vport
 * @must_probe_vswitching: Vswitching must be re-probed after reboot
 * @pf_index: Index of the parent PF
 * @port_id: Physical port identifier
 * @vf_index: Index of this VF within the PF (SR-IOV only)
 * @vf: Per-VF state array (SR-IOV only)
 * @vport_mac: MAC address assigned to the vport
 * @vlan_list: List of VLANs configured on this interface
 * @vlan_lock: Mutex protecting @vlan_list
 */
struct efx_ef10_nic_data {
	struct efx_buffer mcdi_buf;
	u16 warm_boot_count;
	unsigned int vi_base;
	unsigned int n_allocated_vis;
	bool must_realloc_vis;
	bool must_restore_filters;
	unsigned int n_piobufs;
	void __iomem *wc_membase, *pio_write_base;
	unsigned int pio_write_vi_base;
	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
	bool must_restore_piobufs;
	u32 rx_rss_context;
	bool rx_rss_context_exclusive;
	u64 stats[EF10_STAT_COUNT];
	bool workaround_35388;
	bool workaround_26807;
	bool workaround_61265;
	bool must_check_datapath_caps;
	u32 datapath_caps;
	u32 datapath_caps2;
	unsigned int rx_dpcpu_fw_id;
	unsigned int tx_dpcpu_fw_id;
	unsigned int vport_id;
	bool must_probe_vswitching;
	unsigned int pf_index;
	u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
	unsigned int vf_index;
	struct ef10_vf *vf;
#endif
	u8 vport_mac[ETH_ALEN];
	struct list_head vlan_list;
	struct mutex vlan_lock;
};
396
397int efx_init_sriov(void);
398void efx_fini_sriov(void);
399
400struct ethtool_ts_info;
401int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
402void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
403void efx_ptp_remove(struct efx_nic *efx);
404int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
405int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
406void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
407bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
408int efx_ptp_get_mode(struct efx_nic *efx);
409int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
410 unsigned int new_mode);
411int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
412void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
413size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
414size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
415void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
416void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
417 struct sk_buff *skb);
418static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
419 struct sk_buff *skb)
420{
421 if (channel->sync_events_state == SYNC_EVENTS_VALID)
422 __efx_rx_skb_attach_timestamp(channel, skb);
423}
424void efx_ptp_start_datapath(struct efx_nic *efx);
425void efx_ptp_stop_datapath(struct efx_nic *efx);
426
427extern const struct efx_nic_type falcon_a1_nic_type;
428extern const struct efx_nic_type falcon_b0_nic_type;
429extern const struct efx_nic_type siena_a0_nic_type;
430extern const struct efx_nic_type efx_hunt_a0_nic_type;
431extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
432
433
434
435
436
437
438
439
440int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
441
442
443static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
444{
445 return tx_queue->efx->type->tx_probe(tx_queue);
446}
447static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
448{
449 tx_queue->efx->type->tx_init(tx_queue);
450}
451static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
452{
453 tx_queue->efx->type->tx_remove(tx_queue);
454}
455static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
456{
457 tx_queue->efx->type->tx_write(tx_queue);
458}
459
460
461static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
462{
463 return rx_queue->efx->type->rx_probe(rx_queue);
464}
465static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
466{
467 rx_queue->efx->type->rx_init(rx_queue);
468}
469static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
470{
471 rx_queue->efx->type->rx_remove(rx_queue);
472}
473static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
474{
475 rx_queue->efx->type->rx_write(rx_queue);
476}
477static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
478{
479 rx_queue->efx->type->rx_defer_refill(rx_queue);
480}
481
482
483static inline int efx_nic_probe_eventq(struct efx_channel *channel)
484{
485 return channel->efx->type->ev_probe(channel);
486}
487static inline int efx_nic_init_eventq(struct efx_channel *channel)
488{
489 return channel->efx->type->ev_init(channel);
490}
491static inline void efx_nic_fini_eventq(struct efx_channel *channel)
492{
493 channel->efx->type->ev_fini(channel);
494}
495static inline void efx_nic_remove_eventq(struct efx_channel *channel)
496{
497 channel->efx->type->ev_remove(channel);
498}
499static inline int
500efx_nic_process_eventq(struct efx_channel *channel, int quota)
501{
502 return channel->efx->type->ev_process(channel, quota);
503}
504static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
505{
506 channel->efx->type->ev_read_ack(channel);
507}
508void efx_nic_event_test_start(struct efx_channel *channel);
509
510
511int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
512void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
513void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
514void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
515void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
516unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
517 dma_addr_t dma_addr, unsigned int len);
518int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
519void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
520void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
521void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
522void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
523void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
524int efx_farch_ev_probe(struct efx_channel *channel);
525int efx_farch_ev_init(struct efx_channel *channel);
526void efx_farch_ev_fini(struct efx_channel *channel);
527void efx_farch_ev_remove(struct efx_channel *channel);
528int efx_farch_ev_process(struct efx_channel *channel, int quota);
529void efx_farch_ev_read_ack(struct efx_channel *channel);
530void efx_farch_ev_test_generate(struct efx_channel *channel);
531
532
533int efx_farch_filter_table_probe(struct efx_nic *efx);
534void efx_farch_filter_table_restore(struct efx_nic *efx);
535void efx_farch_filter_table_remove(struct efx_nic *efx);
536void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
537s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
538 bool replace);
539int efx_farch_filter_remove_safe(struct efx_nic *efx,
540 enum efx_filter_priority priority,
541 u32 filter_id);
542int efx_farch_filter_get_safe(struct efx_nic *efx,
543 enum efx_filter_priority priority, u32 filter_id,
544 struct efx_filter_spec *);
545int efx_farch_filter_clear_rx(struct efx_nic *efx,
546 enum efx_filter_priority priority);
547u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
548 enum efx_filter_priority priority);
549u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
550s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
551 enum efx_filter_priority priority, u32 *buf,
552 u32 size);
553#ifdef CONFIG_RFS_ACCEL
554s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
555 struct efx_filter_spec *spec);
556bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
557 unsigned int index);
558#endif
559void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
560
561bool efx_nic_event_present(struct efx_channel *channel);
562
563
564
565
566
567
568
569
570
571
572
573
574
/* Update *stat with a newly computed value @diff, but only if it moved
 * forward.  Some statistics are derived as the difference of counters
 * that may be sampled at slightly different times, so the computed
 * value can transiently appear to decrease; such readings are ignored.
 * The signed comparison of the unsigned difference is deliberate: it
 * also gives the right answer across 64-bit counter wraparound, so do
 * not "simplify" it to (diff > *stat).
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}
580
581
582int efx_nic_init_interrupt(struct efx_nic *efx);
583int efx_nic_irq_test_start(struct efx_nic *efx);
584void efx_nic_fini_interrupt(struct efx_nic *efx);
585
586
587void efx_farch_irq_enable_master(struct efx_nic *efx);
588int efx_farch_irq_test_generate(struct efx_nic *efx);
589void efx_farch_irq_disable_master(struct efx_nic *efx);
590irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
591irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
592irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
593
594static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
595{
596 return ACCESS_ONCE(channel->event_test_cpu);
597}
598static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
599{
600 return ACCESS_ONCE(efx->last_irq_cpu);
601}
602
603
604int efx_nic_flush_queues(struct efx_nic *efx);
605void siena_prepare_flush(struct efx_nic *efx);
606int efx_farch_fini_dmaq(struct efx_nic *efx);
607void efx_farch_finish_flr(struct efx_nic *efx);
608void siena_finish_flush(struct efx_nic *efx);
609void falcon_start_nic_stats(struct efx_nic *efx);
610void falcon_stop_nic_stats(struct efx_nic *efx);
611int falcon_reset_xaui(struct efx_nic *efx);
612void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
613void efx_farch_init_common(struct efx_nic *efx);
614void efx_ef10_handle_drain_event(struct efx_nic *efx);
615void efx_farch_rx_push_indir_table(struct efx_nic *efx);
616
617int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
618 unsigned int len, gfp_t gfp_flags);
619void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
620
621
/* One entry of a register self-test table (see
 * efx_farch_test_registers()): a register @address and a @mask of the
 * bits the test may safely write and verify.
 */
struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};
626int efx_farch_test_registers(struct efx_nic *efx,
627 const struct efx_farch_register_test *regs,
628 size_t n_regs);
629
630size_t efx_nic_get_regs_len(struct efx_nic *efx);
631void efx_nic_get_regs(struct efx_nic *efx, void *buf);
632
633size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
634 const unsigned long *mask, u8 *names);
635void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
636 const unsigned long *mask, u64 *stats,
637 const void *dma_buf, bool accumulate);
638void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
639
640#define EFX_MAX_FLUSH_TIME 5000
641
642void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
643 efx_qword_t *event);
644
645#endif
646