   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* Copyright(c) 2018 Intel Corporation. */
   3
   4#ifndef _I40E_XSK_H_
   5#define _I40E_XSK_H_
   6
   7/* This value should match the pragma in the loop_unrolled_for
   8 * macro. Why 4? It is strictly empirical. It seems to be a good
   9 * compromise between the advantage of having simultaneous outstanding
  10 * reads to the DMA array that can hide each others latency and the
  11 * disadvantage of having a larger code path.
  12 */
  13#define PKTS_PER_BATCH 4
  14
  15#ifdef __clang__
  16#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
  17#elif __GNUC__ >= 8
  18#define loop_unrolled_for _Pragma("GCC unroll 4") for
  19#else
  20#define loop_unrolled_for for
  21#endif
  22
  23struct i40e_vsi;
  24struct xsk_buff_pool;
  25struct zero_copy_allocator;
  26
  27int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
  28int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
  29int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
  30                        u16 qid);
  31bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
  32int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
  33
  34bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
  35int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
  36int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
  37void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
  38
  39#endif /* _I40E_XSK_H_ */
  40