/* linux/drivers/net/ethernet/mellanox/mlx5/core/en/params.c */
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/* Copyright (c) 2019 Mellanox Technologies. */
   3
   4#include "en/params.h"
   5
   6static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
   7                                   struct mlx5e_xsk_param *xsk)
   8{
   9        return params->xdp_prog || xsk;
  10}
  11
  12u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
  13                                 struct mlx5e_xsk_param *xsk)
  14{
  15        u16 headroom = NET_IP_ALIGN;
  16
  17        if (mlx5e_rx_is_xdp(params, xsk)) {
  18                headroom += XDP_PACKET_HEADROOM;
  19                if (xsk)
  20                        headroom += xsk->headroom;
  21        } else {
  22                headroom += MLX5_RX_HEADROOM;
  23        }
  24
  25        return headroom;
  26}
  27
  28u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
  29                                struct mlx5e_xsk_param *xsk)
  30{
  31        u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
  32        u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
  33        u32 frag_sz = linear_rq_headroom + hw_mtu;
  34
  35        /* AF_XDP doesn't build SKBs in place. */
  36        if (!xsk)
  37                frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
  38
  39        /* XDP in mlx5e doesn't support multiple packets per page. */
  40        if (mlx5e_rx_is_xdp(params, xsk))
  41                frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
  42
  43        /* Even if we can go with a smaller fragment size, we must not put
  44         * multiple packets into a single frame.
  45         */
  46        if (xsk)
  47                frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
  48
  49        return frag_sz;
  50}
  51
  52u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
  53                                struct mlx5e_xsk_param *xsk)
  54{
  55        u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
  56
  57        return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
  58}
  59
  60bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
  61                            struct mlx5e_xsk_param *xsk)
  62{
  63        /* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
  64         * than one page. For this, check both with and without xsk.
  65         */
  66        u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
  67                                 mlx5e_rx_get_linear_frag_sz(params, NULL));
  68
  69        return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
  70}
  71
/* Largest log2 stride size the device can express: the log_wqe_stride_size
 * field is an offset from MLX5_MPWQE_LOG_STRIDE_SZ_BASE, so the max is the
 * field's all-ones value plus that base.
 */
#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
                                          MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
/* Whether a striding (MPWQE) RQ can build linear SKBs, i.e. whether one
 * packet fits in a single stride and the resulting stride/num-stride
 * parameters are representable by the device.
 */
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params,
                                  struct mlx5e_xsk_param *xsk)
{
        u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
        s8 signed_log_num_strides_param;
        u8 log_num_strides;

        /* Linear SKBs must be possible at all before considering MPWQE. */
        if (!mlx5e_rx_is_linear_skb(params, xsk))
                return false;

        /* The stride (one per packet) must be expressible in the
         * log_wqe_stride_size field.
         */
        if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
                return false;

        /* With the extended stride-num range cap, any remaining number of
         * strides is acceptable.
         */
        if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
                return true;

        /* Otherwise log_num_strides is encoded as an offset from
         * MLX5_MPWQE_LOG_NUM_STRIDES_BASE; the signed subtraction checks
         * it doesn't fall below the base (i.e. the offset is non-negative).
         */
        log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
        signed_log_num_strides_param =
                (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

        return signed_log_num_strides_param >= 0;
}
  97
  98u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
  99                               struct mlx5e_xsk_param *xsk)
 100{
 101        u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
 102
 103        /* Numbers are unsigned, don't subtract to avoid underflow. */
 104        if (params->log_rq_mtu_frames <
 105            log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
 106                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
 107
 108        return params->log_rq_mtu_frames - log_pkts_per_wqe;
 109}
 110
 111u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
 112                                   struct mlx5e_params *params,
 113                                   struct mlx5e_xsk_param *xsk)
 114{
 115        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
 116                return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
 117
 118        return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
 119}
 120
 121u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
 122                                   struct mlx5e_params *params,
 123                                   struct mlx5e_xsk_param *xsk)
 124{
 125        return MLX5_MPWRQ_LOG_WQE_SZ -
 126                mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 127}
 128
 129u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
 130                          struct mlx5e_params *params,
 131                          struct mlx5e_xsk_param *xsk)
 132{
 133        bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
 134                mlx5e_rx_is_linear_skb(params, xsk) :
 135                mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
 136
 137        return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
 138}
 139