linux/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include <linux/indirect_call_wrapper.h>

#include "en.h"
#include "en/txrx.h"
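
/* MLX5E_XDP_MIN_INLINE is the number of packet-header bytes (Ethernet plus
 * VLAN headers) copied into the descriptor's inline header when the SQ's
 * minimum inline mode requires inline headers. MLX5E_XDP_TX_DS_COUNT is the
 * number of data segments in a plain (non-MPWQE) XDP TX WQE: the control and
 * ethernet segments counted by MLX5E_TX_WQE_EMPTY_DS_COUNT plus a single
 * scatter/gather data segment for the packet.
 */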
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
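
/* An MPWQE data segment may carry the packet inline instead of by address.
 * MLX5E_XDP_INLINE_WQE_MAX_DS_CNT caps the descriptor space a single inlined
 * packet may consume (16 DS units), and MLX5E_XDP_INLINE_WQE_SZ_THRSD is the
 * matching payload size limit: assuming a 16-byte MLX5_SEND_WQE_DS and a
 * 4-byte inline segment header, this works out to 16 * 16 - 4 = 252 bytes.
 */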
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
        (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
         sizeof(struct mlx5_wqe_inline_seg))

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      u32 *len, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags);
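
/* The xmit handlers come in two flavors, legacy and MPWQE, selected per SQ by
 * mlx5e_set_xmit_fp() and invoked through function pointers. They are
 * declared with INDIRECT_CALLABLE_DECLARE() so the call sites in en/xdp.c can
 * use the INDIRECT_CALL_2() helpers and avoid the retpoline indirect-branch
 * penalty on the hot path.
 */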
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
                                                          struct mlx5e_xmit_data *xdptxd,
                                                          struct mlx5e_xdp_info *xdpi,
                                                          int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
                                                    struct mlx5e_xmit_data *xdptxd,
                                                    struct mlx5e_xdp_info *xdpi,
                                                    int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
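
/* Two state bits gate XDP transmission. MLX5E_STATE_XDP_TX_ENABLED means the
 * XDP SQs exist and may be used (e.g. by ndo_xdp_xmit redirects);
 * MLX5E_STATE_XDP_ACTIVE additionally means an XDP program is attached. The
 * disable path clears the bits and then calls synchronize_net() so concurrent
 * NAPI polls and XSK wakeups observe the new state before the queues are torn
 * down.
 */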
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);

        if (priv->channels.params.xdp_prog)
                set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
        if (priv->channels.params.xdp_prog)
                clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);

        clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
        /* Let other device's napi(s) and XSK wakeups see our new state. */
        synchronize_net();
}

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
        return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
        return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}
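
/* The xmit helpers leave the control segment of the last posted WQE in
 * sq->doorbell_cseg instead of ringing the doorbell per packet; calling this
 * once per batch (typically at the end of a NAPI poll) issues a single MMIO
 * doorbell for the whole batch.
 */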
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
        if (sq->doorbell_cseg) {
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
                sq->doorbell_cseg = NULL;
        }
}

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested cpu (SW).
 */
static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
        u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;

#define MLX5E_XDP_INLINE_WATERMARK_LOW  10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

        if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
                return false;

        if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
                return true;

        return cur;
}
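
/* Illustrative only: the MPWQE session-open path in en/xdp.c is expected to
 * refresh the inline decision roughly as
 *
 *      session->inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on);
 *
 * (the exact call site may differ). The low/high watermarks provide
 * hysteresis so the inline state does not flap around a single threshold.
 */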

static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
{
        if (session->inline_on)
                return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
                       MLX5E_TX_MPW_MAX_NUM_DS;
        return mlx5e_tx_mpwqe_is_full(session);
}
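
/* Bookkeeping recorded for each posted XDP WQE. The completion path
 * (mlx5e_poll_xdpsq_cq() / mlx5e_free_xdpsq_descs()) uses num_wqebbs to
 * advance the SQ consumer counter and num_pkts to know how many entries to
 * pop from the xdpi FIFO.
 */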
struct mlx5e_xdp_wqe_info {
        u8 num_wqebbs;
        u8 num_pkts;
};
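
/* Append one packet to the currently open MPWQE session: when inlining is
 * enabled and the packet fits under MLX5E_XDP_INLINE_WQE_SZ_THRSD, its
 * payload is copied straight into the WQE as an inline segment; otherwise a
 * regular data segment pointing at the DMA-mapped buffer is added.
 */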
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
                         struct mlx5e_xmit_data *xdptxd,
                         struct mlx5e_xdpsq_stats *stats)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5_wqe_data_seg *dseg =
                (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
        u32 dma_len = xdptxd->len;

        session->pkt_count++;
        session->bytes_count += dma_len;

        if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
                struct mlx5_wqe_inline_seg *inline_dseg =
                        (struct mlx5_wqe_inline_seg *)dseg;
                u16 ds_len = sizeof(*inline_dseg) + dma_len;
                u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

                inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
                memcpy(inline_dseg->data, xdptxd->data, dma_len);

                session->ds_count += ds_cnt;
                stats->inlnw++;
                return;
        }

        dseg->addr       = cpu_to_be64(xdptxd->dma_addr);
        dseg->byte_count = cpu_to_be32(dma_len);
        dseg->lkey       = sq->mkey_be;
        session->ds_count++;
}
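
/* The xdpi FIFO records, for every frame pushed to the XDP SQ, the
 * information needed to release it on completion (typically the xdp_frame
 * and its DMA mapping, or the originating RQ page). push() runs on the xmit
 * path, pop() on the CQE handling path, and mask is the power-of-two FIFO
 * size minus one so the producer/consumer counters can wrap freely.
 */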
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
                     struct mlx5e_xdp_info *xi)
{
        u32 i = (*fifo->pc)++ & fifo->mask;

        fifo->xi[i] = *xi;
}

static inline struct mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
        return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif