linux/drivers/infiniband/hw/mlx5/wr.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
        MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is the first address past the end of the current fragment,
 * or past the end of the SQ itself. WQE construction repeatedly advances
 * a pointer as it writes the next piece of data, so after each advance it
 * only needs to check whether the pointer has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *      The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
        void *fragment_end;

        fragment_end = mlx5_frag_buf_get_wqe
                (&sq->fbc,
                 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

        return fragment_end + MLX5_SEND_WQE_BB;
}
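
/*
 * Illustrative sketch (not part of this header): the post-send path
 * typically carries a segment cursor together with the current edge and,
 * once the cursor reaches the edge, re-fetches both from the next stride
 * index so that WQE construction continues in the following fragment.
 * The helper name and the exact index arithmetic below are assumptions
 * made for illustration only.
 */
static inline void example_handle_send_edge(struct mlx5_ib_wq *sq, void **seg,
                                            u32 wqe_sz, void **cur_edge)
{
        u32 idx;

        /* Still inside the current fragment - nothing to do. */
        if (likely(*seg != *cur_edge))
                return;

        /*
         * Stride index just past the bytes written so far, assuming
         * wqe_sz counts 16-byte units while cur_post counts 64-byte
         * basic blocks (hence the >> 2).
         */
        idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
        *cur_edge = get_sq_edge(sq, idx);
        *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}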

int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
                                            const struct ib_send_wr *wr,
                                            const struct ib_send_wr **bad_wr)
{
        return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
                                          const struct ib_send_wr *wr,
                                          const struct ib_send_wr **bad_wr)
{
        return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
                                            const struct ib_recv_wr *wr,
                                            const struct ib_recv_wr **bad_wr)
{
        return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
                                          const struct ib_recv_wr *wr,
                                          const struct ib_recv_wr **bad_wr)
{
        return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
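
/*
 * Illustrative sketch (assumed wiring, not part of this header): the
 * nodrain variants match the ib_device_ops post_send/post_recv callback
 * signatures, so they are the natural entries to register with the RDMA
 * core, while the _drain variants are reserved for the QP drain flow,
 * which must still be able to post while the device is in an error state.
 * The ops table name below is hypothetical.
 */
static const struct ib_device_ops example_wr_ops = {
        .post_recv = mlx5_ib_post_recv_nodrain,
        .post_send = mlx5_ib_post_send_nodrain,
};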
#endif /* _MLX5_IB_WR_H */