1
2
3
4
5
6#ifndef _MLX5_IB_WR_H
7#define _MLX5_IB_WR_H
8
9#include "mlx5_ib.h"
10
/*
 * Size threshold (in bytes) for posting UMR data inline in the send
 * queue — NOTE(review): meaning inferred from the name; the consuming
 * UMR post path is not visible in this header, confirm against it.
 */
enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
14
/*
 * Fixed 16 bytes of reserved padding within a send WQE.  Only the size
 * is established here; presumably it aligns the inline Ethernet segment
 * that follows it in raw-packet WQEs — verify against the users.
 */
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
34{
35 void *fragment_end;
36
37 fragment_end = mlx5_frag_buf_get_wqe
38 (&sq->fbc,
39 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
40
41 return fragment_end + MLX5_SEND_WQE_BB;
42}
43
/*
 * Post send/receive work requests to @ibqp.  @drain selects the
 * QP-drain variant of the flow (see the nodrain/drain inline wrappers
 * in this header).  Presumably @bad_wr returns the first WR that could
 * not be posted, per the usual verbs convention — definitions live in
 * wr.c, confirm there.
 */
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);
48
/* Post send WRs on the normal (non-drain) path: drain = false. */
static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}
55
/* Post send WRs with the drain flag set: drain = true. */
static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}
62
/* Post receive WRs on the normal (non-drain) path: drain = false. */
static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}
69
/* Post receive WRs with the drain flag set: drain = true. */
static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					 const struct ib_recv_wr *wr,
					 const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
76#endif
77