/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop room in the SQ for:
 *  - a resync send (one full-size WQE),
 *  - a kTLS static params WQE, and
 *  - a kTLS progress params WQE.
 */
#define MLX5E_SQ_TLS_ROOM \
	(MLX5_SEND_WQE_MAX_WQEBBS + \
	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
#endif

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

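/* True when the cyclic work queue has room for @n more WQEBBs between
 * producer counter @pc and consumer counter @cc; @cc == @pc means the
 * queue is empty, i.e. fully available.
 */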
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

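/* Return the WQE at the current producer position, zeroed over @size bytes;
 * the WQE index is returned through @pi.
 */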
static inline void *
mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	void *wqe;

	*pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	wqe  = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(wqe, 0, size);

	return wqe;
}

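/* Post a single-WQEBB NOP at the current producer position and advance the
 * producer counter.
 */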
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

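/* As mlx5e_post_nop(), but additionally sets the initiator small fence bit
 * in the control segment.
 */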
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

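/* Post @nnops NOPs starting at @pi, recording each as a one-WQEBB entry in
 * the wqe_info array, so that the next real WQE starts at the beginning of
 * an SQ fragment.
 */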
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}

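/* Ring the SQ doorbell: request a completion for @ctrl, publish the new
 * producer counter @pc in the doorbell record, then write the control
 * segment to the device doorbell (UAR).
 */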
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

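/* True when the control segment carries a TIS number, i.e. the WQE belongs
 * to a transport offload (such as TLS) and its headers must be inlined.
 */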
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
	return cseg && !!cseg->tisn;
}

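/* Resolve the inline mode for a TX WQE: transport offload WQEs force
 * TCP/UDP inline; otherwise start from the SQ minimum, bumped to at least
 * L2 when a VLAN tag must be inserted in software.
 */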
static inline u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
			 struct sk_buff *skb)
{
	u8 mode;

	if (mlx5e_transport_inline_tx_wqe(cseg))
		return MLX5_INLINE_MODE_TCP_UDP;

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

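/* Arm the CQ so the device raises an event on the next completion. */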
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

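/* Return the DMA fifo entry for counter @i; the fifo size is a power of
 * two, so masking wraps the free-running counter.
 */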
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

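/* Record a DMA mapping at the fifo producer position so that TX completion
 * can unmap it later.
 */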
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

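/* Unmap one fifo entry recorded by mlx5e_dma_push(), according to its
 * mapping type.
 */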
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

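/* Software parser (SWP) description of a packet's protocol layout: outer
 * and inner L3/L4 protocols, and whether the packet is tunneled.
 */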
struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

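/* Program the SWP offsets and flags in the ethernet segment from @swp_spec.
 * For non-tunneled packets the inner offsets mirror the outer ones.
 */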
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else {
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

#endif