/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

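/* Room reserved before the SQ is stopped: one maximal WQE plus the NOPs
 * that may be posted to pad the queue up to a fragment edge (see
 * mlx5e_fill_sq_frag_edge() below).
 */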
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
 *  - a resync SKB.
 * kTLS offload requires additional stop_room for:
 * - static params WQE,
 * - progress params WQE, and
 * - resync DUMP per frag.
 */
#define MLX5E_SQ_TLS_ROOM  \
	(MLX5_SEND_WQE_MAX_WQEBBS + \
	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
	 MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
#endif

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

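/* True when at least @n WQEBBs are free between the producer (@pc) and
 * consumer (@cc) counters: (cc - pc) masked to a ring index is the number
 * of free slots, except that a fully idle ring (cc == pc) masks to zero,
 * hence the explicit equality check.
 */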
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

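/* Return the (zeroed) WQE at the current producer position; @pi receives
 * the ring index the producer counter maps to.
 */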
static inline void *
mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	void *wqe;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(wqe, 0, size);

	return wqe;
}

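/* A typical (simplified) TX posting flow, for illustration only:
 *
 *	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, num_wqebbs)))
 *		return;				// caller stops the queue
 *	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 *	// ... fill ctrl/eth/data segments, advance sq->pc ...
 *	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
 */

/* Post a single NOP WQE (one WQEBB, DS count 1) at the current producer
 * position and advance the producer counter.
 */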
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

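/* As mlx5e_post_nop(), but with the initiator small-fence bit set so the
 * NOP is ordered after previously posted WQEs.
 */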
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

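/* Post @nnops single-WQEBB NOPs starting at @pi so the next WQE begins on
 * a fragment boundary; the per-WQEBB wqe_info entries are set up so
 * completion processing can skip over them.
 */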
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		wi->skb        = NULL;
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}

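/* Ring the doorbell for all WQEs posted up to @pc: request a completion
 * for the last WQE, publish the doorbell record, then write the control
 * segment to the UAR doorbell register.
 */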
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

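/* WQEs built by transport offloads (e.g. TLS) carry a nonzero TIS number
 * in the control segment.
 */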
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
{
	return !!wqe->ctrl.tisn;
}

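/* Re-arm the CQ so the device generates an event on the next completion. */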
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

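/* The TX DMA fifo records one entry per mapping used by in-flight WQEs;
 * it is a power-of-two ring indexed by a free-running counter masked with
 * dma_fifo_mask.
 */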
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

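/* Release a mapping previously recorded by mlx5e_dma_push(), using the
 * unmap call matching how it was created.
 */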
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

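/* Fill the ethernet segment's software parser (SWP) offsets and flags from
 * @swp_spec; for tunnels the inner offsets come from the inner headers,
 * otherwise they mirror the outer ones.
 */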
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* non-tunnel */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

#endif /* __MLX5_EN_TXRX_H___ */