1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <crypto/aead.h>
35#include <net/xfrm.h>
36#include <net/esp.h>
37
38#include "en_accel/ipsec_rxtx.h"
39#include "en_accel/ipsec.h"
40#include "en.h"
41
/* RX completion syndromes written by the HW into the per-packet IPsec
 * metadata; translated to xfrm crypto status in mlx5e_ipsec_build_sp().
 */
enum {
	MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
	MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
	MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};
47
/* RX half of the metadata payload (struct mlx5e_ipsec_metadata.content.rx),
 * filled by HW on decrypted packets.
 */
struct mlx5e_ipsec_rx_metadata {
	unsigned char nexthdr;	/* next protocol of the decrypted payload */
	__be32 sa_handle;	/* HW SA handle, used for the SADB RX lookup */
} __packed;
52
/* TX syndromes the driver writes into the metadata to tell the HW how to
 * offload the packet (plain ESP offload vs. ESP over LSO TCP).
 */
enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
57
/* TX half of the metadata payload (struct mlx5e_ipsec_metadata.content.tx),
 * filled by mlx5e_ipsec_set_metadata() before handing the skb to HW.
 */
struct mlx5e_ipsec_tx_metadata {
	__be16 mss_inv;		/* fixed-point 1/MSS from the inverse table (GSO only) */
	__be16 seq;		/* low 16 bits of the inner TCP sequence number (GSO only) */
	u8 esp_next_proto;	/* ESP next-header value (xo->proto) */
} __packed;
63
/* On-wire metadata header exchanged with the HW.  It is placed right after
 * a fake ethernet header whose ethertype is MLX5E_METADATA_ETHER_TYPE
 * (see mlx5e_ipsec_add_metadata() / mlx5e_ipsec_handle_rx_skb()).
 */
struct mlx5e_ipsec_metadata {
	unsigned char syndrome;		/* one of the RX/TX syndrome enums above */
	union {
		unsigned char raw[5];	/* raw view, zeroed on TX before use */

		struct mlx5e_ipsec_rx_metadata rx;	/* valid on RX */

		struct mlx5e_ipsec_tx_metadata tx;	/* valid on TX */
	} __packed content;
	/* packet ethertype, restored when the metadata is stripped */
	__be16 ethertype;
} __packed;
76
/* Largest MSS value the inverse table below covers (exclusive bound). */
#define MAX_LSO_MSS 2048

/* Pre-computed per-MSS inverse values, in network byte order.  Populated
 * once by mlx5e_ipsec_build_inverse_table().
 */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

/* Look up the pre-computed MSS inverse for a GSO skb.
 * NOTE(review): no bounds check — assumes gso_size < MAX_LSO_MSS,
 * presumably guaranteed by the device's advertised GSO limits; confirm.
 */
static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
	return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}
86
/* Insert an IPsec metadata header between the ethernet header and the
 * payload: the frame is grown by sizeof(*mdata) at the front, the MAC
 * addresses are moved to the new start, and the ethertype is replaced by
 * MLX5E_METADATA_ETHER_TYPE so the HW recognizes the metadata that follows.
 *
 * Returns a pointer to the (zeroed-content) metadata area on success, or
 * ERR_PTR(-ENOMEM) if headroom could not be made.
 */
static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *eth;

	/* Ensure writable headroom for the metadata (may unshare the head). */
	if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
		return ERR_PTR(-ENOMEM);

	eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
	skb->mac_header -= sizeof(*mdata);
	/* Metadata lives immediately after the ethernet header. */
	mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

	/* Move dest + source MAC addresses down to the new frame start;
	 * the original ethertype is left in place as mdata->ethertype.
	 */
	memmove(skb->data, skb->data + sizeof(*mdata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

	memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
	return mdata;
}
107
108static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
109{
110 unsigned int alen = crypto_aead_authsize(x->data);
111 struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
112 struct iphdr *ipv4hdr = ip_hdr(skb);
113 unsigned int trailer_len;
114 u8 plen;
115 int ret;
116
117 ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
118 if (unlikely(ret))
119 return ret;
120
121 trailer_len = alen + plen + 2;
122
123 pskb_trim(skb, skb->len - trailer_len);
124 if (skb->protocol == htons(ETH_P_IP)) {
125 ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
126 ip_send_check(ipv4hdr);
127 } else {
128 ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
129 trailer_len);
130 }
131 return 0;
132}
133
/* Program the SW parser (SWP) offsets/flags in the WQE ethernet segment so
 * the HW can locate the outer and inner L3/L4 headers of the ESP packet.
 * All offsets are in 16-bit words, hence the division by 2.
 *
 * Layout (per the offsets set below):
 *   Tunnel mode:    outer L3 = skb network header,
 *                   inner L3 = skb inner network header.
 *   Transport mode: outer and inner L3 both point at the skb network header.
 */
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	u8 proto;

	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		/* Inner L4 protocol comes from the encapsulated IP header. */
		if (xo->proto == IPPROTO_IPV6) {
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			proto = inner_ipv6_hdr(skb)->nexthdr;
		} else {
			proto = inner_ip_hdr(skb)->protocol;
		}
	} else {
		/* Transport mode: inner L3 is the same header as outer L3. */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (skb->protocol == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
		proto = xo->proto;
	}
	switch (proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through - UDP also needs the inner L4 offset below */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}
178
/* Write the 64-bit extended sequence number (ESN) as the IV, placed
 * immediately after the ESP header.
 */
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	int iv_offset;
	__be64 seqno;
	u32 seq_hi;

	/* NOTE(review): this appears to handle a GSO burst that straddles the
	 * low-32-bit sequence wrap — oseq has already wrapped below SCOPE_MID
	 * while the burst started above it (the subtraction underflows), so
	 * this packet still belongs to the previous seq.hi epoch. Confirm
	 * against the xfrm ESN offload logic.
	 */
	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
		seq_hi = xo->seq.hi - 1;
	} else {
		seq_hi = xo->seq.hi;
	}

	/* Compose the full 64-bit ESN and store it as the 8-byte IV. */
	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}
200
201void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
202 struct xfrm_offload *xo)
203{
204 int iv_offset;
205 __be64 seqno;
206
207
208 seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
209 iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
210 skb_store_bits(skb, iv_offset, &seqno, 8);
211}
212
/* Fill the TX metadata for the HW.  GSO packets get the LSO-TCP syndrome
 * plus the pre-computed MSS inverse and the low 16 bits of the inner TCP
 * sequence number; non-GSO packets get the plain offload syndrome.  The ESP
 * next-protocol is recorded in both cases.
 */
static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
				     struct mlx5e_ipsec_metadata *mdata,
				     struct xfrm_offload *xo)
{
	struct ip_esp_hdr *esph;
	struct tcphdr *tcph;

	if (skb_is_gso(skb)) {
		/* Add LSO metadata indication */
		esph = ip_esp_hdr(skb);
		tcph = inner_tcp_hdr(skb);
		netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
			   skb->network_header,
			   skb->transport_header,
			   skb->inner_network_header,
			   skb->inner_transport_header);
		netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
			   skb->len, skb_shinfo(skb)->gso_size,
			   ntohs(tcph->source), ntohs(tcph->dest),
			   ntohl(tcph->seq), ntohl(esph->seq_no));
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
		mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
		/* HW only needs the low 16 bits of the TCP sequence. */
		mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
	} else {
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
	}
	mdata->content.tx.esp_next_proto = xo->proto;

	netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
		   mdata->syndrome, mdata->content.tx.esp_next_proto,
		   ntohs(mdata->content.tx.mss_inv),
		   ntohs(mdata->content.tx.seq));
}
246
/* TX datapath hook: prepare an IPsec-offloaded skb for transmission.
 * Validates the offload state, strips the ESP trailer (non-GSO), prepends
 * the metadata header, programs the SWP offsets in the WQE and sets the IV.
 *
 * Returns the (possibly modified) skb, the original skb when no xfrm
 * offload is attached, or NULL after freeing the skb on any drop (the
 * corresponding sw_stats drop counter is incremented).
 */
struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
					  struct mlx5e_tx_wqe *wqe,
					  struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct mlx5e_ipsec_metadata *mdata;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *x;

	/* Not an offloaded packet — pass through untouched. */
	if (!xo)
		return skb;

	/* Only a single-transform secpath is supported by the HW. */
	if (unlikely(skb->sp->len != 1)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
		goto drop;
	}

	x = xfrm_input_state(skb);
	if (unlikely(!x)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
		goto drop;
	}

	/* State must be offloaded to this HW and the packet must be IP. */
	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6)))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
		goto drop;
	}

	/* Non-GSO packets carry the SW-built ESP trailer; remove it since
	 * the HW appends its own.
	 */
	if (!skb_is_gso(skb))
		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
			goto drop;
		}
	mdata = mlx5e_ipsec_add_metadata(skb);
	if (IS_ERR(mdata)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
		goto drop;
	}
	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
	/* offload_handle was validated non-zero above; it stores the SA entry. */
	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	sa_entry->set_iv_op(skb, x, xo);
	mlx5e_ipsec_set_metadata(skb, mdata, xo);

	return skb;

drop:
	kfree_skb(skb);
	return NULL;
}
299
/* RX helper: rebuild the secpath for a packet whose ESP processing was done
 * by HW.  Looks up the xfrm state via the SA handle carried in the RX
 * metadata, attaches it to a (duplicated) secpath, and translates the HW
 * syndrome into the xfrm offload status.
 *
 * Returns the xfrm state on success or NULL on any failure (the relevant
 * sw_stats drop counter is incremented; the caller frees the skb).
 */
static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
		     struct mlx5e_ipsec_metadata *mdata)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo;
	struct xfrm_state *xs;
	u32 sa_handle;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return NULL;
	}

	sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
	if (unlikely(!xs)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return NULL;
	}

	/* Attach the state and count it as an offloaded entry. */
	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	switch (mdata->syndrome) {
	case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		/* When the HW already stripped the ESP trailer, tell the
		 * xfrm layer and hand it the next-protocol from metadata.
		 */
		if (likely(priv->ipsec->no_trailer)) {
			xo->flags |= XFRM_ESP_NO_TRAILER;
			xo->proto = mdata->content.rx.nexthdr;
		}
		break;
	case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
		xo->status = CRYPTO_INVALID_PROTOCOL;
		break;
	default:
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
		return NULL;
	}
	return xs;
}
347
/* RX datapath hook: detect and strip the HW-inserted IPsec metadata header.
 * If the frame carries the MLX5E_METADATA_ETHER_TYPE marker, the secpath is
 * rebuilt from the metadata and the metadata bytes are removed so the stack
 * sees a normal ethernet frame.
 *
 * Returns the skb (unmodified when no metadata is present), or NULL after
 * freeing it when the secpath could not be built.
 */
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *old_eth;
	struct ethhdr *new_eth;
	struct xfrm_state *xs;
	__be16 *ethtype;

	/* Frame too short to hold ethernet header + metadata: not ours. */
	if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
		return skb;
	/* The metadata marker sits where the ethertype normally is. */
	ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
	if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
		return skb;

	/* Metadata follows immediately after the ethernet header. */
	mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
	xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
	if (unlikely(!xs)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Remove the metadata: shift the MAC addresses up over it (the real
	 * ethertype is already in place as mdata->ethertype) and pull.
	 */
	old_eth = (struct ethhdr *)skb->data;
	new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
	memmove(new_eth, old_eth, 2 * ETH_ALEN);
	/* Remove the metadata from the buffer */
	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);

	return skb;
}
381
382bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
383 netdev_features_t features)
384{
385 struct xfrm_state *x;
386
387 if (skb->sp && skb->sp->len) {
388 x = skb->sp->xvec[0];
389 if (x && x->xso.offload_handle)
390 return true;
391 }
392 return false;
393}
394
/* Populate mlx5e_ipsec_inverse_table with fixed-point reciprocals:
 * entry[mss] = floor(2^32 / mss) >> 16, i.e. roughly (2^16)/mss, stored in
 * network byte order.  Entry 1 is saturated to 0xFFFF since 2^16/1 does not
 * fit in 16 bits; entry 0 stays zero (mss of 0 never occurs).
 * The values are sent to the HW via mdata->content.tx.mss_inv, presumably
 * so the HW avoids a division per LSO packet — confirm against the HW spec.
 */
void mlx5e_ipsec_build_inverse_table(void)
{
	u16 mss_inv;
	u32 mss;

	mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
	for (mss = 2; mss < MAX_LSO_MSS; mss++) {
		mss_inv = div_u64(1ULL << 32, mss) >> 16;
		mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
	}
}
412