/*
 * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

34#ifndef __MLX5E_EN_ACCEL_H__
35#define __MLX5E_EN_ACCEL_H__
36
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include "en_accel/ipsec_rxtx.h"
40#include "en_accel/tls.h"
41#include "en_accel/tls_rxtx.h"
42#include "en.h"
43#include "en/txrx.h"
44
45#if IS_ENABLED(CONFIG_GENEVE)
46#include <net/geneve.h>
47
48static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
49{
50 return mlx5_tx_swp_supported(mdev);
51}
52
/* Populate the SWP (software parser) offsets in the WQE ethernet segment
 * for a GENEVE-encapsulated TX skb.  Silently returns without touching
 * @eseg when the packet is not IPv4/IPv6 over UDP to the GENEVE port.
 * @ihs: inline header size the caller will copy into the WQE; non-zero
 *       together with a VLAN tag means the driver inserts the tag itself.
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	/* Outer L3 protocol, looking through any VLAN tag. */
	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* Walk the IPv6 extension-header chain to find the transport
		 * protocol.  On error ipv6_find_hdr() returns a negative
		 * errno, which truncated to u8 fails the UDP check below.
		 */
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return; /* not IP: nothing to offload */
	}

	/* Only GENEVE tunnels (UDP dport 6081) are handled here. */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Classify the inner packet by peeking at its IP version field. */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	/* When the driver inserts the VLAN tag (tag present and headers are
	 * inlined), the SWP offsets must be shifted by the VLAN header size.
	 */
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
91
92#else
/* GENEVE support compiled out: never allow the offload. */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}
97
98#endif
99
100static inline void
101mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
102{
103 int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
104
105 udp_hdr(skb)->len = htons(payload_len);
106}
107
/* Per-skb TX acceleration state, filled in mlx5e_accel_tx_begin() and
 * consumed by the later mlx5e_accel_tx_* stages of the TX path.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;     /* set by mlx5e_tls_handle_tx_skb() */
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec; /* set by mlx5e_ipsec_handle_tx_skb() */
#endif
};
116
/* Run per-skb offload preparations before the TX WQE is built, filling
 * @state for the later mlx5e_accel_tx_* stages.
 * Returns false when an offload handler fails; the caller must not
 * transmit the skb in that case.
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	/* UDP GSO: pre-write the per-segment UDP datagram length. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	/* Hand TLS-offloaded skbs to the TLS TX handler, which records its
	 * state in state->tls.
	 */
	if (mlx5e_tls_skb_offloaded(skb))
		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* IPsec offload: only when the SQ supports it and the skb carries
	 * an xfrm offload context.
	 */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}
141
/* True when the state captured by mlx5e_accel_tx_begin() marks this skb
 * as an IPsec TX flow; always false when IPsec support is compiled out.
 */
static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
#else
	return false;
#endif
}
150
/* Extra WQE length (in bytes — presumably; confirm against callers) needed
 * for IPsec identifiers on this SQ, or 0 when IPsec does not apply.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}
161
162
163#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
164
/* Fill offload-specific fields of the WQE ethernet segment for this skb.
 * Returns true unconditionally in this version; the bool return leaves
 * room for callers to handle future failure cases.
 */
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	/* IPsec-offloaded skbs get their IPsec-specific eseg fields built. */
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	/* Encapsulated packets with pending checksum get SWP offsets. */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif

	return true;
}
181
/* Final per-skb offload step after the WQE is built: apply the state
 * collected by mlx5e_accel_tx_begin() to the finished WQE.
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	/* Stamp the TLS state into the WQE control segment. */
	mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Emit IPsec tail data into the inline segment when the flow has
	 * an offload context (xo) and a non-zero tail length.
	 */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}
197
/* Initialize RX-side acceleration (kTLS RX) for this netdev instance.
 * Returns 0 on success or a negative errno from mlx5e_ktls_init_rx().
 */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_ktls_init_rx(priv);
	return err;
}
202
/* Tear down RX-side acceleration (kTLS RX); pairs with mlx5e_accel_init_rx(). */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
207#endif
208