#include <linux/netdevice.h>
#include <net/ipv6.h>
#include "en_accel/tls.h"
#include "accel/tls.h"

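/* Fill the tls_flow descriptor with the connection's IPv4 source and
 * destination addresses taken from the offloaded socket.
 */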
static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 0);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
}

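/* IPv6 counterpart of the above; only built when CONFIG_IPV6 is enabled. */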
#if IS_ENABLED(CONFIG_IPV6)
static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 1);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
}
#endif

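/* Copy the TCP source and destination ports into the tls_flow descriptor;
 * both are already stored in network byte order in the socket.
 */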
static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
	       MLX5_FLD_SZ_BYTES(tls_flow, src_port));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
	       MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
}

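/* Describe the offloaded connection (addresses and TCP ports) in @flow.
 * Returns -EINVAL if the socket's address family is not supported or the
 * device does not advertise IPv6 TLS offload.
 */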
static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
{
	switch (sk->sk_family) {
	case AF_INET:
		mlx5e_tls_set_ipv4_flow(flow, sk);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		/* An IPv4-mapped address on an AF_INET6 socket is offloaded
		 * as a plain IPv4 flow.
		 */
		if (!sk->sk_ipv6only &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			mlx5e_tls_set_ipv4_flow(flow, sk);
			break;
		}
		if (!(caps & MLX5_ACCEL_TLS_IPV6))
			goto error_out;

		mlx5e_tls_set_ipv6_flow(flow, sk);
		break;
#endif
	default:
		goto error_out;
	}

	mlx5e_tls_set_flow_tcp_ports(flow, sk);
	return 0;
error_out:
	return -EINVAL;
}

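/* .tls_dev_add callback: describe the connection in a tls_flow buffer and
 * install the TLS context in the device. On success the returned software ID
 * is kept in the per-direction offload context for later teardown and resync.
 */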
static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
			 enum tls_offload_ctx_dir direction,
			 struct tls_crypto_info *crypto_info,
			 u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 caps = mlx5_accel_tls_device_caps(mdev);
	int ret = -ENOMEM;
	void *flow;
	u32 swid;

	flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
	if (!flow)
		return ret;

	ret = mlx5e_tls_set_flow(flow, sk, caps);
	if (ret)
		goto free_flow;

	ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
				      start_offload_tcp_sn, &swid,
				      direction == TLS_OFFLOAD_CTX_DIR_TX);
	if (ret < 0)
		goto free_flow;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		struct mlx5e_tls_offload_context_tx *tx_ctx =
			mlx5e_get_tls_tx_context(tls_ctx);

		tx_ctx->swid = htonl(swid);
		tx_ctx->expected_seq = start_offload_tcp_sn;
	} else {
		struct mlx5e_tls_offload_context_rx *rx_ctx =
			mlx5e_get_tls_rx_context(tls_ctx);

		rx_ctx->handle = htonl(swid);
	}

	return 0;
free_flow:
	kfree(flow);
	return ret;
}

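/* .tls_dev_del callback: remove the hardware flow identified by the swid (TX)
 * or handle (RX) saved when the context was added.
 */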
static void mlx5e_tls_del(struct net_device *netdev,
			  struct tls_context *tls_ctx,
			  enum tls_offload_ctx_dir direction)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	unsigned int handle;

	handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
		       mlx5e_get_tls_tx_context(tls_ctx)->swid :
		       mlx5e_get_tls_rx_context(tls_ctx)->handle);

	mlx5_accel_tls_del_flow(priv->mdev, handle,
				direction == TLS_OFFLOAD_CTX_DIR_TX);
}

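/* .tls_dev_resync callback: pass the TCP sequence number and the TLS record
 * sequence number to the device so that RX decryption can resynchronize.
 * Only the RX direction is supported here.
 */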
static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
			    u32 seq, u8 *rcd_sn_data,
			    enum tls_offload_ctx_dir direction)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_rx *rx_ctx;
	__be64 rcd_sn = *(__be64 *)rcd_sn_data;

	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EINVAL;
	rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);

	netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
		    be64_to_cpu(rcd_sn));
	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);

	return 0;
}

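/* TLS offload hooks exposed to the kernel TLS core via netdev->tlsdev_ops. */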
static const struct tlsdev_ops mlx5e_tls_ops = {
	.tls_dev_add = mlx5e_tls_add,
	.tls_dev_del = mlx5e_tls_del,
	.tls_dev_resync = mlx5e_tls_resync,
};

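/* Advertise TLS TX/RX offload on the netdev according to the device
 * capabilities. ktls-capable devices are set up by the ktls code instead,
 * and LRO is cleared when the device does not advertise MLX5_ACCEL_TLS_LRO.
 */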
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u32 caps;

	if (mlx5_accel_is_ktls_device(priv->mdev)) {
		mlx5e_ktls_build_netdev(priv);
		return;
	}

	if (!mlx5_accel_is_tls_device(priv->mdev))
		return;

	caps = mlx5_accel_tls_device_caps(priv->mdev);
	if (caps & MLX5_ACCEL_TLS_TX) {
		netdev->features    |= NETIF_F_HW_TLS_TX;
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
	}

	if (caps & MLX5_ACCEL_TLS_RX) {
		netdev->features    |= NETIF_F_HW_TLS_RX;
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
	}

	if (!(caps & MLX5_ACCEL_TLS_LRO)) {
		netdev->features    &= ~NETIF_F_LRO;
		netdev->hw_features &= ~NETIF_F_LRO;
	}

	netdev->tlsdev_ops = &mlx5e_tls_ops;
}

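/* Allocate the driver's per-device TLS offload state (software statistics). */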
int mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL);

	if (!tls)
		return -ENOMEM;

	priv->tls = tls;
	return 0;
}

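/* Free the state allocated by mlx5e_tls_init(). */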
void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;

	if (!tls)
		return;

	kfree(tls);
	priv->tls = NULL;
}