1
2
3
4#include <linux/netdevice.h>
5#include "en_accel/fs_tcp.h"
6#include "fs_core.h"
7
/* Index of the per-IP-version accelerated TCP flow tables. */
enum accel_fs_tcp_type {
	ACCEL_FS_IPV4_TCP,
	ACCEL_FS_IPV6_TCP,
	ACCEL_FS_TCP_NUM_TYPES,
};

/* Per-netdev accel TCP steering state: one flow table and one
 * catch-all (default) rule per IP version.
 */
struct mlx5e_accel_fs_tcp {
	struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES];
	struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
};
18
19static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
20{
21 switch (i) {
22 case ACCEL_FS_IPV4_TCP:
23 return MLX5E_TT_IPV4_TCP;
24 default:
25 return MLX5E_TT_IPV6_TCP;
26 }
27}
28
/* Fill @spec with an exact 5-tuple IPv4 match for socket @sk:
 * ip_protocol == TCP, ip_version == 4, and (from the RX/packet point of
 * view) src address == socket's remote address (inet_daddr) and dst
 * address == socket's local address (inet_rcv_saddr).
 * TCP ports are set separately by the caller.
 */
static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
	/* Packet source = peer address of the socket */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_daddr, 4);
	/* Packet destination = local bound address of the socket */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet_sk(sk)->inet_rcv_saddr, 4);
	/* Full 32-bit masks for both addresses */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}
46
/* Fill @spec with an exact IPv6 match for socket @sk: ip_protocol == TCP,
 * ip_version == 6, packet src == socket's remote address (sk_v6_daddr),
 * packet dst == socket's local address (inet6_sk(sk)->saddr), with full
 * 128-bit address masks. TCP ports are set separately by the caller.
 */
static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
	/* Packet source = peer address of the socket */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, 16);
	/* Packet destination = local bound address of the socket */
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &inet6_sk(sk)->saddr, 16);
	/* No MLX5_SET_TO_ONES for 128-bit fields; memset the masks instead */
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       0xff, 16);
}
66
/* Remove a per-socket steering rule previously installed by
 * mlx5e_accel_fs_add_sk().
 */
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
71
72struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
73 struct sock *sk, u32 tirn,
74 uint32_t flow_tag)
75{
76 struct mlx5_flow_destination dest = {};
77 struct mlx5e_flow_table *ft = NULL;
78 struct mlx5e_accel_fs_tcp *fs_tcp;
79 MLX5_DECLARE_FLOW_ACT(flow_act);
80 struct mlx5_flow_handle *flow;
81 struct mlx5_flow_spec *spec;
82
83 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
84 if (!spec)
85 return ERR_PTR(-ENOMEM);
86
87 fs_tcp = priv->fs.accel_tcp;
88
89 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
90
91 switch (sk->sk_family) {
92 case AF_INET:
93 accel_fs_tcp_set_ipv4_flow(spec, sk);
94 ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
95 mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
96 &inet_sk(sk)->inet_rcv_saddr,
97 inet_sk(sk)->inet_sport,
98 &inet_sk(sk)->inet_daddr,
99 inet_sk(sk)->inet_dport);
100 break;
101#if IS_ENABLED(CONFIG_IPV6)
102 case AF_INET6:
103 if (!sk->sk_ipv6only &&
104 ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
105 accel_fs_tcp_set_ipv4_flow(spec, sk);
106 ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
107 } else {
108 accel_fs_tcp_set_ipv6_flow(spec, sk);
109 ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP];
110 }
111 break;
112#endif
113 default:
114 break;
115 }
116
117 if (!ft) {
118 flow = ERR_PTR(-EINVAL);
119 goto out;
120 }
121
122 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
123 outer_headers.tcp_dport);
124 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
125 outer_headers.tcp_sport);
126 MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
127 ntohs(inet_sk(sk)->inet_sport));
128 MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
129 ntohs(inet_sk(sk)->inet_dport));
130
131 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
132 dest.tir_num = tirn;
133 if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) {
134 spec->flow_context.flow_tag = flow_tag;
135 spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
136 }
137
138 flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
139
140 if (IS_ERR(flow))
141 netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
142 PTR_ERR(flow));
143
144out:
145 kvfree(spec);
146 return flow;
147}
148
149static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
150 enum accel_fs_tcp_type type)
151{
152 struct mlx5e_flow_table *accel_fs_t;
153 struct mlx5_flow_destination dest;
154 struct mlx5e_accel_fs_tcp *fs_tcp;
155 MLX5_DECLARE_FLOW_ACT(flow_act);
156 struct mlx5_flow_handle *rule;
157 int err = 0;
158
159 fs_tcp = priv->fs.accel_tcp;
160 accel_fs_t = &fs_tcp->tables[type];
161
162 dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type));
163 rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
164 if (IS_ERR(rule)) {
165 err = PTR_ERR(rule);
166 netdev_err(priv->netdev,
167 "%s: add default rule failed, accel_fs type=%d, err %d\n",
168 __func__, type, err);
169 return err;
170 }
171
172 fs_tcp->default_rules[type] = rule;
173 return 0;
174}
175
/* Table layout: group 1 holds the per-socket 5-tuple rules
 * (BIT(16) - 1 entries), group 2 holds the single catch-all default rule.
 */
#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS (2)
#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE (BIT(16) - 1)
#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE (BIT(0))
#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE (MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\
				       MLX5E_ACCEL_FS_TCP_GROUP2_SIZE)
181static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
182 enum accel_fs_tcp_type type)
183{
184 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
185 void *outer_headers_c;
186 int ix = 0;
187 u32 *in;
188 int err;
189 u8 *mc;
190
191 ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
192 in = kvzalloc(inlen, GFP_KERNEL);
193 if (!in || !ft->g) {
194 kvfree(ft->g);
195 kvfree(in);
196 return -ENOMEM;
197 }
198
199 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
200 outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
201 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
202 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);
203
204 switch (type) {
205 case ACCEL_FS_IPV4_TCP:
206 case ACCEL_FS_IPV6_TCP:
207 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
208 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
209 break;
210 default:
211 err = -EINVAL;
212 goto out;
213 }
214
215 switch (type) {
216 case ACCEL_FS_IPV4_TCP:
217 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
218 src_ipv4_src_ipv6.ipv4_layout.ipv4);
219 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
220 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
221 break;
222 case ACCEL_FS_IPV6_TCP:
223 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
224 src_ipv4_src_ipv6.ipv6_layout.ipv6),
225 0xff, 16);
226 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
227 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
228 0xff, 16);
229 break;
230 default:
231 err = -EINVAL;
232 goto out;
233 }
234
235 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
236 MLX5_SET_CFG(in, start_flow_index, ix);
237 ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE;
238 MLX5_SET_CFG(in, end_flow_index, ix - 1);
239 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
240 if (IS_ERR(ft->g[ft->num_groups]))
241 goto err;
242 ft->num_groups++;
243
244
245 memset(in, 0, inlen);
246 MLX5_SET_CFG(in, start_flow_index, ix);
247 ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE;
248 MLX5_SET_CFG(in, end_flow_index, ix - 1);
249 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
250 if (IS_ERR(ft->g[ft->num_groups]))
251 goto err;
252 ft->num_groups++;
253
254 kvfree(in);
255 return 0;
256
257err:
258 err = PTR_ERR(ft->g[ft->num_groups]);
259 ft->g[ft->num_groups] = NULL;
260out:
261 kvfree(in);
262
263 return err;
264}
265
266static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
267{
268 struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
269 struct mlx5_flow_table_attr ft_attr = {};
270 int err;
271
272 ft->num_groups = 0;
273
274 ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
275 ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
276 ft_attr.prio = MLX5E_NIC_PRIO;
277
278 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
279 if (IS_ERR(ft->t)) {
280 err = PTR_ERR(ft->t);
281 ft->t = NULL;
282 return err;
283 }
284
285 netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
286 ft->t->id, ft->t->level);
287
288 err = accel_fs_tcp_create_groups(ft, type);
289 if (err)
290 goto err;
291
292 err = accel_fs_tcp_add_default_rule(priv, type);
293 if (err)
294 goto err;
295
296 return 0;
297err:
298 mlx5e_destroy_flow_table(ft);
299 return err;
300}
301
302static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
303{
304 int err, i;
305
306 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
307
308 err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i));
309 if (err) {
310 netdev_err(priv->netdev,
311 "%s: modify ttc[%d] default destination failed, err(%d)\n",
312 __func__, fs_accel2tt(i), err);
313 return err;
314 }
315 }
316
317 return 0;
318}
319
320static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
321{
322 struct mlx5_flow_destination dest = {};
323 int err, i;
324
325 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
326 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
327 dest.ft = priv->fs.accel_tcp->tables[i].t;
328
329
330 err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest);
331 if (err) {
332 netdev_err(priv->netdev,
333 "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
334 __func__, fs_accel2tt(i), err);
335 return err;
336 }
337 }
338 return 0;
339}
340
341static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
342{
343 struct mlx5e_accel_fs_tcp *fs_tcp;
344
345 fs_tcp = priv->fs.accel_tcp;
346 if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
347 return;
348
349 mlx5_del_flow_rules(fs_tcp->default_rules[i]);
350 mlx5e_destroy_flow_table(&fs_tcp->tables[i]);
351 fs_tcp->tables[i].t = NULL;
352}
353
354void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
355{
356 int i;
357
358 if (!priv->fs.accel_tcp)
359 return;
360
361 accel_fs_tcp_disable(priv);
362
363 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
364 accel_fs_tcp_destroy_table(priv, i);
365
366 kfree(priv->fs.accel_tcp);
367 priv->fs.accel_tcp = NULL;
368}
369
370int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
371{
372 int i, err;
373
374 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
375 return -EOPNOTSUPP;
376
377 priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
378 if (!priv->fs.accel_tcp)
379 return -ENOMEM;
380
381 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
382 err = accel_fs_tcp_create_table(priv, i);
383 if (err)
384 goto err_destroy_tables;
385 }
386
387 err = accel_fs_tcp_enable(priv);
388 if (err)
389 goto err_destroy_tables;
390
391 return 0;
392
393err_destroy_tables:
394 while (--i >= 0)
395 accel_fs_tcp_destroy_table(priv, i);
396
397 kfree(priv->fs.accel_tcp);
398 priv->fs.accel_tcp = NULL;
399 return err;
400}
401