1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "mlx5_core.h"
40#include "eswitch.h"
41
/* No-op root-FT update for namespaces with no firmware backing. */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
49
/* No-op flow-table creation; software state is kept by fs_core alone. */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
57
/* No-op flow-table destruction counterpart of the create stub. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
63
/* No-op flow-table modification (miss-table rewiring) stub. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
70
/* No-op flow-group creation stub; @in is the caller-built command buffer. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
78
/* No-op flow-group destruction stub. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
85
/* No-op flow-table-entry creation stub. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
93
/*
 * In-place FTE modification is deliberately unsupported in the stub path;
 * unlike the other stubs this must fail so fs_core falls back to
 * delete + re-create instead of silently believing the update happened.
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
102
/* No-op flow-table-entry deletion stub. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
109
/* No-op packet-reformat (encap/decap context) allocation stub. */
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       int reformat_type,
					       size_t size,
					       void *reformat_data,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}
119
/* No-op packet-reformat deallocation stub. */
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}
124
/* No-op modify-header context allocation stub. */
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}
132
/* No-op modify-header context deallocation stub. */
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}
137
/* No-op peer-namespace pairing stub (also used by the FW command table). */
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}
143
/* No-op namespace creation stub (also used by the FW command table). */
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
148
/* No-op namespace destruction stub (also used by the FW command table). */
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
153
/*
 * Point the device's root flow table for @ft->type at @ft (or disconnect it),
 * via the SET_FLOW_TABLE_ROOT command.
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	/* On IB ports a root FT needs an underlay QP; with none set yet
	 * there is nothing to program, so report success and wait.
	 */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	/* op_mod 1 disconnects the root; otherwise supply the new table id. */
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
181
/*
 * Create a flow table in firmware and store the returned table id in
 * ft->id.  @log_size is log2 of the number of entries; @next_ft, when
 * non-NULL, becomes the miss/next destination depending on ft->op_mod.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* Normal table: forward misses to next_ft if chained,
		 * otherwise apply the table's default miss action.
		 */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		/* LAG demux tables use op_mod 1 and a dedicated next-table
		 * field instead of the miss action.
		 */
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}
242
243static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
244 struct mlx5_flow_table *ft)
245{
246 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
247 struct mlx5_core_dev *dev = ns->dev;
248
249 MLX5_SET(destroy_flow_table_in, in, opcode,
250 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
251 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
252 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
253 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
254 MLX5_SET(destroy_flow_table_in, in, other_vport,
255 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
256
257 return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
258}
259
/*
 * Rewire where packets go after @ft: for LAG demux tables update the
 * next-table id, otherwise update the miss action/table.  A NULL
 * @next_ft clears the chaining (LAG) or restores the default miss action.
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		/* Only the LAG next-table field is being modified. */
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		/* Only the miss-table fields are being modified. */
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
304
305static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
306 struct mlx5_flow_table *ft,
307 u32 *in,
308 struct mlx5_flow_group *fg)
309{
310 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
311 struct mlx5_core_dev *dev = ns->dev;
312 int err;
313
314 MLX5_SET(create_flow_group_in, in, opcode,
315 MLX5_CMD_OP_CREATE_FLOW_GROUP);
316 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
317 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
318 if (ft->vport) {
319 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
320 MLX5_SET(create_flow_group_in, in, other_vport, 1);
321 }
322
323 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
324 MLX5_SET(create_flow_group_in, in, other_vport,
325 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
326 err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
327 if (!err)
328 fg->id = MLX5_GET(create_flow_group_out, out,
329 group_id);
330 return err;
331}
332
333static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
334 struct mlx5_flow_table *ft,
335 struct mlx5_flow_group *fg)
336{
337 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
338 struct mlx5_core_dev *dev = ns->dev;
339
340 MLX5_SET(destroy_flow_group_in, in, opcode,
341 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
342 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
343 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
344 MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
345 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
346 MLX5_SET(destroy_flow_group_in, in, other_vport,
347 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
348 return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
349}
350
/*
 * Decide whether @fte needs the extended destination format (needed when
 * forwarding to multiple destinations while at least one vport destination
 * carries a packet-reformat id), and validate it against firmware caps.
 *
 * Returns 0 on success with *extended_dest set, -EOPNOTSUPP if the FW
 * cannot express the requested destinations.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	/* Counters live in a separate list-size accounting; skip them here. */
	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command for @fte in group
 * @group_id of table @ft.  @opmod 0 creates the entry, 1 modifies the
 * fields selected by @modify_mask.  The input buffer is variable-length:
 * a fixed header followed by one destination struct per child of the FTE
 * (forward destinations first, then flow counters).
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination element size depends on the chosen format. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations the reformat id is carried
		 * per-destination, so strip the context-level reformat bit.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);

	/* Up to two VLAN push actions. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	/* Append forward destinations (counters are handled afterwards). */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* Caller gave a raw table number; the wire
				 * format only knows the FLOW_TABLE type.
				 */
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	/* Append flow counters after the forward destinations. */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
561
562static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
563 struct mlx5_flow_table *ft,
564 struct mlx5_flow_group *group,
565 struct fs_fte *fte)
566{
567 struct mlx5_core_dev *dev = ns->dev;
568 unsigned int group_id = group->id;
569
570 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
571}
572
573static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
574 struct mlx5_flow_table *ft,
575 struct mlx5_flow_group *fg,
576 int modify_mask,
577 struct fs_fte *fte)
578{
579 int opmod;
580 struct mlx5_core_dev *dev = ns->dev;
581 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
582 flow_table_properties_nic_receive.
583 flow_modify_en);
584 if (!atomic_mod_cap)
585 return -EOPNOTSUPP;
586 opmod = 1;
587
588 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
589}
590
591static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
592 struct mlx5_flow_table *ft,
593 struct fs_fte *fte)
594{
595 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
596 struct mlx5_core_dev *dev = ns->dev;
597
598 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
599 MLX5_SET(delete_fte_in, in, table_type, ft->type);
600 MLX5_SET(delete_fte_in, in, table_id, ft->id);
601 MLX5_SET(delete_fte_in, in, flow_index, fte->index);
602 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
603 MLX5_SET(delete_fte_in, in, other_vport,
604 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
605
606 return mlx5_cmd_exec_in(dev, delete_fte, in);
607}
608
/*
 * Allocate a bulk of flow counters; @alloc_bitmask selects the bulk size.
 * On success *id holds the base counter id of the allocated range.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
626
/* Allocate a single flow counter (bulk bitmask of 0). */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
631
632int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
633{
634 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
635
636 MLX5_SET(dealloc_flow_counter_in, in, opcode,
637 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
638 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
639 return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
640}
641
/*
 * Query a single flow counter @id and return its packet/byte totals
 * through @packets and @bytes.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	/* Output buffer must fit the response header plus one
	 * traffic_counter record.
	 */
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
664
665int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
666{
667 return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
668 MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
669}
670
/*
 * Query @bulk_len consecutive flow counters starting at @base_id.
 * @out must be at least mlx5_cmd_fc_get_bulk_query_out_len(bulk_len)
 * bytes (caller-allocated).
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
683
/*
 * Allocate a packet-reformat (encap/decap) context in firmware.  The
 * input buffer carries the raw @reformat_data inline, so it is sized and
 * allocated dynamically.  On success pkt_reformat->id holds the context id.
 */
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  int reformat_type,
					  size_t size,
					  void *reformat_data,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	/* Max header size is capped per table domain: eswitch vs NIC. */
	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	/* Actual command length: header up to reformat_data, plus payload. */
	inlen = reformat - (void *)in + size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, reformat_type);
	memcpy(reformat, reformat_data, size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	/* NOTE(review): id is read even when err != 0 — out is
	 * zero-initialized so the stored id is 0 then; callers are
	 * presumably expected to check err before using it.
	 */
	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}
738
739static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
740 struct mlx5_pkt_reformat *pkt_reformat)
741{
742 u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
743 struct mlx5_core_dev *dev = ns->dev;
744
745 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
746 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
747 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
748 pkt_reformat->id);
749
750 mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
751}
752
/*
 * Allocate a modify-header context holding @num_actions rewrite actions.
 * @namespace selects the table type (and the cap that bounds the action
 * count).  On success modify_hdr->id holds the context id.
 */
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	/* Map the flow namespace to a HW table type and its action cap. */
	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
#ifdef CONFIG_MLX5_IPSEC
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
#endif
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	/* Variable-length input: header followed by the action array. */
	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	/* id is 0 (out was zeroed) when err != 0; callers check err first. */
	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}
821
822static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
823 struct mlx5_modify_hdr *modify_hdr)
824{
825 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
826 struct mlx5_core_dev *dev = ns->dev;
827
828 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
829 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
830 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
831 modify_hdr->id);
832
833 mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
834}
835
/*
 * Firmware-backed command table: every op issues the corresponding
 * device command.  set_peer/create_ns/destroy_ns have no FW counterpart
 * here, so the no-op stubs are reused.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
854
/*
 * Stub command table: every op is a no-op (except update_fte, which
 * fails) so fs_core can manage namespaces with no FW representation.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
873
/* Return the firmware-backed command table. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
878
/* Return the no-op stub command table. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
883
/*
 * Pick the default command table for a flow-table type: FW-backed for
 * types the firmware implements, stubs for everything else.
 */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
901