/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "fs_ft_pool.h"
40#include "mlx5_core.h"
41#include "eswitch.h"
42
/* No-op root-table update for namespaces with no firmware backing. */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
50
/* Software-only table creation: no firmware command is issued, but the
 * table capacity is still recorded so the core can manage FTE indices.
 */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int size,
					   struct mlx5_flow_table *next_ft)
{
	/* size == 0 requests the minimum table; otherwise round up to a
	 * power of two as real firmware tables would be sized.
	 */
	ft->max_fte = size ? roundup_pow_of_two(size) : 1;

	return 0;
}
60
/* No-op table destruction for software-only namespaces. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
66
/* No-op table modification for software-only namespaces. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
73
/* No-op flow-group creation for software-only namespaces. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
81
/* No-op flow-group destruction for software-only namespaces. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
88
/* No-op FTE creation for software-only namespaces. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
96
/* FTE update is deliberately unsupported in stub mode (unlike the other
 * stubs, which silently succeed): callers must treat in-place rule
 * modification as unavailable and fall back to delete + re-create.
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
105
/* No-op FTE deletion for software-only namespaces. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
112
/* No-op packet-reformat context allocation; pkt_reformat->id is left as-is. */
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}
120
/* No-op packet-reformat context release. */
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}
125
/* No-op modify-header context allocation; modify_hdr->id is left as-is. */
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}
133
/* No-op modify-header context release. */
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}
138
/* No-op peer-namespace pairing (used by both the stub and FW cmd tables). */
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}
144
/* No-op namespace creation (used by both the stub and FW cmd tables). */
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
149
/* No-op namespace destruction (used by both the stub and FW cmd tables). */
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}
154
/* Point the hardware root of ft's table type at @ft (or detach it when
 * @disconnect), via the SET_FLOW_TABLE_ROOT command.  @underlay_qpn is the
 * IPoIB underlay QP carried alongside the root on IB ports.
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	/* On IB ports a zero underlay QPN means there is nothing to program
	 * yet; the root will be set once an underlay QP exists.
	 */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	/* op_mod 1 == disconnect the root; otherwise supply the new root id. */
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
182
183static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
184 struct mlx5_flow_table *ft,
185 unsigned int size,
186 struct mlx5_flow_table *next_ft)
187{
188 int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
189 int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
190 int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
191 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
192 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
193 struct mlx5_core_dev *dev = ns->dev;
194 int err;
195
196 if (size != POOL_NEXT_SIZE)
197 size = roundup_pow_of_two(size);
198 size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
199 if (!size)
200 return -ENOSPC;
201
202 MLX5_SET(create_flow_table_in, in, opcode,
203 MLX5_CMD_OP_CREATE_FLOW_TABLE);
204
205 MLX5_SET(create_flow_table_in, in, table_type, ft->type);
206 MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
207 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
208 MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
209 MLX5_SET(create_flow_table_in, in, other_vport,
210 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
211
212 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
213 en_decap);
214 MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
215 en_encap);
216 MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
217 term);
218
219 switch (ft->op_mod) {
220 case FS_FT_OP_MOD_NORMAL:
221 if (next_ft) {
222 MLX5_SET(create_flow_table_in, in,
223 flow_table_context.table_miss_action,
224 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
225 MLX5_SET(create_flow_table_in, in,
226 flow_table_context.table_miss_id, next_ft->id);
227 } else {
228 MLX5_SET(create_flow_table_in, in,
229 flow_table_context.table_miss_action,
230 ft->def_miss_action);
231 }
232 break;
233
234 case FS_FT_OP_MOD_LAG_DEMUX:
235 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
236 if (next_ft)
237 MLX5_SET(create_flow_table_in, in,
238 flow_table_context.lag_master_next_table_id,
239 next_ft->id);
240 break;
241 }
242
243 err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
244 if (!err) {
245 ft->id = MLX5_GET(create_flow_table_out, out,
246 table_id);
247 ft->max_fte = size;
248 } else {
249 mlx5_ft_pool_put_sz(ns->dev, size);
250 }
251
252 return err;
253}
254
/* Destroy a firmware flow table and, on success, return its size
 * reservation to the flow-table pool.
 */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}
276
277static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
278 struct mlx5_flow_table *ft,
279 struct mlx5_flow_table *next_ft)
280{
281 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
282 struct mlx5_core_dev *dev = ns->dev;
283
284 MLX5_SET(modify_flow_table_in, in, opcode,
285 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
286 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
287 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
288
289 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
290 MLX5_SET(modify_flow_table_in, in, modify_field_select,
291 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
292 if (next_ft) {
293 MLX5_SET(modify_flow_table_in, in,
294 flow_table_context.lag_master_next_table_id, next_ft->id);
295 } else {
296 MLX5_SET(modify_flow_table_in, in,
297 flow_table_context.lag_master_next_table_id, 0);
298 }
299 } else {
300 MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
301 MLX5_SET(modify_flow_table_in, in, other_vport,
302 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
303 MLX5_SET(modify_flow_table_in, in, modify_field_select,
304 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
305 if (next_ft) {
306 MLX5_SET(modify_flow_table_in, in,
307 flow_table_context.table_miss_action,
308 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
309 MLX5_SET(modify_flow_table_in, in,
310 flow_table_context.table_miss_id,
311 next_ft->id);
312 } else {
313 MLX5_SET(modify_flow_table_in, in,
314 flow_table_context.table_miss_action,
315 ft->def_miss_action);
316 }
317 }
318
319 return mlx5_cmd_exec_in(dev, modify_flow_table, in);
320}
321
322static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
323 struct mlx5_flow_table *ft,
324 u32 *in,
325 struct mlx5_flow_group *fg)
326{
327 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
328 struct mlx5_core_dev *dev = ns->dev;
329 int err;
330
331 MLX5_SET(create_flow_group_in, in, opcode,
332 MLX5_CMD_OP_CREATE_FLOW_GROUP);
333 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
334 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
335 if (ft->vport) {
336 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
337 MLX5_SET(create_flow_group_in, in, other_vport, 1);
338 }
339
340 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
341 MLX5_SET(create_flow_group_in, in, other_vport,
342 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
343 err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
344 if (!err)
345 fg->id = MLX5_GET(create_flow_group_out, out,
346 group_id);
347 return err;
348}
349
/* Destroy a firmware flow group identified by (table, group id). */
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
367
368static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
369 struct fs_fte *fte, bool *extended_dest)
370{
371 int fw_log_max_fdb_encap_uplink =
372 MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
373 int num_fwd_destinations = 0;
374 struct mlx5_flow_rule *dst;
375 int num_encap = 0;
376
377 *extended_dest = false;
378 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
379 return 0;
380
381 list_for_each_entry(dst, &fte->node.children, node.list) {
382 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
383 continue;
384 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
385 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
386 num_encap++;
387 num_fwd_destinations++;
388 }
389 if (num_fwd_destinations > 1 && num_encap > 0)
390 *extended_dest = true;
391
392 if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
393 mlx5_core_warn(dev, "FW does not support extended destination");
394 return -EOPNOTSUPP;
395 }
396 if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
397 mlx5_core_warn(dev, "FW does not support more than %d encaps",
398 1 << fw_log_max_fdb_encap_uplink);
399 return -EOPNOTSUPP;
400 }
401
402 return 0;
403}
/* Build and execute SET_FLOW_TABLE_ENTRY for @fte in @ft/@group_id.
 * @opmod 0 creates a new entry; @opmod 1 with @modify_mask updates an
 * existing one.  The variable-length tail of the mailbox holds the
 * destination list (regular or extended format) followed by the flow
 * counter list, both written through the moving @in_dests cursor.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Extended destinations use a wider per-entry format to carry a
	 * per-destination packet reformat id.
	 */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* In extended mode the reformat id lives per-destination,
		 * so the flow-level PACKET_REFORMAT action bit is cleared.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);

	/* Up to two VLAN push headers. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* First pass: forwarding destinations (counters handled in
		 * the second pass below).
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* A table referenced by number is sent to FW
				 * as a plain FLOW_TABLE destination.
				 */
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
							log_max_flow_counter,
							ft->type));
		int list_size = 0;

		/* Second pass: flow counters, appended after the forwarding
		 * destinations at the same per-entry stride.
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
578
579static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
580 struct mlx5_flow_table *ft,
581 struct mlx5_flow_group *group,
582 struct fs_fte *fte)
583{
584 struct mlx5_core_dev *dev = ns->dev;
585 unsigned int group_id = group->id;
586
587 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
588}
589
590static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
591 struct mlx5_flow_table *ft,
592 struct mlx5_flow_group *fg,
593 int modify_mask,
594 struct fs_fte *fte)
595{
596 int opmod;
597 struct mlx5_core_dev *dev = ns->dev;
598 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
599 flow_table_properties_nic_receive.
600 flow_modify_en);
601 if (!atomic_mod_cap)
602 return -EOPNOTSUPP;
603 opmod = 1;
604
605 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
606}
607
/* Delete the flow table entry at fte->index from @ft. */
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}
625
/* Allocate a bulk of flow counters; @alloc_bitmask selects the bulk size.
 * On success *@id holds the base counter id of the allocated range.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
643
/* Allocate a single flow counter (bulk bitmask 0 == one counter). */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
648
/* Free the flow counter (or bulk base) identified by @id. */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
658
/* Query a single flow counter; on success fills *@packets and *@bytes with
 * the accumulated traffic statistics.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	/* Output mailbox sized in bytes (not dwords) to append one
	 * traffic_counter record after the fixed header.
	 */
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
681
/* Output-mailbox length (bytes) needed to bulk-query @bulk_len counters:
 * the fixed header plus one traffic_counter record per counter.
 */
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
687
/* Query @bulk_len consecutive flow counters starting at @base_id.  @out
 * must be at least mlx5_cmd_fc_get_bulk_query_out_len(bulk_len) bytes.
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
700
701static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
702 struct mlx5_pkt_reformat_params *params,
703 enum mlx5_flow_namespace_type namespace,
704 struct mlx5_pkt_reformat *pkt_reformat)
705{
706 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
707 struct mlx5_core_dev *dev = ns->dev;
708 void *packet_reformat_context_in;
709 int max_encap_size;
710 void *reformat;
711 int inlen;
712 int err;
713 u32 *in;
714
715 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
716 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
717 else
718 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
719
720 if (params->size > max_encap_size) {
721 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
722 params->size, max_encap_size);
723 return -EINVAL;
724 }
725
726 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
727 params->size, GFP_KERNEL);
728 if (!in)
729 return -ENOMEM;
730
731 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
732 in, packet_reformat_context);
733 reformat = MLX5_ADDR_OF(packet_reformat_context_in,
734 packet_reformat_context_in,
735 reformat_data);
736 inlen = reformat - (void *)in + params->size;
737
738 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
739 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
740 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
741 reformat_data_size, params->size);
742 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
743 reformat_type, params->type);
744 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
745 reformat_param_0, params->param_0);
746 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
747 reformat_param_1, params->param_1);
748 if (params->data && params->size)
749 memcpy(reformat, params->data, params->size);
750
751 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
752
753 pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
754 out, packet_reformat_id);
755 kfree(in);
756 return err;
757}
758
/* Release a firmware packet-reformat context; errors are ignored since
 * there is no way for the caller to recover on the teardown path.
 */
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
772
773static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
774 u8 namespace, u8 num_actions,
775 void *modify_actions,
776 struct mlx5_modify_hdr *modify_hdr)
777{
778 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
779 int max_actions, actions_size, inlen, err;
780 struct mlx5_core_dev *dev = ns->dev;
781 void *actions_in;
782 u8 table_type;
783 u32 *in;
784
785 switch (namespace) {
786 case MLX5_FLOW_NAMESPACE_FDB:
787 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
788 table_type = FS_FT_FDB;
789 break;
790 case MLX5_FLOW_NAMESPACE_KERNEL:
791 case MLX5_FLOW_NAMESPACE_BYPASS:
792 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
793 table_type = FS_FT_NIC_RX;
794 break;
795 case MLX5_FLOW_NAMESPACE_EGRESS:
796#ifdef CONFIG_MLX5_IPSEC
797 case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
798#endif
799 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
800 table_type = FS_FT_NIC_TX;
801 break;
802 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
803 max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
804 table_type = FS_FT_ESW_INGRESS_ACL;
805 break;
806 case MLX5_FLOW_NAMESPACE_RDMA_TX:
807 max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
808 table_type = FS_FT_RDMA_TX;
809 break;
810 default:
811 return -EOPNOTSUPP;
812 }
813
814 if (num_actions > max_actions) {
815 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
816 num_actions, max_actions);
817 return -EOPNOTSUPP;
818 }
819
820 actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
821 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
822
823 in = kzalloc(inlen, GFP_KERNEL);
824 if (!in)
825 return -ENOMEM;
826
827 MLX5_SET(alloc_modify_header_context_in, in, opcode,
828 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
829 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
830 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
831
832 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
833 memcpy(actions_in, modify_actions, actions_size);
834
835 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
836
837 modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
838 kfree(in);
839 return err;
840}
841
/* Release a firmware modify-header context; errors are ignored since
 * there is no way for the caller to recover on the teardown path.
 */
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
855
/* Firmware-backed command table: every operation issues a device command.
 * set_peer/create_ns/destroy_ns intentionally reuse the no-op stubs — the
 * FW path has nothing extra to do for them.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
874
/* Software-only command table: all operations are no-ops (update_fte
 * reports -EOPNOTSUPP); used for table types without firmware backing.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
893
/* Return the firmware-backed command table. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
898
/* Return the software-only (stub) command table. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
903
/* Pick the default command table for a flow-table type: the listed types
 * are programmed through firmware commands; anything else gets the no-op
 * stubs.
 */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
921