1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "mlx5_core.h"
40#include "eswitch.h"
41
/* Stub: report success without programming any root table (used by the
 * mlx5_flow_cmd_stubs command set for table types with no FW backing).
 */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
49
/* Stub: "create" a flow table without issuing any FW command. */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
57
/* Stub: "destroy" a flow table without issuing any FW command. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
63
/* Stub: "modify" a flow table without issuing any FW command. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
70
/* Stub: "create" a flow group without issuing any FW command. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}
78
/* Stub: "destroy" a flow group without issuing any FW command. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}
85
/* Stub: "create" a flow table entry without issuing any FW command. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
93
/* Stub: unlike the other no-op stubs, an in-place FTE update is reported
 * as unsupported rather than silently "succeeding".
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
102
/* Stub: "delete" a flow table entry without issuing any FW command. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
109
110static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
111 struct mlx5_flow_table *ft, u32 underlay_qpn,
112 bool disconnect)
113{
114 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
115 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
116 struct mlx5_core_dev *dev = ns->dev;
117
118 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
119 underlay_qpn == 0)
120 return 0;
121
122 MLX5_SET(set_flow_table_root_in, in, opcode,
123 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
124 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
125
126 if (disconnect) {
127 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
128 MLX5_SET(set_flow_table_root_in, in, table_id, 0);
129 } else {
130 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
131 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
132 }
133
134 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
135 if (ft->vport) {
136 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
137 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
138 }
139
140 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
141}
142
/* Issue CREATE_FLOW_TABLE for @ft and record the FW-assigned table id.
 *
 * Encap (reformat), decap and termination enables are derived from
 * ft->flags.  Normal tables set a miss action: forward to @next_ft when
 * one is given, otherwise the namespace default miss action.  LAG demux
 * tables (op_mod 0x1) instead chain via lag_master_next_table_id.
 * Returns 0 on success (ft->id updated) or a negative errno.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	/* Non-zero vport means the table is created on another vport. */
	if (ft->vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ns->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}
204
/* Issue DESTROY_FLOW_TABLE for @ft (optionally on another vport).
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
223
/* Issue MODIFY_FLOW_TABLE to update @ft's chaining.
 *
 * LAG demux tables update lag_master_next_table_id (0 when @next_ft is
 * NULL).  All other tables update the miss path: forward to @next_ft
 * when given, otherwise fall back to the namespace default miss action.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		/* modify_field_select tells FW which context field changes. */
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ns->def_miss_action);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
271
/* Issue CREATE_FLOW_GROUP using the caller-built command buffer @in
 * (match criteria already filled by the caller); this adds the header
 * fields and stores the FW-assigned group id in fg->id on success.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}
297
/* Issue DESTROY_FLOW_GROUP for @fg in table @ft.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
318
/* Decide whether @fte's destination list needs the extended destination
 * entry format.
 *
 * Extended format is required when the FTE forwards to more than one
 * destination and at least one vport destination carries a per-dest
 * packet reformat id.  Counter destinations are excluded from the count.
 * *extended_dest is set accordingly.
 * Returns 0, or -EOPNOTSUPP when FW lacks extended-destination support
 * or the number of encap destinations exceeds the FW cap.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	/* A zero cap means the FW has no extended-destination support. */
	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Build and execute SET_FLOW_TABLE_ENTRY for @fte in group @group_id.
 *
 * @opmod 0 creates a new entry, 1 modifies an existing one with
 * @modify_mask selecting the fields to update.  The command embeds the
 * flow context (match value, actions, push-vlan headers) followed by a
 * variable-length list of destination and counter entries, hence the
 * dynamically sized allocation.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	/* Decide the destination entry format (and validate FW support)
	 * before sizing the command buffer.
	 */
	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	/* dests_size covers both forward destinations and counters. */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		/* With extended destinations, packet reformat is carried
		 * per destination entry, so strip it from the FTE action.
		 */
		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
			 fte->action.reformat_id);
	}
	MLX5_SET(flow_context, in_flow_context, modify_header_id,
		 fte->action.modify_id);

	/* Up to two VLAN headers can be pushed. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* First pass: forward destinations (counters are appended
		 * separately below).
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				/* Referenced by number; FW sees it as a
				 * plain flow-table destination.
				 */
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.reformat_id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		/* Second pass: counter destinations only. */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		/* Reject if more counters than the FW cap allows. */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
519
520static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
521 struct mlx5_flow_table *ft,
522 struct mlx5_flow_group *group,
523 struct fs_fte *fte)
524{
525 struct mlx5_core_dev *dev = ns->dev;
526 unsigned int group_id = group->id;
527
528 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
529}
530
531static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
532 struct mlx5_flow_table *ft,
533 struct mlx5_flow_group *fg,
534 int modify_mask,
535 struct fs_fte *fte)
536{
537 int opmod;
538 struct mlx5_core_dev *dev = ns->dev;
539 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
540 flow_table_properties_nic_receive.
541 flow_modify_en);
542 if (!atomic_mod_cap)
543 return -EOPNOTSUPP;
544 opmod = 1;
545
546 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
547}
548
/* Issue DELETE_FLOW_TABLE_ENTRY for the entry at fte->index in @ft.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
568
/* Allocate a HW flow counter; on success store the FW-assigned counter
 * id in *id.  Returns 0 or a negative errno.
 */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
583
/* Release the HW flow counter @id back to FW.
 * Returns 0 or a negative errno.
 */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
594
/* Query a single flow counter @id, returning its packet and octet
 * totals in *packets / *bytes.  The output buffer is sized for exactly
 * one traffic_counter record.  Returns 0 or a negative errno.
 */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
617
618struct mlx5_cmd_fc_bulk {
619 u32 id;
620 int num;
621 int outlen;
622 u32 out[0];
623};
624
/* Allocate a bulk-query context for @num counters starting at @id,
 * sized to hold one traffic_counter record per counter.
 * Returns NULL on allocation failure; free with mlx5_cmd_fc_bulk_free().
 */
struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
{
	struct mlx5_cmd_fc_bulk *b;
	int outlen =
		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * num;

	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
	if (!b)
		return NULL;

	b->id = id;
	b->num = num;
	b->outlen = outlen;

	return b;
}
643
/* Free a context obtained from mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
648
/* Execute the bulk QUERY_FLOW_COUNTER for context @b; results land in
 * b->out for later extraction via mlx5_cmd_fc_bulk_get().
 * Returns 0 or a negative errno.
 */
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
661
/* Extract packet/octet totals for counter @id from a completed bulk
 * query.  Ids outside [b->id, b->id + b->num) are ignored with a
 * warning, leaving *packets / *bytes untouched.
 */
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u32 id,
			  u64 *packets, u64 *bytes)
{
	int index = id - b->id;
	void *stats;

	if (index < 0 || index >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
			     flow_statistics[index]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
680
681int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
682 int reformat_type,
683 size_t size,
684 void *reformat_data,
685 enum mlx5_flow_namespace_type namespace,
686 u32 *packet_reformat_id)
687{
688 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
689 void *packet_reformat_context_in;
690 int max_encap_size;
691 void *reformat;
692 int inlen;
693 int err;
694 u32 *in;
695
696 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
697 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
698 else
699 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
700
701 if (size > max_encap_size) {
702 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
703 size, max_encap_size);
704 return -EINVAL;
705 }
706
707 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
708 GFP_KERNEL);
709 if (!in)
710 return -ENOMEM;
711
712 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
713 in, packet_reformat_context);
714 reformat = MLX5_ADDR_OF(packet_reformat_context_in,
715 packet_reformat_context_in,
716 reformat_data);
717 inlen = reformat - (void *)in + size;
718
719 memset(in, 0, inlen);
720 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
721 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
722 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
723 reformat_data_size, size);
724 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
725 reformat_type, reformat_type);
726 memcpy(reformat, reformat_data, size);
727
728 memset(out, 0, sizeof(out));
729 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
730
731 *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
732 out, packet_reformat_id);
733 kfree(in);
734 return err;
735}
736EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
737
738void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
739 u32 packet_reformat_id)
740{
741 u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
742 u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
743
744 memset(in, 0, sizeof(in));
745 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
746 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
747 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
748 packet_reformat_id);
749
750 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
751}
752EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
753
754int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
755 u8 namespace, u8 num_actions,
756 void *modify_actions, u32 *modify_header_id)
757{
758 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
759 int max_actions, actions_size, inlen, err;
760 void *actions_in;
761 u8 table_type;
762 u32 *in;
763
764 switch (namespace) {
765 case MLX5_FLOW_NAMESPACE_FDB:
766 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
767 table_type = FS_FT_FDB;
768 break;
769 case MLX5_FLOW_NAMESPACE_KERNEL:
770 case MLX5_FLOW_NAMESPACE_BYPASS:
771 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
772 table_type = FS_FT_NIC_RX;
773 break;
774 case MLX5_FLOW_NAMESPACE_EGRESS:
775 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
776 table_type = FS_FT_NIC_TX;
777 break;
778 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
779 max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
780 table_type = FS_FT_ESW_INGRESS_ACL;
781 break;
782 default:
783 return -EOPNOTSUPP;
784 }
785
786 if (num_actions > max_actions) {
787 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
788 num_actions, max_actions);
789 return -EOPNOTSUPP;
790 }
791
792 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
793 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
794
795 in = kzalloc(inlen, GFP_KERNEL);
796 if (!in)
797 return -ENOMEM;
798
799 MLX5_SET(alloc_modify_header_context_in, in, opcode,
800 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
801 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
802 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
803
804 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
805 memcpy(actions_in, modify_actions, actions_size);
806
807 memset(out, 0, sizeof(out));
808 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
809
810 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
811 kfree(in);
812 return err;
813}
814EXPORT_SYMBOL(mlx5_modify_header_alloc);
815
816void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
817{
818 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
819 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
820
821 memset(in, 0, sizeof(in));
822 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
823 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
824 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
825 modify_header_id);
826
827 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
828}
829EXPORT_SYMBOL(mlx5_modify_header_dealloc);
830
/* FW-backed implementation of the flow-steering command interface. */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
};
842
/* No-op command set for table types not programmed through FW. */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
};
854
/* Accessor for the FW-backed command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
859
/* Accessor for the no-op stub command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
864
/* Select the command set for a flow-table type: FW-programmed types get
 * the real commands, anything else falls back to the stubs.
 */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}
881