1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "mlx5_core.h"
40#include "eswitch.h"
41
/* No-op stub: pretend the root flow table was updated successfully. */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}
49
/*
 * No-op stub: report success without creating anything in FW.
 * NOTE(review): *table_id is left untouched — callers appear to tolerate
 * a stale/zero id for stubbed table types; confirm against fs_core.c.
 */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
					   u16 vport,
					   enum fs_flow_table_op_mod op_mod,
					   enum fs_flow_table_type type,
					   unsigned int level,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft,
					   unsigned int *table_id, u32 flags)
{
	return 0;
}
61
/* No-op stub: nothing was created in FW, so nothing to destroy. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
67
/* No-op stub: table chaining changes are silently accepted. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
74
/* No-op stub: report success; *group_id is intentionally not written. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id)
{
	return 0;
}
82
/* No-op stub: nothing to tear down for a stubbed flow group. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    unsigned int group_id)
{
	return 0;
}
89
/* No-op stub: accept the flow table entry without programming FW. */
static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
97
/*
 * Stub for FTE update. Unlike the other stubs this one fails with
 * -EOPNOTSUPP: silently "succeeding" at an in-place modify would leave
 * callers believing the rule changed when it did not.
 */
static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
106
/* No-op stub: deleting a never-programmed entry trivially succeeds. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
113
114static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
115 struct mlx5_flow_table *ft, u32 underlay_qpn,
116 bool disconnect)
117{
118 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
119 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
120
121 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
122 underlay_qpn == 0)
123 return 0;
124
125 MLX5_SET(set_flow_table_root_in, in, opcode,
126 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
127 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
128
129 if (disconnect) {
130 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
131 MLX5_SET(set_flow_table_root_in, in, table_id, 0);
132 } else {
133 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
134 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
135 }
136
137 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
138 if (ft->vport) {
139 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
140 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
141 }
142
143 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
144}
145
146static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
147 u16 vport,
148 enum fs_flow_table_op_mod op_mod,
149 enum fs_flow_table_type type,
150 unsigned int level,
151 unsigned int log_size,
152 struct mlx5_flow_table *next_ft,
153 unsigned int *table_id, u32 flags)
154{
155 int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
156 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
157 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
158 int err;
159
160 MLX5_SET(create_flow_table_in, in, opcode,
161 MLX5_CMD_OP_CREATE_FLOW_TABLE);
162
163 MLX5_SET(create_flow_table_in, in, table_type, type);
164 MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
165 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
166 if (vport) {
167 MLX5_SET(create_flow_table_in, in, vport_number, vport);
168 MLX5_SET(create_flow_table_in, in, other_vport, 1);
169 }
170
171 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
172 en_encap_decap);
173 MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
174 en_encap_decap);
175
176 switch (op_mod) {
177 case FS_FT_OP_MOD_NORMAL:
178 if (next_ft) {
179 MLX5_SET(create_flow_table_in, in,
180 flow_table_context.table_miss_action, 1);
181 MLX5_SET(create_flow_table_in, in,
182 flow_table_context.table_miss_id, next_ft->id);
183 }
184 break;
185
186 case FS_FT_OP_MOD_LAG_DEMUX:
187 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
188 if (next_ft)
189 MLX5_SET(create_flow_table_in, in,
190 flow_table_context.lag_master_next_table_id,
191 next_ft->id);
192 break;
193 }
194
195 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
196 if (!err)
197 *table_id = MLX5_GET(create_flow_table_out, out,
198 table_id);
199 return err;
200}
201
202static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
203 struct mlx5_flow_table *ft)
204{
205 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
206 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
207
208 MLX5_SET(destroy_flow_table_in, in, opcode,
209 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
210 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
211 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
212 if (ft->vport) {
213 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
214 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
215 }
216
217 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
218}
219
220static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
221 struct mlx5_flow_table *ft,
222 struct mlx5_flow_table *next_ft)
223{
224 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
225 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
226
227 MLX5_SET(modify_flow_table_in, in, opcode,
228 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
229 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
230 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
231
232 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
233 MLX5_SET(modify_flow_table_in, in, modify_field_select,
234 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
235 if (next_ft) {
236 MLX5_SET(modify_flow_table_in, in,
237 flow_table_context.lag_master_next_table_id, next_ft->id);
238 } else {
239 MLX5_SET(modify_flow_table_in, in,
240 flow_table_context.lag_master_next_table_id, 0);
241 }
242 } else {
243 if (ft->vport) {
244 MLX5_SET(modify_flow_table_in, in, vport_number,
245 ft->vport);
246 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
247 }
248 MLX5_SET(modify_flow_table_in, in, modify_field_select,
249 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
250 if (next_ft) {
251 MLX5_SET(modify_flow_table_in, in,
252 flow_table_context.table_miss_action, 1);
253 MLX5_SET(modify_flow_table_in, in,
254 flow_table_context.table_miss_id,
255 next_ft->id);
256 } else {
257 MLX5_SET(modify_flow_table_in, in,
258 flow_table_context.table_miss_action, 0);
259 }
260 }
261
262 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
263}
264
265static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
266 struct mlx5_flow_table *ft,
267 u32 *in,
268 unsigned int *group_id)
269{
270 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
271 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
272 int err;
273
274 MLX5_SET(create_flow_group_in, in, opcode,
275 MLX5_CMD_OP_CREATE_FLOW_GROUP);
276 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
277 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
278 if (ft->vport) {
279 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
280 MLX5_SET(create_flow_group_in, in, other_vport, 1);
281 }
282
283 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
284 if (!err)
285 *group_id = MLX5_GET(create_flow_group_out, out,
286 group_id);
287 return err;
288}
289
290static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
291 struct mlx5_flow_table *ft,
292 unsigned int group_id)
293{
294 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
295 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
296
297 MLX5_SET(destroy_flow_group_in, in, opcode,
298 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
299 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
300 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
301 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
302 if (ft->vport) {
303 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
304 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
305 }
306
307 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
308}
309
/*
 * Program (create or, with opmod 1, modify) one flow table entry in FW.
 *
 * The mailbox is sized for fte->dests_size destinations; forwarding
 * destinations and counters share the same destination array and entry
 * size, so in_dests advances by dest_format_struct for both.  Returns 0
 * on success, -ENOMEM on allocation failure, -EINVAL if more counters
 * are attached than the device supports, or the mlx5_cmd_exec error.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	/* Header plus one destination slot per attached rule. */
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
	MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id,
		 fte->action.modify_id);

	/* Push-VLAN fields are always copied; FW ignores them unless the
	 * corresponding action bit is set in fte->action.action.
	 */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* First pass: forwarding destinations only; counters are
		 * appended by the second pass below.
		 */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id;

			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 dst->dest_attr.type);
			if (dst->dest_attr.type ==
			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
				id = dst->dest_attr.ft->id;
			} else {
				/* vport / TIR destinations share this union field */
				id = dst->dest_attr.tir_num;
			}
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		/* Device limit on counters attached to a single FTE. */
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		/* Second pass: counters go after the forwarding entries. */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
417
418static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
419 struct mlx5_flow_table *ft,
420 struct mlx5_flow_group *group,
421 struct fs_fte *fte)
422{
423 unsigned int group_id = group->id;
424
425 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
426}
427
428static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
429 struct mlx5_flow_table *ft,
430 unsigned int group_id,
431 int modify_mask,
432 struct fs_fte *fte)
433{
434 int opmod;
435 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
436 flow_table_properties_nic_receive.
437 flow_modify_en);
438 if (!atomic_mod_cap)
439 return -EOPNOTSUPP;
440 opmod = 1;
441
442 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
443}
444
445static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
446 struct mlx5_flow_table *ft,
447 struct fs_fte *fte)
448{
449 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
450 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
451
452 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
453 MLX5_SET(delete_fte_in, in, table_type, ft->type);
454 MLX5_SET(delete_fte_in, in, table_id, ft->id);
455 MLX5_SET(delete_fte_in, in, flow_index, fte->index);
456 if (ft->vport) {
457 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
458 MLX5_SET(delete_fte_in, in, other_vport, 1);
459 }
460
461 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
462}
463
464int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
465{
466 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
467 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
468 int err;
469
470 MLX5_SET(alloc_flow_counter_in, in, opcode,
471 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
472
473 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
474 if (!err)
475 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
476 return err;
477}
478
479int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
480{
481 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
482 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
483
484 MLX5_SET(dealloc_flow_counter_in, in, opcode,
485 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
486 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
487 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
488}
489
490int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
491 u64 *packets, u64 *bytes)
492{
493 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
494 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
495 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
496 void *stats;
497 int err = 0;
498
499 MLX5_SET(query_flow_counter_in, in, opcode,
500 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
501 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
502 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
503 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
504 if (err)
505 return err;
506
507 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
508 *packets = MLX5_GET64(traffic_counter, stats, packets);
509 *bytes = MLX5_GET64(traffic_counter, stats, octets);
510 return 0;
511}
512
513struct mlx5_cmd_fc_bulk {
514 u32 id;
515 int num;
516 int outlen;
517 u32 out[0];
518};
519
520struct mlx5_cmd_fc_bulk *
521mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
522{
523 struct mlx5_cmd_fc_bulk *b;
524 int outlen =
525 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
526 MLX5_ST_SZ_BYTES(traffic_counter) * num;
527
528 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
529 if (!b)
530 return NULL;
531
532 b->id = id;
533 b->num = num;
534 b->outlen = outlen;
535
536 return b;
537}
538
/* Release a handle obtained from mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
543
544int
545mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
546{
547 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
548
549 MLX5_SET(query_flow_counter_in, in, opcode,
550 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
551 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
552 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
553 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
554 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
555}
556
557void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
558 struct mlx5_cmd_fc_bulk *b, u32 id,
559 u64 *packets, u64 *bytes)
560{
561 int index = id - b->id;
562 void *stats;
563
564 if (index < 0 || index >= b->num) {
565 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
566 id, b->id, b->id + b->num - 1);
567 return;
568 }
569
570 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
571 flow_statistics[index]);
572 *packets = MLX5_GET64(traffic_counter, stats, packets);
573 *bytes = MLX5_GET64(traffic_counter, stats, octets);
574}
575
576int mlx5_encap_alloc(struct mlx5_core_dev *dev,
577 int header_type,
578 size_t size,
579 void *encap_header,
580 u32 *encap_id)
581{
582 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
583 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
584 void *encap_header_in;
585 void *header;
586 int inlen;
587 int err;
588 u32 *in;
589
590 if (size > max_encap_size) {
591 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
592 size, max_encap_size);
593 return -EINVAL;
594 }
595
596 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
597 GFP_KERNEL);
598 if (!in)
599 return -ENOMEM;
600
601 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
602 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
603 inlen = header - (void *)in + size;
604
605 memset(in, 0, inlen);
606 MLX5_SET(alloc_encap_header_in, in, opcode,
607 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
608 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
609 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
610 memcpy(header, encap_header, size);
611
612 memset(out, 0, sizeof(out));
613 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
614
615 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
616 kfree(in);
617 return err;
618}
619
620void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
621{
622 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
623 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
624
625 memset(in, 0, sizeof(in));
626 MLX5_SET(dealloc_encap_header_in, in, opcode,
627 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
628 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
629
630 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
631}
632
633int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
634 u8 namespace, u8 num_actions,
635 void *modify_actions, u32 *modify_header_id)
636{
637 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
638 int max_actions, actions_size, inlen, err;
639 void *actions_in;
640 u8 table_type;
641 u32 *in;
642
643 switch (namespace) {
644 case MLX5_FLOW_NAMESPACE_FDB:
645 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
646 table_type = FS_FT_FDB;
647 break;
648 case MLX5_FLOW_NAMESPACE_KERNEL:
649 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
650 table_type = FS_FT_NIC_RX;
651 break;
652 default:
653 return -EOPNOTSUPP;
654 }
655
656 if (num_actions > max_actions) {
657 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
658 num_actions, max_actions);
659 return -EOPNOTSUPP;
660 }
661
662 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
663 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
664
665 in = kzalloc(inlen, GFP_KERNEL);
666 if (!in)
667 return -ENOMEM;
668
669 MLX5_SET(alloc_modify_header_context_in, in, opcode,
670 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
671 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
672 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
673
674 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
675 memcpy(actions_in, modify_actions, actions_size);
676
677 memset(out, 0, sizeof(out));
678 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
679
680 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
681 kfree(in);
682 return err;
683}
684
685void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
686{
687 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
688 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
689
690 memset(in, 0, sizeof(in));
691 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
692 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
693 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
694 modify_header_id);
695
696 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
697}
698
/* Command set that programs steering objects into device firmware. */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
};
710
/* No-op command set for table types not backed by FW steering. */
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
};
722
/* Accessor for the FW-backed command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}
727
/* Accessor for the no-op stub command set. */
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}
732
733const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
734{
735 switch (type) {
736 case FS_FT_NIC_RX:
737 case FS_FT_ESW_EGRESS_ACL:
738 case FS_FT_ESW_INGRESS_ACL:
739 case FS_FT_FDB:
740 case FS_FT_SNIFFER_RX:
741 case FS_FT_SNIFFER_TX:
742 return mlx5_fs_cmd_get_fw_cmds();
743 case FS_FT_NIC_TX:
744 default:
745 return mlx5_fs_cmd_get_stub_cmds();
746 }
747}
748