1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "mlx5_core.h"
40#include "eswitch.h"
41
/* Install @ft as the root flow table of its table type, so traffic of
 * that type is steered through it first.  @underlay_qpn carries the
 * underlay queue pair number used by the table type (IPoIB offloads).
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft, u32 underlay_qpn)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};

	/* On IB ports a zero underlay QPN is treated as "nothing to
	 * anchor yet" and the command is skipped -- presumably the root
	 * only becomes meaningful once an underlay QP exists.
	 * NOTE(review): confirm against the firmware spec. */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	/* Tables owned by another vport must name it explicitly. */
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
64
/* Create a flow table of @type at @level with room for 2^@log_size
 * entries.  @op_mod selects the table flavor: normal tables may chain
 * to @next_ft as their miss target, LAG demux tables use @next_ft as
 * the next master table.  MLX5_FLOW_TABLE_TUNNEL_EN in @flags turns on
 * encap and decap for the table.  On success the firmware-assigned id
 * is stored in *@table_id.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
			       u16 vport,
			       enum fs_flow_table_op_mod op_mod,
			       enum fs_flow_table_type type, unsigned int level,
			       unsigned int log_size, struct mlx5_flow_table
			       *next_ft, unsigned int *table_id, u32 flags)
{
	/* Encap and decap are enabled together from the one tunnel flag. */
	int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	if (vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_encap_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
		 en_encap_decap);

	switch (op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* miss_action 1 = forward misses to the next table. */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		/* op_mod 0x1 selects the LAG demux table flavor. */
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*table_id = MLX5_GET(create_flow_table_out, out,
				     table_id);
	return err;
}
118
/* Destroy flow table @ft in firmware.  The caller is responsible for
 * ensuring no groups or entries still reference it.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	/* Tables owned by another vport must name it explicitly. */
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
136
/* Re-link flow table @ft to @next_ft.
 *
 * For LAG demux tables the "LAG master next table id" field is updated
 * (cleared when @next_ft is NULL).  For all other tables the miss
 * behavior is updated: with @next_ft set, misses forward to it
 * (miss_action 1); with NULL, miss_action reverts to 0 (the default).
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		/* Only the LAG next-table pointer may change here. */
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			/* Explicitly unlink the master chain. */
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		/* Only the miss-table linkage may change here. */
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 0);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
181
/* Create a flow group inside table @ft.
 *
 * @in is a caller-built create_flow_group_in mailbox that already
 * carries the match criteria; this helper only fills in the routing
 * fields (opcode, table type/id, vport) before executing it.  On
 * success the firmware-assigned group id is stored in *@group_id.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       u32 *in,
			       unsigned int *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out,
				     group_id);
	return err;
}
206
/* Destroy flow group @group_id inside table @ft.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
				struct mlx5_flow_table *ft,
				unsigned int group_id)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
226
/* Program a single flow table entry (FTE) into firmware.
 *
 * @opmod:       0 = create a new entry, 1 = modify an existing one.
 * @modify_mask: which FTE fields to update when @opmod is 1.
 * @group_id:    flow group the entry lives in.
 * @fte:         match value, action, flow tag and destination list.
 *
 * The input mailbox is sized for fte->dests_size destination slots;
 * forward destinations and flow counter ids share that array, counters
 * being appended after the forward entries.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when
 * the counter list exceeds the device limit, or the mlx5_cmd_exec()
 * error.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5_flow_rule *dst;
	void *in_flow_context;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	/* Mailbox may exceed a page; kvzalloc falls back to vmalloc. */
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action);
	MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id;

			/* Counters are emitted in the COUNT pass below. */
			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 dst->dest_attr.type);
			if (dst->dest_attr.type ==
			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
				id = dst->dest_attr.ft->id;
			} else {
				/* non-table destinations share this id field */
				id = dst->dest_attr.tir_num;
			}
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
								log_max_flow_counter,
								ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}
		/* NOTE(review): the limit is checked only after the list is
		 * written; the writes appear bounded by the dests_size-sized
		 * mailbox, but the check could move before the loop. */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}
325
/* Create a new flow table entry.
 *
 * Thin wrapper around mlx5_cmd_set_fte(): creation uses op_mod 0 with
 * an empty modify mask, so every FTE field is programmed.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	const int create_opmod = 0;
	const int no_modify_mask = 0;

	return mlx5_cmd_set_fte(dev, create_opmod, no_modify_mask,
				ft, group_id, fte);
}
333
334int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
335 struct mlx5_flow_table *ft,
336 unsigned group_id,
337 int modify_mask,
338 struct fs_fte *fte)
339{
340 int opmod;
341 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
342 flow_table_properties_nic_receive.
343 flow_modify_en);
344 if (!atomic_mod_cap)
345 return -EOPNOTSUPP;
346 opmod = 1;
347
348 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
349}
350
/* Delete the flow table entry at @index from table @ft.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int index)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, index);
	/* Tables owned by another vport must name it explicitly. */
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
369
/* Allocate a hardware flow counter.  On success the firmware-assigned
 * counter id is stored in *@id.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}
384
/* Release hardware flow counter @id back to firmware.
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
395
396int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
397 u64 *packets, u64 *bytes)
398{
399 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
400 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
401 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
402 void *stats;
403 int err = 0;
404
405 MLX5_SET(query_flow_counter_in, in, opcode,
406 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
407 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
408 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
409 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
410 if (err)
411 return err;
412
413 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
414 *packets = MLX5_GET64(traffic_counter, stats, packets);
415 *bytes = MLX5_GET64(traffic_counter, stats, octets);
416 return 0;
417}
418
419struct mlx5_cmd_fc_bulk {
420 u32 id;
421 int num;
422 int outlen;
423 u32 out[0];
424};
425
426struct mlx5_cmd_fc_bulk *
427mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
428{
429 struct mlx5_cmd_fc_bulk *b;
430 int outlen =
431 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
432 MLX5_ST_SZ_BYTES(traffic_counter) * num;
433
434 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
435 if (!b)
436 return NULL;
437
438 b->id = id;
439 b->num = num;
440 b->outlen = outlen;
441
442 return b;
443}
444
/* Release a buffer obtained from mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
449
/* Execute the bulk flow-counter query described by @b, filling b->out
 * with b->num traffic_counter records starting at counter b->id.
 * Read individual results with mlx5_cmd_fc_bulk_get().
 *
 * Returns 0 on success or a negative errno from the firmware command.
 */
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
462
/* Extract the packet/octet counts for counter @id from a completed
 * bulk query @b.
 *
 * If @id falls outside the range [b->id, b->id + b->num), a warning is
 * logged and *@packets / *@bytes are left UNMODIFIED -- callers must
 * either guarantee the id is in range or pre-initialize the outputs.
 */
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u32 id,
			  u64 *packets, u64 *bytes)
{
	int index = id - b->id;
	void *stats;

	if (index < 0 || index >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
			     flow_statistics[index]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
481
482int mlx5_encap_alloc(struct mlx5_core_dev *dev,
483 int header_type,
484 size_t size,
485 void *encap_header,
486 u32 *encap_id)
487{
488 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
489 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
490 void *encap_header_in;
491 void *header;
492 int inlen;
493 int err;
494 u32 *in;
495
496 if (size > max_encap_size) {
497 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
498 size, max_encap_size);
499 return -EINVAL;
500 }
501
502 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
503 GFP_KERNEL);
504 if (!in)
505 return -ENOMEM;
506
507 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
508 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
509 inlen = header - (void *)in + size;
510
511 memset(in, 0, inlen);
512 MLX5_SET(alloc_encap_header_in, in, opcode,
513 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
514 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
515 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
516 memcpy(header, encap_header, size);
517
518 memset(out, 0, sizeof(out));
519 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
520
521 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
522 kfree(in);
523 return err;
524}
525
526void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
527{
528 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
529 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
530
531 memset(in, 0, sizeof(in));
532 MLX5_SET(dealloc_encap_header_in, in, opcode,
533 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
534 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
535
536 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
537}
538
539int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
540 u8 namespace, u8 num_actions,
541 void *modify_actions, u32 *modify_header_id)
542{
543 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
544 int max_actions, actions_size, inlen, err;
545 void *actions_in;
546 u8 table_type;
547 u32 *in;
548
549 switch (namespace) {
550 case MLX5_FLOW_NAMESPACE_FDB:
551 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
552 table_type = FS_FT_FDB;
553 break;
554 case MLX5_FLOW_NAMESPACE_KERNEL:
555 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
556 table_type = FS_FT_NIC_RX;
557 break;
558 default:
559 return -EOPNOTSUPP;
560 }
561
562 if (num_actions > max_actions) {
563 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
564 num_actions, max_actions);
565 return -EOPNOTSUPP;
566 }
567
568 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
569 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
570
571 in = kzalloc(inlen, GFP_KERNEL);
572 if (!in)
573 return -ENOMEM;
574
575 MLX5_SET(alloc_modify_header_context_in, in, opcode,
576 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
577 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
578 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
579
580 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
581 memcpy(actions_in, modify_actions, actions_size);
582
583 memset(out, 0, sizeof(out));
584 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
585
586 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
587 kfree(in);
588 return err;
589}
590
591void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
592{
593 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
594 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
595
596 memset(in, 0, sizeof(in));
597 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
598 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
599 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
600 modify_header_id);
601
602 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
603}
604