1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/mlx5/driver.h>
34#include <linux/mlx5/device.h>
35#include <linux/mlx5/mlx5_ifc.h>
36
37#include "fs_core.h"
38#include "fs_cmd.h"
39#include "mlx5_core.h"
40
41int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
42 struct mlx5_flow_table *ft)
43{
44 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
45 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
46
47 MLX5_SET(set_flow_table_root_in, in, opcode,
48 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
49 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
50 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
51 if (ft->vport) {
52 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
53 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
54 }
55
56 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
57}
58
59int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
60 u16 vport,
61 enum fs_flow_table_op_mod op_mod,
62 enum fs_flow_table_type type, unsigned int level,
63 unsigned int log_size, struct mlx5_flow_table
64 *next_ft, unsigned int *table_id)
65{
66 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
67 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
68 int err;
69
70 MLX5_SET(create_flow_table_in, in, opcode,
71 MLX5_CMD_OP_CREATE_FLOW_TABLE);
72
73 MLX5_SET(create_flow_table_in, in, table_type, type);
74 MLX5_SET(create_flow_table_in, in, level, level);
75 MLX5_SET(create_flow_table_in, in, log_size, log_size);
76 if (vport) {
77 MLX5_SET(create_flow_table_in, in, vport_number, vport);
78 MLX5_SET(create_flow_table_in, in, other_vport, 1);
79 }
80
81 switch (op_mod) {
82 case FS_FT_OP_MOD_NORMAL:
83 if (next_ft) {
84 MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
85 MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
86 }
87 break;
88
89 case FS_FT_OP_MOD_LAG_DEMUX:
90 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
91 if (next_ft)
92 MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
93 next_ft->id);
94 break;
95 }
96
97 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
98 if (!err)
99 *table_id = MLX5_GET(create_flow_table_out, out,
100 table_id);
101 return err;
102}
103
104int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
105 struct mlx5_flow_table *ft)
106{
107 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
108 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
109
110 MLX5_SET(destroy_flow_table_in, in, opcode,
111 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
112 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
113 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
114 if (ft->vport) {
115 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
116 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
117 }
118
119 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
120}
121
122int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
123 struct mlx5_flow_table *ft,
124 struct mlx5_flow_table *next_ft)
125{
126 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
127 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
128
129 MLX5_SET(modify_flow_table_in, in, opcode,
130 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
131 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
132 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
133
134 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
135 MLX5_SET(modify_flow_table_in, in, modify_field_select,
136 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
137 if (next_ft) {
138 MLX5_SET(modify_flow_table_in, in,
139 lag_master_next_table_id, next_ft->id);
140 } else {
141 MLX5_SET(modify_flow_table_in, in,
142 lag_master_next_table_id, 0);
143 }
144 } else {
145 if (ft->vport) {
146 MLX5_SET(modify_flow_table_in, in, vport_number,
147 ft->vport);
148 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
149 }
150 MLX5_SET(modify_flow_table_in, in, modify_field_select,
151 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
152 if (next_ft) {
153 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
154 MLX5_SET(modify_flow_table_in, in, table_miss_id,
155 next_ft->id);
156 } else {
157 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
158 }
159 }
160
161 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
162}
163
164int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
165 struct mlx5_flow_table *ft,
166 u32 *in,
167 unsigned int *group_id)
168{
169 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
170 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
171 int err;
172
173 MLX5_SET(create_flow_group_in, in, opcode,
174 MLX5_CMD_OP_CREATE_FLOW_GROUP);
175 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
176 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
177 if (ft->vport) {
178 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
179 MLX5_SET(create_flow_group_in, in, other_vport, 1);
180 }
181
182 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
183 if (!err)
184 *group_id = MLX5_GET(create_flow_group_out, out,
185 group_id);
186 return err;
187}
188
189int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
190 struct mlx5_flow_table *ft,
191 unsigned int group_id)
192{
193 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
194 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
195
196 MLX5_SET(destroy_flow_group_in, in, opcode,
197 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
198 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
199 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
200 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
201 if (ft->vport) {
202 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
203 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
204 }
205
206 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
207}
208
/*
 * Write one flow table entry (FTE) to firmware.
 *
 * @opmod:       0 to create a new entry, 1 to modify an existing one
 *               (see mlx5_cmd_create_fte() / mlx5_cmd_update_fte()).
 * @modify_mask: bitmask of fields being modified (modify_enable_mask);
 *               0 on create.
 * @group_id:    flow group the entry belongs to.
 * @fte:         software entry holding index, match value, action and
 *               the list of destinations/counters as child nodes.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	/* Mailbox is variable-length: base struct plus one destination
	 * slot per entry in fte's child list (dests_size is assumed to
	 * count both forward destinations and counters — TODO confirm
	 * against fs_core).
	 */
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5_flow_rule *dst;
	void *in_flow_context;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	/* vzalloc because inlen can exceed a comfortable kmalloc size. */
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action);
	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));

	/* in_dests walks the destination array: forward destinations are
	 * laid out first, counter entries follow (same stride).
	 */
	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id;

			/* Counters are emitted in the second pass below. */
			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 dst->dest_attr.type);
			if (dst->dest_attr.type ==
			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
				id = dst->dest_attr.ft->id;
			} else {
				/* vport/TIR destinations share this field;
				 * presumably tir_num aliases vport number in
				 * the union — verify in fs_core.h.
				 */
				id = dst->dest_attr.tir_num;
			}
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			/* Counter slots use the same stride as regular
			 * destination entries.
			 */
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}
299
/* Install a brand-new FTE in @ft (no modify mask applies on create). */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	const int opmod = 0;		/* 0 == create new entry */
	const int modify_mask = 0;	/* unused on create */

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
307
308int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
309 struct mlx5_flow_table *ft,
310 unsigned group_id,
311 int modify_mask,
312 struct fs_fte *fte)
313{
314 int opmod;
315 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
316 flow_table_properties_nic_receive.
317 flow_modify_en);
318 if (!atomic_mod_cap)
319 return -ENOTSUPP;
320 opmod = 1;
321
322 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
323}
324
325int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
326 struct mlx5_flow_table *ft,
327 unsigned int index)
328{
329 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
330 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
331
332 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
333 MLX5_SET(delete_fte_in, in, table_type, ft->type);
334 MLX5_SET(delete_fte_in, in, table_id, ft->id);
335 MLX5_SET(delete_fte_in, in, flow_index, index);
336 if (ft->vport) {
337 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
338 MLX5_SET(delete_fte_in, in, other_vport, 1);
339 }
340
341 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
342}
343
344int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
345{
346 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
347 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
348 int err;
349
350 MLX5_SET(alloc_flow_counter_in, in, opcode,
351 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
352
353 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
354 if (!err)
355 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
356 return err;
357}
358
359int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
360{
361 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
362 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
363
364 MLX5_SET(dealloc_flow_counter_in, in, opcode,
365 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
366 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
367 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
368}
369
370int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
371 u64 *packets, u64 *bytes)
372{
373 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
374 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
375 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
376 void *stats;
377 int err = 0;
378
379 MLX5_SET(query_flow_counter_in, in, opcode,
380 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
381 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
382 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
383 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
384 if (err)
385 return err;
386
387 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
388 *packets = MLX5_GET64(traffic_counter, stats, packets);
389 *bytes = MLX5_GET64(traffic_counter, stats, octets);
390 return 0;
391}
392
393struct mlx5_cmd_fc_bulk {
394 u16 id;
395 int num;
396 int outlen;
397 u32 out[0];
398};
399
400struct mlx5_cmd_fc_bulk *
401mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
402{
403 struct mlx5_cmd_fc_bulk *b;
404 int outlen =
405 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
406 MLX5_ST_SZ_BYTES(traffic_counter) * num;
407
408 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
409 if (!b)
410 return NULL;
411
412 b->id = id;
413 b->num = num;
414 b->outlen = outlen;
415
416 return b;
417}
418
/* Free a buffer obtained from mlx5_cmd_fc_bulk_alloc() (NULL is OK). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
423
424int
425mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
426{
427 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
428
429 MLX5_SET(query_flow_counter_in, in, opcode,
430 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
431 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
432 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
433 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
434 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
435}
436
437void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
438 struct mlx5_cmd_fc_bulk *b, u16 id,
439 u64 *packets, u64 *bytes)
440{
441 int index = id - b->id;
442 void *stats;
443
444 if (index < 0 || index >= b->num) {
445 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
446 id, b->id, b->id + b->num - 1);
447 return;
448 }
449
450 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
451 flow_statistics[index]);
452 *packets = MLX5_GET64(traffic_counter, stats, packets);
453 *bytes = MLX5_GET64(traffic_counter, stats, octets);
454}
455
456#define MAX_ENCAP_SIZE (128)
457
458int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
459 int header_type,
460 size_t size,
461 void *encap_header,
462 u32 *encap_id)
463{
464 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
465 u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
466 (MAX_ENCAP_SIZE / sizeof(u32))];
467 void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
468 encap_header);
469 void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
470 encap_header);
471 int inlen = header - (void *)in + size;
472 int err;
473
474 if (size > MAX_ENCAP_SIZE)
475 return -EINVAL;
476
477 memset(in, 0, inlen);
478 MLX5_SET(alloc_encap_header_in, in, opcode,
479 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
480 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
481 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
482 memcpy(header, encap_header, size);
483
484 memset(out, 0, sizeof(out));
485 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
486
487 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
488 return err;
489}
490
491void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
492{
493 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
494 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
495
496 memset(in, 0, sizeof(in));
497 MLX5_SET(dealloc_encap_header_in, in, opcode,
498 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
499 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
500
501 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
502}
503