1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/export.h>
34#include <linux/etherdevice.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/vport.h>
37#include "mlx5_core.h"
38
39
/* Serializes updates of mdev->roce.roce_en across the enable/disable paths. */
static DEFINE_MUTEX(mlx5_roce_en_lock);
41
42static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
43 u16 vport, u32 *out, int outlen)
44{
45 u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
46
47 MLX5_SET(query_vport_state_in, in, opcode,
48 MLX5_CMD_OP_QUERY_VPORT_STATE);
49 MLX5_SET(query_vport_state_in, in, op_mod, opmod);
50 MLX5_SET(query_vport_state_in, in, vport_number, vport);
51 if (vport)
52 MLX5_SET(query_vport_state_in, in, other_vport, 1);
53
54 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
55}
56
/* Return the state field from QUERY_VPORT_STATE for @vport.
 *
 * The command return code is deliberately ignored: @out starts zeroed, so
 * on command failure a zero state is reported.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
65
66int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
67 u16 vport, u8 other_vport, u8 state)
68{
69 u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
70 u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
71
72 MLX5_SET(modify_vport_state_in, in, opcode,
73 MLX5_CMD_OP_MODIFY_VPORT_STATE);
74 MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
75 MLX5_SET(modify_vport_state_in, in, vport_number, vport);
76 MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
77 MLX5_SET(modify_vport_state_in, in, admin_state, state);
78
79 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
80}
81
82static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
83 u32 *out, int outlen)
84{
85 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
86
87 MLX5_SET(query_nic_vport_context_in, in, opcode,
88 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
89 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
90 if (vport)
91 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
92
93 return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
94}
95
/* Stamp the MODIFY_NIC_VPORT_CONTEXT opcode into @in and execute it.
 * The caller has already filled in field_select and the context values.
 */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
105
106int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
107 u16 vport, u8 *min_inline)
108{
109 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
110 int err;
111
112 err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
113 if (!err)
114 *min_inline = MLX5_GET(query_nic_vport_context_out, out,
115 nic_vport_context.min_wqe_inline_mode);
116 return err;
117}
118EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
119
120void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
121 u8 *min_inline_mode)
122{
123 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
124 case MLX5_CAP_INLINE_MODE_L2:
125 *min_inline_mode = MLX5_INLINE_MODE_L2;
126 break;
127 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
128 mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
129 break;
130 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
131 *min_inline_mode = MLX5_INLINE_MODE_NONE;
132 break;
133 }
134}
135EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
136
137int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
138 u16 vport, u8 min_inline)
139{
140 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
141 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
142 void *nic_vport_ctx;
143
144 MLX5_SET(modify_nic_vport_context_in, in,
145 field_select.min_inline, 1);
146 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
147 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
148
149 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
150 in, nic_vport_context);
151 MLX5_SET(nic_vport_context, nic_vport_ctx,
152 min_wqe_inline_mode, min_inline);
153
154 return mlx5_modify_nic_vport_context(mdev, in, inlen);
155}
156
157int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
158 u16 vport, u8 *addr)
159{
160 u32 *out;
161 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
162 u8 *out_addr;
163 int err;
164
165 out = kvzalloc(outlen, GFP_KERNEL);
166 if (!out)
167 return -ENOMEM;
168
169 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
170 nic_vport_context.permanent_address);
171
172 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
173 if (!err)
174 ether_addr_copy(addr, &out_addr[2]);
175
176 kvfree(out);
177 return err;
178}
179EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
180
181int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
182 u16 vport, u8 *addr)
183{
184 void *in;
185 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
186 int err;
187 void *nic_vport_ctx;
188 u8 *perm_mac;
189
190 in = kvzalloc(inlen, GFP_KERNEL);
191 if (!in)
192 return -ENOMEM;
193
194 MLX5_SET(modify_nic_vport_context_in, in,
195 field_select.permanent_address, 1);
196 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
197
198 if (vport)
199 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
200
201 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
202 in, nic_vport_context);
203 perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
204 permanent_address);
205
206 ether_addr_copy(&perm_mac[2], addr);
207
208 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
209
210 kvfree(in);
211
212 return err;
213}
214EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
215
216int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
217{
218 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
219 u32 *out;
220 int err;
221
222 out = kvzalloc(outlen, GFP_KERNEL);
223 if (!out)
224 return -ENOMEM;
225
226 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
227 if (!err)
228 *mtu = MLX5_GET(query_nic_vport_context_out, out,
229 nic_vport_context.mtu);
230
231 kvfree(out);
232 return err;
233}
234EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
235
236int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
237{
238 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
239 void *in;
240 int err;
241
242 in = kvzalloc(inlen, GFP_KERNEL);
243 if (!in)
244 return -ENOMEM;
245
246 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
247 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
248
249 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
250
251 kvfree(in);
252 return err;
253}
254EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
255
/* Read the UC or MC allowed-address list of @vport into @addr_list.
 *
 * On entry *list_size is the caller's capacity (in addresses); it is
 * clamped to the device limit for @list_type.  On success *list_size is
 * rewritten with the number of entries firmware actually returned.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	/* Device capacity for this list type comes from the HCA caps. */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): output is sized off the modify_... input struct;
	 * presumably it is at least as large as the query output header —
	 * confirm against mlx5_ifc.h.
	 */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	/* Vport 0 is our own vport; others require other_vport. */
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* Each 8-byte entry holds the MAC in its low 6 bytes. */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
319
320int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
321 enum mlx5_list_type list_type,
322 u8 addr_list[][ETH_ALEN],
323 int list_size)
324{
325 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
326 void *nic_vport_ctx;
327 int max_list_size;
328 int in_sz;
329 void *in;
330 int err;
331 int i;
332
333 max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
334 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
335 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
336
337 if (list_size > max_list_size)
338 return -ENOSPC;
339
340 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
341 list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
342
343 memset(out, 0, sizeof(out));
344 in = kzalloc(in_sz, GFP_KERNEL);
345 if (!in)
346 return -ENOMEM;
347
348 MLX5_SET(modify_nic_vport_context_in, in, opcode,
349 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
350 MLX5_SET(modify_nic_vport_context_in, in,
351 field_select.addresses_list, 1);
352
353 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
354 nic_vport_context);
355
356 MLX5_SET(nic_vport_context, nic_vport_ctx,
357 allowed_list_type, list_type);
358 MLX5_SET(nic_vport_context, nic_vport_ctx,
359 allowed_list_size, list_size);
360
361 for (i = 0; i < list_size; i++) {
362 u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
363 nic_vport_ctx,
364 current_uc_mac_address[i]) + 2;
365 ether_addr_copy(curr_mac, addr_list[i]);
366 }
367
368 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
369 kfree(in);
370 return err;
371}
372EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
373
374int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
375 u16 vport,
376 u16 vlans[],
377 int *size)
378{
379 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
380 void *nic_vport_ctx;
381 int req_list_size;
382 int max_list_size;
383 int out_sz;
384 void *out;
385 int err;
386 int i;
387
388 req_list_size = *size;
389 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
390 if (req_list_size > max_list_size) {
391 mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
392 req_list_size, max_list_size);
393 req_list_size = max_list_size;
394 }
395
396 out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
397 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
398
399 memset(in, 0, sizeof(in));
400 out = kzalloc(out_sz, GFP_KERNEL);
401 if (!out)
402 return -ENOMEM;
403
404 MLX5_SET(query_nic_vport_context_in, in, opcode,
405 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
406 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
407 MLX5_NVPRT_LIST_TYPE_VLAN);
408 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
409
410 if (vport)
411 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
412
413 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
414 if (err)
415 goto out;
416
417 nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
418 nic_vport_context);
419 req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
420 allowed_list_size);
421
422 *size = req_list_size;
423 for (i = 0; i < req_list_size; i++) {
424 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
425 nic_vport_ctx,
426 current_uc_mac_address[i]);
427 vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
428 }
429out:
430 kfree(out);
431 return err;
432}
433EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
434
435int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
436 u16 vlans[],
437 int list_size)
438{
439 u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
440 void *nic_vport_ctx;
441 int max_list_size;
442 int in_sz;
443 void *in;
444 int err;
445 int i;
446
447 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
448
449 if (list_size > max_list_size)
450 return -ENOSPC;
451
452 in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
453 list_size * MLX5_ST_SZ_BYTES(vlan_layout);
454
455 memset(out, 0, sizeof(out));
456 in = kzalloc(in_sz, GFP_KERNEL);
457 if (!in)
458 return -ENOMEM;
459
460 MLX5_SET(modify_nic_vport_context_in, in, opcode,
461 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
462 MLX5_SET(modify_nic_vport_context_in, in,
463 field_select.addresses_list, 1);
464
465 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
466 nic_vport_context);
467
468 MLX5_SET(nic_vport_context, nic_vport_ctx,
469 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
470 MLX5_SET(nic_vport_context, nic_vport_ctx,
471 allowed_list_size, list_size);
472
473 for (i = 0; i < list_size; i++) {
474 void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
475 nic_vport_ctx,
476 current_uc_mac_address[i]);
477 MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
478 }
479
480 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
481 kfree(in);
482 return err;
483}
484EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
485
486int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
487 u64 *system_image_guid)
488{
489 u32 *out;
490 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
491
492 out = kvzalloc(outlen, GFP_KERNEL);
493 if (!out)
494 return -ENOMEM;
495
496 mlx5_query_nic_vport_context(mdev, 0, out, outlen);
497
498 *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
499 nic_vport_context.system_image_guid);
500
501 kvfree(out);
502
503 return 0;
504}
505EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
506
507int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
508{
509 u32 *out;
510 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
511
512 out = kvzalloc(outlen, GFP_KERNEL);
513 if (!out)
514 return -ENOMEM;
515
516 mlx5_query_nic_vport_context(mdev, 0, out, outlen);
517
518 *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
519 nic_vport_context.node_guid);
520
521 kvfree(out);
522
523 return 0;
524}
525EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
526
527int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
528 u16 vport, u64 node_guid)
529{
530 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
531 void *nic_vport_context;
532 void *in;
533 int err;
534
535 if (!vport)
536 return -EINVAL;
537 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
538 return -EACCES;
539
540 in = kvzalloc(inlen, GFP_KERNEL);
541 if (!in)
542 return -ENOMEM;
543
544 MLX5_SET(modify_nic_vport_context_in, in,
545 field_select.node_guid, 1);
546 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
547 MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
548
549 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
550 in, nic_vport_context);
551 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
552
553 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
554
555 kvfree(in);
556
557 return err;
558}
559
560int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
561 u16 *qkey_viol_cntr)
562{
563 u32 *out;
564 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
565
566 out = kvzalloc(outlen, GFP_KERNEL);
567 if (!out)
568 return -ENOMEM;
569
570 mlx5_query_nic_vport_context(mdev, 0, out, outlen);
571
572 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
573 nic_vport_context.qkey_violation_counter);
574
575 kvfree(out);
576
577 return 0;
578}
579EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
580
/* Query a single GID of an HCA vport (or, with gid_index == 0xffff, size
 * the response for the whole table) and return it through @gid.
 *
 * Reading another vport (@other_vport set) requires the group-manager
 * capability; otherwise -EPERM.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	/* 0xffff is the "whole table" wildcard index. */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* GID entries follow the fixed response header. */
	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* First GID entry sits immediately after the response header. */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
646
/* Query one PKey (or, with pkey_index == 0xffff, the whole table) of an
 * HCA vport into the caller's @pkey array.
 *
 * Reading another vport (@other_vport set) requires the group-manager
 * capability; otherwise -EPERM.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	/* 0xffff is the "whole table" wildcard index. */
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* PKey entries follow the fixed response header. */
	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy each returned entry out to the caller's array. */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
711
712int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
713 u8 other_vport, u8 port_num,
714 u16 vf_num,
715 struct mlx5_hca_vport_context *rep)
716{
717 int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
718 int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
719 int is_group_manager;
720 void *out;
721 void *ctx;
722 int err;
723
724 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
725
726 out = kzalloc(out_sz, GFP_KERNEL);
727 if (!out)
728 return -ENOMEM;
729
730 MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
731
732 if (other_vport) {
733 if (is_group_manager) {
734 MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
735 MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
736 } else {
737 err = -EPERM;
738 goto ex;
739 }
740 }
741
742 if (MLX5_CAP_GEN(dev, num_ports) == 2)
743 MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
744
745 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
746 if (err)
747 goto ex;
748
749 ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
750 rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
751 rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
752 rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
753 rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
754 rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
755 rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
756 port_physical_state);
757 rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
758 rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
759 port_physical_state);
760 rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
761 rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
762 rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
763 rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
764 cap_mask1_field_select);
765 rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
766 rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
767 cap_mask2_field_select);
768 rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
769 rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
770 init_type_reply);
771 rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
772 rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
773 subnet_timeout);
774 rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
775 rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
776 rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
777 qkey_violation_counter);
778 rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
779 pkey_violation_counter);
780 rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
781 rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
782 system_image_guid);
783
784ex:
785 kfree(out);
786 return err;
787}
788EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
789
790int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
791 u64 *sys_image_guid)
792{
793 struct mlx5_hca_vport_context *rep;
794 int err;
795
796 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
797 if (!rep)
798 return -ENOMEM;
799
800 err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
801 if (!err)
802 *sys_image_guid = rep->sys_image_guid;
803
804 kfree(rep);
805 return err;
806}
807EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
808
809int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
810 u64 *node_guid)
811{
812 struct mlx5_hca_vport_context *rep;
813 int err;
814
815 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
816 if (!rep)
817 return -ENOMEM;
818
819 err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
820 if (!err)
821 *node_guid = rep->node_guid;
822
823 kfree(rep);
824 return err;
825}
826EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
827
828int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
829 u16 vport,
830 int *promisc_uc,
831 int *promisc_mc,
832 int *promisc_all)
833{
834 u32 *out;
835 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
836 int err;
837
838 out = kzalloc(outlen, GFP_KERNEL);
839 if (!out)
840 return -ENOMEM;
841
842 err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
843 if (err)
844 goto out;
845
846 *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
847 nic_vport_context.promisc_uc);
848 *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
849 nic_vport_context.promisc_mc);
850 *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
851 nic_vport_context.promisc_all);
852
853out:
854 kfree(out);
855 return err;
856}
857EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
858
859int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
860 int promisc_uc,
861 int promisc_mc,
862 int promisc_all)
863{
864 void *in;
865 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
866 int err;
867
868 in = kvzalloc(inlen, GFP_KERNEL);
869 if (!in)
870 return -ENOMEM;
871
872 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
873 MLX5_SET(modify_nic_vport_context_in, in,
874 nic_vport_context.promisc_uc, promisc_uc);
875 MLX5_SET(modify_nic_vport_context_in, in,
876 nic_vport_context.promisc_mc, promisc_mc);
877 MLX5_SET(modify_nic_vport_context_in, in,
878 nic_vport_context.promisc_all, promisc_all);
879
880 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
881
882 kvfree(in);
883
884 return err;
885}
886EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
887
/* Bit positions used by mlx5_nic_vport_query_local_lb() when folding the
 * per-type loopback-disable flags into one value.
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
892
/* Enable or disable local loopback on our own NIC vport.
 *
 * The context carries "disable" flags, hence the !enable inversion.
 * Only fields the device can actually change (per HCA caps) are selected;
 * if neither loopback type is controllable this is a successful no-op.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
930
931int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
932{
933 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
934 u32 *out;
935 int value;
936 int err;
937
938 out = kzalloc(outlen, GFP_KERNEL);
939 if (!out)
940 return -ENOMEM;
941
942 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
943 if (err)
944 goto out;
945
946 value = MLX5_GET(query_nic_vport_context_out, out,
947 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
948
949 value |= MLX5_GET(query_nic_vport_context_out, out,
950 nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;
951
952 *status = !value;
953
954out:
955 kfree(out);
956 return err;
957}
958EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
959
/* Values written to the NIC vport context roce_en field. */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
964
965static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
966 enum mlx5_vport_roce_state state)
967{
968 void *in;
969 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
970 int err;
971
972 in = kvzalloc(inlen, GFP_KERNEL);
973 if (!in)
974 return -ENOMEM;
975
976 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
977 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
978 state);
979
980 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
981
982 kvfree(in);
983
984 return err;
985}
986
/* Reference-counted RoCE enable: the first caller flips the NIC vport
 * context to ROCE_ENABLED; later callers only bump the count.  Serialized
 * by mlx5_roce_en_lock; the count is incremented only when no error
 * occurred.
 */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
1002
/* Reference-counted RoCE disable: only the last caller (count reaching 0)
 * turns the vport context off.  On command failure the count is restored
 * so a later disable can retry.  A call with a zero count is a no-op.
 */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
1020
1021int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
1022 int vf, u8 port_num, void *out,
1023 size_t out_sz)
1024{
1025 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1026 int is_group_manager;
1027 void *in;
1028 int err;
1029
1030 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1031 in = kvzalloc(in_sz, GFP_KERNEL);
1032 if (!in) {
1033 err = -ENOMEM;
1034 return err;
1035 }
1036
1037 MLX5_SET(query_vport_counter_in, in, opcode,
1038 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1039 if (other_vport) {
1040 if (is_group_manager) {
1041 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1042 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
1043 } else {
1044 err = -EPERM;
1045 goto free;
1046 }
1047 }
1048 if (MLX5_CAP_GEN(dev, num_ports) == 2)
1049 MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1050
1051 err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1052free:
1053 kvfree(in);
1054 return err;
1055}
1056EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1057
1058int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
1059 u8 other_vport, u64 *rx_discard_vport_down,
1060 u64 *tx_discard_vport_down)
1061{
1062 u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
1063 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
1064 int err;
1065
1066 MLX5_SET(query_vnic_env_in, in, opcode,
1067 MLX5_CMD_OP_QUERY_VNIC_ENV);
1068 MLX5_SET(query_vnic_env_in, in, op_mod, 0);
1069 MLX5_SET(query_vnic_env_in, in, vport_number, vport);
1070 MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
1071
1072 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1073 if (err)
1074 return err;
1075
1076 *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1077 vport_env.receive_discard_vport_down);
1078 *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1079 vport_env.transmit_discard_vport_down);
1080 return 0;
1081}
1082
1083int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
1084 u8 other_vport, u8 port_num,
1085 int vf,
1086 struct mlx5_hca_vport_context *req)
1087{
1088 int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
1089 u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
1090 int is_group_manager;
1091 void *in;
1092 int err;
1093 void *ctx;
1094
1095 mlx5_core_dbg(dev, "vf %d\n", vf);
1096 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1097 in = kzalloc(in_sz, GFP_KERNEL);
1098 if (!in)
1099 return -ENOMEM;
1100
1101 memset(out, 0, sizeof(out));
1102 MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
1103 if (other_vport) {
1104 if (is_group_manager) {
1105 MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
1106 MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
1107 } else {
1108 err = -EPERM;
1109 goto ex;
1110 }
1111 }
1112
1113 if (MLX5_CAP_GEN(dev, num_ports) > 1)
1114 MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
1115
1116 ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
1117 MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
1118 MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
1119 MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
1120 MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
1121 MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
1122 MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
1123 MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
1124 MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
1125 MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
1126 MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
1127 MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
1128 MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
1129 MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
1130 MLX5_SET(hca_vport_context, ctx, lid, req->lid);
1131 MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
1132 MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
1133 MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
1134 MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
1135 MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
1136 MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
1137 MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
1138 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
1139ex:
1140 kfree(in);
1141 return err;
1142}
1143EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1144
/* Affiliate @port_mdev's NIC vport with @master_mdev's vHCA (dual-port
 * RoCE).  RoCE is enabled on the port first; if the modify command fails
 * it is rolled back so the enable refcount stays balanced.
 */
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id,
		 MLX5_CAP_GEN(master_mdev, vhca_id));
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));

	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
	if (err)
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1177
1178int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1179{
1180 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1181 void *in;
1182 int err;
1183
1184 in = kvzalloc(inlen, GFP_KERNEL);
1185 if (!in)
1186 return -ENOMEM;
1187
1188 MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1189 MLX5_SET(modify_nic_vport_context_in, in,
1190 nic_vport_context.affiliated_vhca_id, 0);
1191 MLX5_SET(modify_nic_vport_context_in, in,
1192 nic_vport_context.affiliation_criteria, 0);
1193
1194 err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
1195 if (!err)
1196 mlx5_nic_vport_disable_roce(port_mdev);
1197
1198 kvfree(in);
1199 return err;
1200}
1201EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1202
1203u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1204{
1205 int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1206 u64 tmp = 0;
1207
1208 if (mdev->sys_image_guid)
1209 return mdev->sys_image_guid;
1210
1211 if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1212 mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1213 else
1214 mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1215
1216 mdev->sys_image_guid = tmp;
1217
1218 return tmp;
1219}
1220EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1221