#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

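/*
 * NIC/HCA vport helpers. Every helper below follows the same firmware
 * mailbox pattern: build an "in" box sized with MLX5_ST_SZ_DW()/
 * MLX5_ST_SZ_BYTES(), fill fields with MLX5_SET()/MLX5_SET64(), execute
 * with mlx5_cmd_exec(), then decode the "out" box with MLX5_GET()/
 * MLX5_GET64(). A minimal sketch of the pattern (illustrative only;
 * "some_cmd" and "some_field" are hypothetical names, not a real
 * command):
 *
 *	u32 in[MLX5_ST_SZ_DW(some_cmd_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(some_cmd_out)] = {0};
 *
 *	MLX5_SET(some_cmd_in, in, opcode, MLX5_CMD_OP_SOME_CMD);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	val = MLX5_GET(some_cmd_out, out, some_field);
 */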
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

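/*
 * This wrapper (and mlx5_query_vport_admin_state() below) ignores the
 * return value of _mlx5_query_vport_state(): on command failure the
 * zero-initialized out box is still decoded, so the caller sees a zero
 * state field rather than an error code.
 */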
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

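/*
 * Convention used by the NIC vport helpers below: vport 0 refers to the
 * caller's own vport, so other_vport is only set for a non-zero vport
 * number (operating on another vport typically requires group-manager
 * privileges in firmware).
 */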
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

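/*
 * Unlike its query counterpart, this helper sets other_vport
 * unconditionally; presumably it is only meant to be called by an
 * eswitch manager on behalf of another vport (an assumption inferred
 * from the code, not spelled out here).
 */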
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

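/*
 * The 6-byte MAC is stored starting two bytes into the permanent_address
 * field, which is why the copies below offset the field pointer by 2 in
 * both directions.
 */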
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

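/*
 * Address lists: firmware bounds the current UC/MC list lengths via the
 * log_max_current_uc_list/log_max_current_mc_list capabilities, so the
 * helpers below clamp (query) or reject (modify) oversized requests.
 * Note that the query output buffer is sized from the modify-command
 * layout; that appears to over-allocate relative to the query output,
 * which is harmless but worth knowing when reading the code.
 */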
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

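/*
 * The VLAN allowed-list reuses the generic allowed-list slots: entries
 * are addressed via current_uc_mac_address[i] and then decoded (or
 * encoded) through the vlan_layout struct.
 */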
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	/* kvzalloc'ed memory must be freed with kvfree, not kfree */
	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	/* kvzalloc'ed memory must be freed with kvfree, not kfree */
	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -EOPNOTSUPP;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	/* kvzalloc'ed memory must be freed with kvfree, not kfree */
	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

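/*
 * A gid_index of 0xffff requests the whole GID table, so out_sz is grown
 * by nout entries; note that only the first returned GID is copied back
 * into *gid below.
 */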
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

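/*
 * Unpacks the whole firmware hca_vport_context into struct
 * mlx5_hca_vport_context. Note that rep->phys_state and
 * rep->port_physical_state are both read from the same
 * port_physical_state field; the duplication appears to be kept for
 * compatibility with existing callers.
 */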
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

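/*
 * Firmware exposes local loopback as *disable* bits, so enabling
 * loopback clears disable_mc_local_lb/disable_uc_local_lb (hence the
 * !enable below), and a queried status of true means neither disable
 * bit is set.
 */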
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

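/*
 * RoCE enablement is refcounted through mdev->roce.roce_en: only the
 * 0 -> 1 transition programs the device and only the 1 -> 0 transition
 * disables it, so enable/disable calls must be balanced. A minimal
 * usage sketch (error handling around the surrounding code elided):
 *
 *	err = mlx5_nic_vport_enable_roce(mdev);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_nic_vport_disable_roce(mdev);
 */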
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	if (atomic_inc_return(&mdev->roce.roce_en) != 1)
		return 0;
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	if (atomic_dec_return(&mdev->roce.roce_en) != 0)
		return 0;
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

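/*
 * Here vf is a zero-based VF index rather than a vport number, hence
 * vport_number is set to vf + 1 below (vport 0 being the PF itself).
 */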
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)] = {0};
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);