/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

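/* Convention used throughout this file: vport 0 refers to the caller's
 * own vport, so the other_vport bit is only set when a non-zero vport
 * number is queried or modified on behalf of another function (e.g. a VF).
 */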
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
	int err;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return err;
}

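/* Returns the operational state of the vport. A command failure leaves the
 * zero-initialized output buffer untouched, so this helper falls back to
 * returning 0; the admin-state variant below behaves the same way and
 * differs only in the field it extracts.
 */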
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

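/* Set the administrative state of a vport; unlike the query wrappers above,
 * command failures are propagated to the caller.
 */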
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
	int err;

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

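/* Thin wrapper that queries the NIC vport context into a caller-supplied
 * output buffer of at least query_nic_vport_context_out size.
 */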
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

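/* The caller builds the modify_nic_vport_context_in payload (field_select
 * plus the fields being changed); only the opcode is stamped here.
 */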
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

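/* Read the permanent MAC address of a vport. The permanent_address field
 * is an 8-byte container whose low 6 bytes hold the MAC, hence the
 * &out_addr[2] offset below. A minimal (hypothetical) caller:
 *
 *	u8 mac[ETH_ALEN];
 *	int err = mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 */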
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

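/* Write a new permanent MAC address; mirrors the +2 byte layout used by
 * the query path above.
 */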
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;
	u8 *perm_mac;
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

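/* MTU accessors. Both operate on the caller's own vport (vport 0), so no
 * other_vport handling is needed here.
 */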
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

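/* Query the allowed UC/MC address list of a vport. *list_size is in/out:
 * on entry it is the capacity of addr_list (clamped to the device maximum
 * with a warning), on return the number of entries actually written.
 */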
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* Each mac_address_layout entry is 8 bytes; the MAC itself
		 * occupies the low 6 bytes, hence the +2 offset.
		 */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;

		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

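/* Replace the allowed UC/MC address list of the local vport with
 * addr_list; fails with -ENOSPC if list_size exceeds the device maximum.
 */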
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;

		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

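/* Query the allowed VLAN list of a vport. Like the MAC-list query, *size
 * is in/out and the request is clamped to the device maximum.
 */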
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* VLAN entries share the current_uc_mac_address flexible
		 * array in the context layout; each slot is read here as a
		 * vlan_layout struct instead.
		 */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);

		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

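/* Replace the allowed VLAN list of the local vport; the entries are
 * written through the same flexible array used for MAC lists.
 */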
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);

		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

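/* The next three helpers read single read-only fields out of the local
 * NIC vport context: system image GUID, node GUID and the QKey violation
 * counter.
 */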
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*system_image_guid = MLX5_GET64(query_nic_vport_context_out,
						out,
						nic_vport_context.system_image_guid);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
					   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

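/* Query GIDs of an HCA vport. A gid_index of 0xffff requests the whole
 * table (the output buffer is sized for tbsz entries), although only the
 * first returned GID is copied back to the caller here.
 */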
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

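/* Query P_Keys of an HCA vport. As with the GID query, a pkey_index of
 * 0xffff requests the whole table; all returned entries are copied into
 * the caller's pkey array, which must be sized accordingly.
 */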
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
				 vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

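/* Query the full HCA vport context and unpack it field by field into the
 * host-order struct mlx5_hca_vport_context representation.
 */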
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number,
				 vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;
	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out,
			   hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

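/* Read the three promiscuous-mode flags (unicast, multicast, all) from a
 * vport's NIC context in a single query.
 */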
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

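/* RoCE on/off is just the roce_en bit in the NIC vport context; the two
 * exported helpers below share one update routine.
 */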
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

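/* Query the traffic counters of a vport. When querying another vport as
 * group manager, VF n maps to vport n + 1, since vport 0 is the PF itself.
 */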
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto free;
	err = mlx5_cmd_status_to_err_v2(out);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

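/* Program an HCA vport context from the host-order representation; which
 * fields take effect is controlled by req->field_select.
 */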
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)] = {0};
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number,
				 vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select,
		 req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply,
		 req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter,
		 req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter,
		 req->pkey_violation_counter);

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		goto ex;

	err = mlx5_cmd_status_to_err_v2(out);

ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);