// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file contains OPA Virtual Network Interface Controller (VNIC)
 * Ethernet Management Agent (EMA) driver.
 */
#include <linux/module.h>
#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>

#include "opa_vnic_internal.h"

#define DRV_VERSION "1.0"
char opa_vnic_driver_name[] = "opa_vnic";
const char opa_vnic_driver_version[] = DRV_VERSION;

/*
 * The trap service level is kept in bits 3 to 7 of the trap_sl_rsvd
 * field of the class port info MAD.
 */
#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x) (((x) >> 3) & 0x1f)

/* Cap trap bursts to a reasonable limit good for normal cases */
#define OPA_VNIC_TRAP_BURST_LIMIT 4

/*
 * VNIC trap rate-limit timeout in microseconds:
 * 4096 * 2^18 ns (~1.07 s), matching the class port info response
 * time value advertised below.
 */
#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000)

#define OPA_VNIC_UNSUP_ATTR \
		cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)

#define OPA_VNIC_INVAL_ATTR \
		cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)

#define OPA_VNIC_CLASS_CAP_TRAP 0x1

/* Maximum number of VNIC virtual Ethernet switch ports per OPA port */
#define OPA_VNIC_MAX_NUM_VPORT 255

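/**
 * struct opa_vnic_vema_port -- VNIC VEMA port details
 * @cport: pointer to the parent VNIC control port
 * @mad_agent: MAD agent registered on this OPA port
 * @class_port_info: class port info set by the Ethernet manager
 * @tid: transaction id used for traps sent from this port
 * @port_num: OPA port number
 * @vports: xarray of virtual Ethernet switch port adapters
 * @event_handler: IB event handler for port state changes
 * @lock: serializes VEMA MAD processing and vport removal
 */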
struct opa_vnic_vema_port {
	struct opa_vnic_ctrl_port  *cport;
	struct ib_mad_agent        *mad_agent;
	struct opa_class_port_info  class_port_info;
	u64                         tid;
	u8                          port_num;
	struct xarray               vports;
	struct ib_event_handler     event_handler;

	/* Lock to query/update network adapter */
	struct mutex                lock;
};

static void opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
				  void *client_data);

static struct ib_client opa_vnic_client = {
	.name   = opa_vnic_driver_name,
	.add    = opa_vnic_vema_add_one,
	.remove = opa_vnic_vema_rem_one,
};

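/**
 * vema_get_vport_num -- Get the vport number from the MAD
 * @recvd_mad: received MAD
 *
 * Return: the vport number encoded in the attribute modifier
 */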
static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad)
{
	return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;
}

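/**
 * vema_get_vport_adapter -- Get the adapter addressed by the MAD
 * @recvd_mad: received MAD
 * @port: port on which the MAD was received
 *
 * Return: the adapter for the addressed vport, or NULL if none exists
 */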
static inline struct opa_vnic_adapter *
vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
		       struct opa_vnic_vema_port *port)
{
	u8 vport_num = vema_get_vport_num(recvd_mad);

	return xa_load(&port->vports, vport_num);
}

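/**
 * vema_mac_tbl_req_ok -- Sanity check a MAC table request
 * @mac_tbl: requested MAC table segment
 *
 * Checks that the requested number of entries fits in a single EMA MAD
 * and that offset plus count stays within the MAC table.
 *
 * Return: true if the offset and number of entries are valid
 */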
static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl)
{
	u16 offset, num_entries;
	u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) /
			   sizeof(mac_tbl->tbl_entries[0]));

	offset = be16_to_cpu(mac_tbl->offset);
	num_entries = be16_to_cpu(mac_tbl->num_entries);

	return ((num_entries <= req_entries) &&
		(offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES));
}

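/*
 * Return the power-on default values in the port info response
 * to ensure consistency between the driver and the Ethernet manager.
 */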
static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
{
	memset(port_info, 0, sizeof(*port_info));
	port_info->vport.max_mac_tbl_ent =
		cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
	port_info->vport.max_smac_ent =
		cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
	port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
	port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
	port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}

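/**
 * vema_add_vport -- Add a new virtual Ethernet switch port
 * @port: VEMA port on which the vport is created
 * @vport_num: vport number
 *
 * Return: the new adapter on success, an ERR_PTR otherwise
 */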
static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
					       u8 vport_num)
{
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_vnic_adapter *adapter;

	adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num);
	if (!IS_ERR(adapter)) {
		int rc;

		adapter->cport = cport;
		rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
		if (rc < 0) {
			opa_vnic_rem_netdev(adapter);
			adapter = ERR_PTR(rc);
		}
	}

	return adapter;
}

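/**
 * vema_get_class_port_info -- Get class port info
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Copies the stored class port info into the response and fills in the
 * version, capability and response time fields.
 */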
static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
				     struct opa_vnic_vema_mad *recvd_mad,
				     struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_class_port_info *port_info;

	port_info = (struct opa_class_port_info *)rsp_mad->data;
	memcpy(port_info, &port->class_port_info, sizeof(*port_info));
	port_info->base_version = OPA_MGMT_BASE_VERSION;
	port_info->class_version = OPA_EMA_CLASS_VERSION;

	/*
	 * Capability mask: the low byte carries the class capabilities
	 * (trap support), the high byte carries the maximum number of
	 * vports.
	 */
	port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP |
					   (OPA_VNIC_MAX_NUM_VPORT << 8)));

	/*
	 * Set the response time value to 18, i.e. 4096 * 2^18 ns (~1.07 s),
	 * in the low five bits of cap_mask2_resp_time.
	 */
	port_info->cap_mask2_resp_time = cpu_to_be32(18);
}

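/**
 * vema_set_class_port_info -- Set class port info
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Stores the class port info sent by the Ethernet manager (used later for
 * sending traps) and echoes the current values back in the response.
 */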
static void vema_set_class_port_info(struct opa_vnic_vema_port *port,
				     struct opa_vnic_vema_mad *recvd_mad,
				     struct opa_vnic_vema_mad *rsp_mad)
{
	memcpy(&port->class_port_info, recvd_mad->data,
	       sizeof(port->class_port_info));

	vema_get_class_port_info(port, recvd_mad, rsp_mad);
}

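/**
 * vema_get_veswport_info -- Get veswport info
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Returns the current vesw and per-veswport info for the addressed vport,
 * or the power-on defaults if the vport does not exist yet.
 */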
static void vema_get_veswport_info(struct opa_vnic_vema_port *port,
				   struct opa_vnic_vema_mad *recvd_mad,
				   struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_info *port_info =
				  (struct opa_veswport_info *)rsp_mad->data;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		memset(port_info, 0, sizeof(*port_info));
		opa_vnic_get_vesw_info(adapter, &port_info->vesw);
		opa_vnic_get_per_veswport_info(adapter,
					       &port_info->vport);
	} else {
		vema_get_pod_values(port_info);
	}
}

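/**
 * vema_set_veswport_info -- Set veswport info
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Applies the vesw and per-veswport configuration sent by the Ethernet
 * manager, creating the vport if it does not exist yet, and returns the
 * resulting state in the response.
 */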
static void vema_set_veswport_info(struct opa_vnic_vema_port *port,
				   struct opa_vnic_vema_mad *recvd_mad,
				   struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_veswport_info *port_info;
	struct opa_vnic_adapter *adapter;
	u8 vport_num;

	vport_num = vema_get_vport_num(recvd_mad);

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		adapter = vema_add_vport(port, vport_num);
		if (IS_ERR(adapter)) {
			c_err("failed to add vport %d: %ld\n",
			      vport_num, PTR_ERR(adapter));
			goto err_exit;
		}
	}

	port_info = (struct opa_veswport_info *)recvd_mad->data;
	opa_vnic_set_vesw_info(adapter, &port_info->vesw);
	opa_vnic_set_per_veswport_info(adapter, &port_info->vport);

	/* Process the new configuration settings */
	opa_vnic_process_vema_config(adapter);

	vema_get_veswport_info(port, recvd_mad, rsp_mad);
	return;

err_exit:
	rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}

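/**
 * vema_get_mac_entries -- Get MAC table entries
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Returns the requested segment of the vport MAC table, provided the
 * requested offset and entry count are valid.
 */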
static void vema_get_mac_entries(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data;
	mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data;

	if (vema_mac_tbl_req_ok(mac_tbl_in)) {
		mac_tbl_out->offset = mac_tbl_in->offset;
		mac_tbl_out->num_entries = mac_tbl_in->num_entries;
		opa_vnic_query_mac_tbl(adapter, mac_tbl_out);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}

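/**
 * vema_set_mac_entries -- Set MAC table entries
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Updates the requested segment of the vport MAC table and returns the
 * resulting entries in the response.
 */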
static void vema_set_mac_entries(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_mactable *mac_tbl;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data;
	if (vema_mac_tbl_req_ok(mac_tbl)) {
		if (opa_vnic_update_mac_tbl(adapter, mac_tbl))
			rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
	}
	vema_get_mac_entries(port, recvd_mad, rsp_mad);
}

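/**
 * vema_set_delete_vesw -- Reset a virtual Ethernet switch
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Restores the power-on default configuration for the addressed vport,
 * releases its MAC table and returns the resulting state in the response.
 */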
static void vema_set_delete_vesw(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_info *port_info =
				  (struct opa_veswport_info *)rsp_mad->data;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	vema_get_pod_values(port_info);
	opa_vnic_set_vesw_info(adapter, &port_info->vesw);
	opa_vnic_set_per_veswport_info(adapter, &port_info->vport);

	/* Process the default configuration settings */
	opa_vnic_process_vema_config(adapter);

	opa_vnic_release_mac_tbl(adapter);

	vema_get_veswport_info(port, recvd_mad, rsp_mad);
}

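/**
 * vema_get_mac_list -- Get the unicast or multicast MAC list
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 * @attr_id: attribute id selecting the unicast or multicast list
 *
 * Returns the requested interface MAC addresses, capping the number of
 * entries to what fits in a single EMA MAD.
 */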
static void vema_get_mac_list(struct opa_vnic_vema_port *port,
			      struct opa_vnic_vema_mad *recvd_mad,
			      struct opa_vnic_vema_mad *rsp_mad,
			      u16 attr_id)
{
	struct opa_veswport_iface_macs *macs_in, *macs_out;
	int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data;
	macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data;

	macs_out->start_idx = macs_in->start_idx;
	if (macs_in->num_macs_in_msg)
		macs_out->num_macs_in_msg = macs_in->num_macs_in_msg;
	else
		macs_out->num_macs_in_msg = cpu_to_be16(max_entries);

	if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS)
		opa_vnic_query_mcast_macs(adapter, macs_out);
	else
		opa_vnic_query_ucast_macs(adapter, macs_out);
}

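/**
 * vema_get_summary_counters -- Get summary counters
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 */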
static void vema_get_summary_counters(struct opa_vnic_vema_port *port,
				      struct opa_vnic_vema_mad *recvd_mad,
				      struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_summary_counters *cntrs;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data;
		opa_vnic_get_summary_counters(adapter, cntrs);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}

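/**
 * vema_get_error_counters -- Get error counters
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 */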
static void vema_get_error_counters(struct opa_vnic_vema_port *port,
				    struct opa_vnic_vema_mad *recvd_mad,
				    struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_error_counters *cntrs;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		cntrs = (struct opa_veswport_error_counters *)rsp_mad->data;
		opa_vnic_get_error_counters(adapter, cntrs);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}

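/**
 * vema_get -- Process a received Get MAD
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Dispatches the Get request to the handler for the requested attribute.
 */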
static void vema_get(struct opa_vnic_vema_port *port,
		     struct opa_vnic_vema_mad *recvd_mad,
		     struct opa_vnic_vema_mad *rsp_mad)
{
	u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);

	switch (attr_id) {
	case OPA_EM_ATTR_CLASS_PORT_INFO:
		vema_get_class_port_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_INFO:
		vema_get_veswport_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
		vema_get_mac_entries(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_IFACE_UCAST_MACS:
		/* fall through */
	case OPA_EM_ATTR_IFACE_MCAST_MACS:
		vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
		break;
	case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS:
		vema_get_summary_counters(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS:
		vema_get_error_counters(port, recvd_mad, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
}

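/**
 * vema_set -- Process a received Set MAD
 * @port: port on which the MAD was received
 * @recvd_mad: received MAD
 * @rsp_mad: response MAD to be built
 *
 * Dispatches the Set request to the handler for the requested attribute.
 */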
static void vema_set(struct opa_vnic_vema_port *port,
		     struct opa_vnic_vema_mad *recvd_mad,
		     struct opa_vnic_vema_mad *rsp_mad)
{
	u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);

	switch (attr_id) {
	case OPA_EM_ATTR_CLASS_PORT_INFO:
		vema_set_class_port_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_INFO:
		vema_set_veswport_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
		vema_set_mac_entries(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_DELETE_VESW:
		vema_set_delete_vesw(port, recvd_mad, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
}

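/**
 * vema_send -- Send completion handler for the VEMA MAD agent
 * @mad_agent: MAD agent that sent the MAD
 * @mad_wc: send work completion
 *
 * Frees the address handle and the send buffer of the completed MAD.
 */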
static void vema_send(struct ib_mad_agent *mad_agent,
		      struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_wc->send_buf);
}

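/**
 * vema_recv -- Receive handler for the VEMA MAD agent
 * @mad_agent: MAD agent on which the MAD was received
 * @send_buf: send buffer of the originating request (NULL for unsolicited MADs)
 * @mad_wc: receive work completion
 *
 * Handles Get and Set requests from the Ethernet manager and posts a
 * GetResp MAD back to the sender.
 */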
static void vema_recv(struct ib_mad_agent *mad_agent,
		      struct ib_mad_send_buf *send_buf,
		      struct ib_mad_recv_wc *mad_wc)
{
	struct opa_vnic_vema_port *port;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct opa_vnic_vema_mad *vema_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	port = mad_agent->context;
	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto free_recv_mad;

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA,
				 GFP_KERNEL, OPA_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;
	vema_mad = rsp->mad;
	memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);
	vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	vema_mad->mad_hdr.status = 0;

	/* Lock ensures the network adapter is not removed during processing */
	mutex_lock(&port->lock);

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
			 vema_mad);
		break;
	case IB_MGMT_METHOD_SET:
		vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
			 vema_mad);
		break;
	default:
		vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
	mutex_unlock(&port->lock);

	if (!ib_post_send_mad(rsp, NULL)) {
		/*
		 * On a successful post, the AH and the send MAD are
		 * released by the send completion handler.
		 */
		goto free_recv_mad;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
free_recv_mad:
	ib_free_recv_mad(mad_wc);
}

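/**
 * vema_get_port -- Get the VEMA port for an OPA port number
 * @cport: control port
 * @port_num: OPA port number (1-based)
 *
 * The per-port structures are allocated immediately after the control
 * port structure.
 *
 * Return: pointer to the requested opa_vnic_vema_port, or NULL if the
 * port number is out of range
 */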
static struct opa_vnic_vema_port *
vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
{
	struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport);

	if (port_num > cport->num_ports)
		return NULL;

	return port + (port_num - 1);
}

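/**
 * opa_vnic_vema_send_trap -- Send a trap to the Ethernet manager
 * @adapter: VNIC adapter raising the trap
 * @data: trap data filled in by the caller
 * @lid: issuer LID placed in the notice
 *
 * Builds an EMA trap MAD from @data and sends it to the trap LID taken
 * from the class port info, rate limiting bursts of traps.
 */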
void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
			     struct __opa_veswport_trap *data, u32 lid)
{
	struct opa_vnic_ctrl_port *cport = adapter->cport;
	struct ib_mad_send_buf *send_buf;
	struct opa_vnic_vema_port *port;
	struct ib_device *ibp;
	struct opa_vnic_vema_mad_trap *trap_mad;
	struct opa_class_port_info *class;
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	struct opa_veswport_trap *trap;
	u32 trap_lid;
	u16 pkey_idx;

	if (!cport)
		goto err_exit;
	ibp = cport->ibdev;
	port = vema_get_port(cport, data->opaportnum);
	if (!port || !port->mad_agent)
		goto err_exit;

	/* Limit the number of traps sent within the timeout window */
	if (time_before(jiffies, adapter->trap_timeout)) {
		if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) {
			v_warn("Trap rate exceeded\n");
			goto err_exit;
		} else {
			adapter->trap_count++;
		}
	} else {
		adapter->trap_count = 0;
	}

	class = &port->class_port_info;

	/* Build the address handle from the class port info */
	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(ibp, port->port_num);
	rdma_ah_set_sl(&ah_attr,
		       GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd));
	rdma_ah_set_port_num(&ah_attr, port->port_num);
	trap_lid = be32_to_cpu(class->trap_lid);

	/*
	 * The trap sink LID comes from the class port info set by the
	 * Ethernet manager; without it the trap cannot be sent.
	 */
	if (!trap_lid) {
		c_err("%s: Invalid dlid\n", __func__);
		goto err_exit;
	}

	rdma_ah_set_dlid(&ah_attr, trap_lid);
	ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
	if (IS_ERR(ah)) {
		c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
		c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
		      rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr),
		      rdma_ah_get_port_num(&ah_attr));
		goto err_exit;
	}

	/* Prefer the full pkey; fall back to the partial pkey */
	if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL,
			 &pkey_idx) < 0) {
		c_err("%s:full key not found, defaulting to partial\n",
		      __func__);
		if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL,
				 &pkey_idx) < 0)
			pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
				      IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf)) {
		c_err("%s:Couldn't allocate send buf\n", __func__);
		goto err_sndbuf;
	}

	send_buf->ah = ah;

	/* Initialize the MAD header */
	trap_mad = send_buf->mad;
	trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
	trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA;
	trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION;
	trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP;
	port->tid++;
	trap_mad->mad_hdr.tid = cpu_to_be64(port->tid);
	trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE;

	/* Set up the OUI */
	trap_mad->oui[0] = INTEL_OUI_1;
	trap_mad->oui[1] = INTEL_OUI_2;
	trap_mad->oui[2] = INTEL_OUI_3;

	/* Set up the notice attribute */
	trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1;
	trap_mad->notice.oui_1 = INTEL_OUI_1;
	trap_mad->notice.oui_2 = INTEL_OUI_2;
	trap_mad->notice.oui_3 = INTEL_OUI_3;
	trap_mad->notice.issuer_lid = cpu_to_be32(lid);

	/* Copy the actual trap data */
	trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data;
	trap->fabric_id = cpu_to_be16(data->fabric_id);
	trap->veswid = cpu_to_be16(data->veswid);
	trap->veswportnum = cpu_to_be32(data->veswportnum);
	trap->opaportnum = cpu_to_be16(data->opaportnum);
	trap->veswportindex = data->veswportindex;
	trap->opcode = data->opcode;

	/* On the first successful send of a burst, start the timeout window */
	if (ib_post_send_mad(send_buf, NULL)) {
		ib_free_send_mad(send_buf);
	} else {
		if (adapter->trap_count)
			return;
		adapter->trap_timeout = jiffies +
					usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT);
		return;
	}

err_sndbuf:
	rdma_destroy_ah(ah, 0);
err_exit:
	v_err("Aborting trap\n");
}

static void opa_vnic_event(struct ib_event_handler *handler,
			   struct ib_event *record)
{
	struct opa_vnic_vema_port *port =
		container_of(handler, struct opa_vnic_vema_port, event_handler);
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_vnic_adapter *adapter;
	unsigned long index;

	if (record->element.port_num != port->port_num)
		return;

	c_dbg("OPA_VNIC received event %d on device %s port %d\n",
	      record->event, dev_name(&record->device->dev),
	      record->element.port_num);

	if (record->event != IB_EVENT_PORT_ERR &&
	    record->event != IB_EVENT_PORT_ACTIVE)
		return;

	xa_for_each(&port->vports, index, adapter) {
		if (record->event == IB_EVENT_PORT_ACTIVE)
			netif_carrier_on(adapter->netdev);
		else
			netif_carrier_off(adapter->netdev);
	}
}

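/**
 * vema_unregister -- Stop VEMA processing and remove vports
 * @cport: control port
 *
 * Removes all network devices, then unregisters the MAD agents and the
 * event handlers for every OPA port.
 */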
static void vema_unregister(struct opa_vnic_ctrl_port *cport)
{
	struct opa_vnic_adapter *adapter;
	unsigned long index;
	int i;

	for (i = 1; i <= cport->num_ports; i++) {
		struct opa_vnic_vema_port *port = vema_get_port(cport, i);

		if (!port->mad_agent)
			continue;

		/* Lock ensures no MAD is being processed */
		mutex_lock(&port->lock);
		xa_for_each(&port->vports, index, adapter)
			opa_vnic_rem_netdev(adapter);
		mutex_unlock(&port->lock);

		ib_unregister_mad_agent(port->mad_agent);
		port->mad_agent = NULL;
		mutex_destroy(&port->lock);
		xa_destroy(&port->vports);
		ib_unregister_event_handler(&port->event_handler);
	}
}

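/**
 * vema_register -- Register the VEMA MAD agents
 * @cport: control port
 *
 * Registers a MAD agent and an IB event handler for every OPA port of the
 * device so that Get and Set MADs from the Ethernet manager can be handled.
 *
 * Return: 0 on success, a negative errno otherwise
 */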
static int vema_register(struct opa_vnic_ctrl_port *cport)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA,
		.mgmt_class_version = OPA_MGMT_BASE_VERSION,
		.oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 }
	};
	int i;

	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	/* Create the MAD agent and the event handler for each port */
	for (i = 1; i <= cport->num_ports; i++) {
		struct opa_vnic_vema_port *port = vema_get_port(cport, i);
		int ret;

		port->cport = cport;
		port->port_num = i;

		INIT_IB_EVENT_HANDLER(&port->event_handler,
				      cport->ibdev, opa_vnic_event);
		ib_register_event_handler(&port->event_handler);

		xa_init(&port->vports);
		mutex_init(&port->lock);
		port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
							IB_QPT_GSI, &reg_req,
							IB_MGMT_RMPP_VERSION,
							vema_send, vema_recv,
							port, 0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			port->mad_agent = NULL;
			mutex_destroy(&port->lock);
			vema_unregister(cport);
			return ret;
		}
	}

	return 0;
}

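/**
 * opa_vnic_ctrl_config_dev -- Advertise Ethernet-on-fabric support
 * @cport: control port
 * @en: true to set, false to clear the capability
 *
 * Sets or clears the IsEthOnFabricSupported port capability bit on every
 * OPA port of the device via ib_modify_port().
 */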
static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
{
	struct ib_port_modify pm = { 0 };
	int i;

	if (en)
		pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
	else
		pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;

	for (i = 1; i <= cport->num_ports; i++)
		ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
}

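/**
 * opa_vnic_vema_add_one -- Handle a new IB device
 * @device: IB device
 *
 * Allocates the VNIC control port for OPA-VNIC capable devices, registers
 * the VEMA MAD agents and advertises Ethernet-on-fabric support.
 */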
static void opa_vnic_vema_add_one(struct ib_device *device)
{
	struct opa_vnic_ctrl_port *cport;
	int rc, size = sizeof(*cport);

	if (!rdma_cap_opa_vnic(device))
		return;

	size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
	cport = kzalloc(size, GFP_KERNEL);
	if (!cport)
		return;

	cport->num_ports = device->phys_port_cnt;
	cport->ibdev = device;

	/* Initialize the OPA VNIC management agent (VEMA) */
	rc = vema_register(cport);
	if (!rc)
		c_info("VNIC client initialized\n");

	ib_set_client_data(device, &opa_vnic_client, cport);
	opa_vnic_ctrl_config_dev(cport, true);
}

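/**
 * opa_vnic_vema_rem_one -- Handle IB device removal
 * @device: IB device
 * @client_data: client data (the VNIC control port)
 *
 * Clears the Ethernet-on-fabric capability, unregisters the VEMA MAD
 * agents and frees the control port.
 */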
static void opa_vnic_vema_rem_one(struct ib_device *device,
				  void *client_data)
{
	struct opa_vnic_ctrl_port *cport = client_data;

	if (!cport)
		return;

	c_info("removing VNIC client\n");
	opa_vnic_ctrl_config_dev(cport, false);
	vema_unregister(cport);
	kfree(cport);
}

static int __init opa_vnic_init(void)
{
	int rc;

	pr_info("OPA Virtual Network Driver - v%s\n",
		opa_vnic_driver_version);

	rc = ib_register_client(&opa_vnic_client);
	if (rc)
		pr_err("VNIC driver register failed %d\n", rc);

	return rc;
}
module_init(opa_vnic_init);

static void opa_vnic_deinit(void)
{
	ib_unregister_client(&opa_vnic_client);
}
module_exit(opa_vnic_deinit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel OPA Virtual Network driver");