/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file contains OPA Virtual Network Interface Controller (VNIC)
 * Ethernet Management Agent (EMA) driver
 */

#include <linux/module.h>
#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>

#include "opa_vnic_internal.h"

char opa_vnic_driver_name[] = "opa_vnic";

/*
 * The trap service level is carried in bits 3 to 7 of the trap_sl_rsvd
 * field of the class port info.
 */
#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x)  (((x) >> 3) & 0x1f)

/* Cap trap bursts to a reasonable limit */
#define OPA_VNIC_TRAP_BURST_LIMIT 4

/*
 * Trap rate limit interval in microseconds: 4.096 usec * 2^18,
 * roughly 1.07 seconds, matching the response time value (18)
 * advertised in the class port info.
 */
#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000)

#define OPA_VNIC_UNSUP_ATTR \
	cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)

#define OPA_VNIC_INVAL_ATTR \
	cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)

#define OPA_VNIC_CLASS_CAP_TRAP   0x1

/* Maximum number of VNIC ports supported */
#define OPA_VNIC_MAX_NUM_VPORT    255
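
/**
 * struct opa_vnic_vema_port -- VNIC VEMA port details
 * @cport: pointer to the parent control port
 * @mad_agent: MAD agent handling EMA MADs for this port
 * @class_port_info: cached class port info supplied by the EM
 * @tid: transaction id used for traps
 * @port_num: OPA port number
 * @vports: xarray of vnic port adapters, indexed by vport number
 * @event_handler: IB event handler for this port
 * @lock: serializes MAD processing against vport add/remove
 */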
struct opa_vnic_vema_port {
	struct opa_vnic_ctrl_port *cport;
	struct ib_mad_agent *mad_agent;
	struct opa_class_port_info class_port_info;
	u64 tid;
	u8 port_num;
	struct xarray vports;
	struct ib_event_handler event_handler;

	/* Serializes MAD processing against vport add/remove */
	struct mutex lock;
};

static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
				  void *client_data);

static struct ib_client opa_vnic_client = {
	.name = opa_vnic_driver_name,
	.add = opa_vnic_vema_add_one,
	.remove = opa_vnic_vema_rem_one,
};
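
/**
 * vema_get_vport_num -- Get the vport number from a received MAD
 * @recvd_mad: received EMA MAD
 *
 * Return: the vport number carried in the low byte of attr_mod.
 */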
static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad)
{
	return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;
}
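
/**
 * vema_get_vport_adapter -- Look up the adapter addressed by a MAD
 * @recvd_mad: received EMA MAD
 * @port: VEMA port the MAD arrived on
 *
 * Return: the adapter registered for the MAD's vport number, or NULL.
 */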
static inline struct opa_vnic_adapter *
vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
		       struct opa_vnic_vema_port *port)
{
	u8 vport_num = vema_get_vport_num(recvd_mad);

	return xa_load(&port->vports, vport_num);
}
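
/**
 * vema_mac_tbl_req_ok -- Sanity check a MAC table request
 * @mac_tbl: MAC table request carried in the MAD data
 *
 * The request must fit within a single EMA MAD and must not address
 * entries beyond OPA_VNIC_MAC_TBL_MAX_ENTRIES.
 *
 * Return: true if the request is within bounds.
 */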
static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl)
{
	u16 offset, num_entries;
	u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) /
			   sizeof(mac_tbl->tbl_entries[0]));

	offset = be16_to_cpu(mac_tbl->offset);
	num_entries = be16_to_cpu(mac_tbl->num_entries);

	return ((num_entries <= req_entries) &&
		(offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES));
}
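
/**
 * vema_get_pod_values -- Get power-on default values
 * @port_info: veswport info to fill in
 *
 * Reports the default (drop-all) state used when the addressed vport
 * has not been configured yet.
 */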
static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
{
	memset(port_info, 0, sizeof(*port_info));
	port_info->vport.max_mac_tbl_ent =
		cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
	port_info->vport.max_smac_ent =
		cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
	port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
	port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
	port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}
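
/**
 * vema_add_vport -- Add a new vnic port
 * @port: VEMA port the request arrived on
 * @vport_num: vnic port number to add
 *
 * Allocates a netdev-backed adapter and stores it in the port's vports
 * xarray under @vport_num.
 *
 * Return: the new adapter, or an ERR_PTR on failure.
 */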
static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
					       u8 vport_num)
{
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_vnic_adapter *adapter;

	adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num);
	if (!IS_ERR(adapter)) {
		int rc;

		adapter->cport = cport;
		rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
		if (rc < 0) {
			opa_vnic_rem_netdev(adapter);
			adapter = ERR_PTR(rc);
		}
	}

	return adapter;
}
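
/**
 * vema_get_class_port_info -- Get class port info
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Returns the cached class port info along with this agent's
 * capabilities.
 */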
static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
				     struct opa_vnic_vema_mad *recvd_mad,
				     struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_class_port_info *port_info;

	port_info = (struct opa_class_port_info *)rsp_mad->data;
	memcpy(port_info, &port->class_port_info, sizeof(*port_info));
	port_info->base_version = OPA_MGMT_BASE_VERSION;
	port_info->class_version = OPA_EMA_CLASS_VERSION;

	/*
	 * Advertise that this agent generates traps (bit 0) and report
	 * the maximum number of VNIC ports in bits 8 to 15.
	 */
	port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP |
					   (OPA_VNIC_MAX_NUM_VPORT << 8)));

	/*
	 * Set a response time value of 18, i.e. 4.096 usec * 2^18
	 * (about 1.07 seconds).
	 */
	port_info->cap_mask2_resp_time = cpu_to_be32(18);
}
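
/**
 * vema_set_class_port_info -- Set class port info
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Caches the class port info (trap LID, trap SL, etc.) supplied by the
 * EM and echoes it back in the response.
 */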
static void vema_set_class_port_info(struct opa_vnic_vema_port *port,
				     struct opa_vnic_vema_mad *recvd_mad,
				     struct opa_vnic_vema_mad *rsp_mad)
{
	memcpy(&port->class_port_info, recvd_mad->data,
	       sizeof(port->class_port_info));

	vema_get_class_port_info(port, recvd_mad, rsp_mad);
}
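
/**
 * vema_get_veswport_info -- Get veswport info
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * If the addressed vport does not exist, power-on default values are
 * returned instead.
 */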
static void vema_get_veswport_info(struct opa_vnic_vema_port *port,
				   struct opa_vnic_vema_mad *recvd_mad,
				   struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_info *port_info =
		(struct opa_veswport_info *)rsp_mad->data;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		memset(port_info, 0, sizeof(*port_info));
		opa_vnic_get_vesw_info(adapter, &port_info->vesw);
		opa_vnic_get_per_veswport_info(adapter,
					       &port_info->vport);
	} else {
		vema_get_pod_values(port_info);
	}
}
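
/**
 * vema_set_veswport_info -- Set veswport info
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Creates the vport if necessary, applies the vesw and per-veswport
 * configuration, and returns the updated info in the response.
 */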
static void vema_set_veswport_info(struct opa_vnic_vema_port *port,
				   struct opa_vnic_vema_mad *recvd_mad,
				   struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_veswport_info *port_info;
	struct opa_vnic_adapter *adapter;
	u8 vport_num;

	vport_num = vema_get_vport_num(recvd_mad);

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		adapter = vema_add_vport(port, vport_num);
		if (IS_ERR(adapter)) {
			c_err("failed to add vport %d: %ld\n",
			      vport_num, PTR_ERR(adapter));
			goto err_exit;
		}
	}

	port_info = (struct opa_veswport_info *)recvd_mad->data;
	opa_vnic_set_vesw_info(adapter, &port_info->vesw);
	opa_vnic_set_per_veswport_info(adapter, &port_info->vport);

	/* Process the new configuration settings */
	opa_vnic_process_vema_config(adapter);

	vema_get_veswport_info(port, recvd_mad, rsp_mad);
	return;

err_exit:
	rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}
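
/**
 * vema_get_mac_entries -- Get MAC entries in the vnic MAC table
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Returns the requested window of the MAC table, or an invalid
 * attribute status if the request is out of bounds.
 */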
static void vema_get_mac_entries(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data;
	mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data;

	if (vema_mac_tbl_req_ok(mac_tbl_in)) {
		mac_tbl_out->offset = mac_tbl_in->offset;
		mac_tbl_out->num_entries = mac_tbl_in->num_entries;
		opa_vnic_query_mac_tbl(adapter, mac_tbl_out);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}
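
/**
 * vema_set_mac_entries -- Set MAC entries in the vnic MAC table
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Updates the MAC table with the supplied entries and returns the
 * resulting table contents in the response.
 */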
static void vema_set_mac_entries(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_mactable *mac_tbl;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data;
	if (vema_mac_tbl_req_ok(mac_tbl)) {
		if (opa_vnic_update_mac_tbl(adapter, mac_tbl))
			rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
	}
	vema_get_mac_entries(port, recvd_mad, rsp_mad);
}
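
/**
 * vema_set_delete_vesw -- Reset the vesw to power-on defaults
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * "Deletes" the virtual ethernet switch by reprogramming the vport
 * with power-on default values and releasing its MAC table.
 */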
static void vema_set_delete_vesw(struct opa_vnic_vema_port *port,
				 struct opa_vnic_vema_mad *recvd_mad,
				 struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_info *port_info =
		(struct opa_veswport_info *)rsp_mad->data;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	vema_get_pod_values(port_info);
	opa_vnic_set_vesw_info(adapter, &port_info->vesw);
	opa_vnic_set_per_veswport_info(adapter, &port_info->vport);

	/* Apply the power-on default configuration */
	opa_vnic_process_vema_config(adapter);

	opa_vnic_release_mac_tbl(adapter);

	vema_get_veswport_info(port, recvd_mad, rsp_mad);
}
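
/**
 * vema_get_mac_list -- Get the unicast or multicast MAC list
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 * @attr_id: attribute id selecting the unicast or multicast list
 */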
static void vema_get_mac_list(struct opa_vnic_vema_port *port,
			      struct opa_vnic_vema_mad *recvd_mad,
			      struct opa_vnic_vema_mad *rsp_mad,
			      u16 attr_id)
{
	struct opa_veswport_iface_macs *macs_in, *macs_out;
	int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (!adapter) {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
		return;
	}

	macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data;
	macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data;

	macs_out->start_idx = macs_in->start_idx;
	if (macs_in->num_macs_in_msg)
		macs_out->num_macs_in_msg = macs_in->num_macs_in_msg;
	else
		macs_out->num_macs_in_msg = cpu_to_be16(max_entries);

	if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS)
		opa_vnic_query_mcast_macs(adapter, macs_out);
	else
		opa_vnic_query_ucast_macs(adapter, macs_out);
}
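
/**
 * vema_get_summary_counters -- Get summary counters
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 */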
static void vema_get_summary_counters(struct opa_vnic_vema_port *port,
				      struct opa_vnic_vema_mad *recvd_mad,
				      struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_summary_counters *cntrs;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data;
		opa_vnic_get_summary_counters(adapter, cntrs);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}
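
/**
 * vema_get_error_counters -- Get error counters
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 */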
static void vema_get_error_counters(struct opa_vnic_vema_port *port,
				    struct opa_vnic_vema_mad *recvd_mad,
				    struct opa_vnic_vema_mad *rsp_mad)
{
	struct opa_veswport_error_counters *cntrs;
	struct opa_vnic_adapter *adapter;

	adapter = vema_get_vport_adapter(recvd_mad, port);
	if (adapter) {
		cntrs = (struct opa_veswport_error_counters *)rsp_mad->data;
		opa_vnic_get_error_counters(adapter, cntrs);
	} else {
		rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
	}
}
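
/**
 * vema_get -- Process a received Get MAD
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Dispatches on the attribute id; unsupported attributes are reported
 * in the response status.
 */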
static void vema_get(struct opa_vnic_vema_port *port,
		     struct opa_vnic_vema_mad *recvd_mad,
		     struct opa_vnic_vema_mad *rsp_mad)
{
	u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);

	switch (attr_id) {
	case OPA_EM_ATTR_CLASS_PORT_INFO:
		vema_get_class_port_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_INFO:
		vema_get_veswport_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
		vema_get_mac_entries(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_IFACE_UCAST_MACS:
	case OPA_EM_ATTR_IFACE_MCAST_MACS:
		vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
		break;
	case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS:
		vema_get_summary_counters(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS:
		vema_get_error_counters(port, recvd_mad, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
}
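
/**
 * vema_set -- Process a received Set MAD
 * @port: VEMA port the request arrived on
 * @recvd_mad: received EMA MAD
 * @rsp_mad: response MAD to fill in
 *
 * Dispatches on the attribute id; unsupported attributes are reported
 * in the response status.
 */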
static void vema_set(struct opa_vnic_vema_port *port,
		     struct opa_vnic_vema_mad *recvd_mad,
		     struct opa_vnic_vema_mad *rsp_mad)
{
	u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);

	switch (attr_id) {
	case OPA_EM_ATTR_CLASS_PORT_INFO:
		vema_set_class_port_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_INFO:
		vema_set_veswport_info(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
		vema_set_mac_entries(port, recvd_mad, rsp_mad);
		break;
	case OPA_EM_ATTR_DELETE_VESW:
		vema_set_delete_vesw(port, recvd_mad, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
}
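
/**
 * vema_send -- Send completion handler for the VEMA MAD agent
 * @mad_agent: MAD agent
 * @mad_wc: send work completion
 *
 * Frees the address handle and send buffer of the completed response.
 */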
static void vema_send(struct ib_mad_agent *mad_agent,
		      struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_wc->send_buf);
}
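
/**
 * vema_recv -- Receive handler for the VEMA MAD agent
 * @mad_agent: MAD agent
 * @send_buf: send buffer (unused here)
 * @mad_wc: received work completion
 *
 * Builds and posts a GetResp MAD for every Get/Set request received
 * from the EM.
 */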
static void vema_recv(struct ib_mad_agent *mad_agent,
		      struct ib_mad_send_buf *send_buf,
		      struct ib_mad_recv_wc *mad_wc)
{
	struct opa_vnic_vema_port *port;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct opa_vnic_vema_mad *vema_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	port = mad_agent->context;
	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto free_recv_mad;

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA,
				 GFP_KERNEL, OPA_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;
	vema_mad = rsp->mad;
	memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);
	vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	vema_mad->mad_hdr.status = 0;

	/* Lock ensures the vport adapters are not removed during processing */
	mutex_lock(&port->lock);

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
			 vema_mad);
		break;
	case IB_MGMT_METHOD_SET:
		vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
			 vema_mad);
		break;
	default:
		vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
		break;
	}
	mutex_unlock(&port->lock);

	if (!ib_post_send_mad(rsp, NULL)) {
		/*
		 * On a successful post, the ah and the send buffer are
		 * released by the send handler (vema_send).
		 */
		goto free_recv_mad;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
free_recv_mad:
	ib_free_recv_mad(mad_wc);
}
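
/**
 * vema_get_port -- Get the VEMA port for an OPA port number
 * @cport: control port
 * @port_num: 1-based OPA port number
 *
 * The per-port structures are laid out immediately after the control
 * port structure.
 *
 * Return: the VEMA port, or NULL if @port_num is out of range.
 */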
static struct opa_vnic_vema_port *
vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
{
	struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport);

	if (port_num > cport->num_ports)
		return NULL;

	return port + (port_num - 1);
}
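
/**
 * opa_vnic_vema_send_trap -- Send a trap to the EM
 * @adapter: vnic port adapter
 * @data: trap data to report
 * @lid: issuer LID
 *
 * Traps are rate limited to OPA_VNIC_TRAP_BURST_LIMIT per
 * OPA_VNIC_TRAP_TIMEOUT interval and are sent to the trap LID
 * programmed in the class port info.
 */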
void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
			     struct __opa_veswport_trap *data, u32 lid)
{
	struct opa_vnic_ctrl_port *cport = adapter->cport;
	struct ib_mad_send_buf *send_buf;
	struct opa_vnic_vema_port *port;
	struct ib_device *ibp;
	struct opa_vnic_vema_mad_trap *trap_mad;
	struct opa_class_port_info *class;
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	struct opa_veswport_trap *trap;
	u32 trap_lid;
	u16 pkey_idx;

	if (!cport)
		goto err_exit;
	ibp = cport->ibdev;
	port = vema_get_port(cport, data->opaportnum);
	if (!port || !port->mad_agent)
		goto err_exit;

	if (time_before(jiffies, adapter->trap_timeout)) {
		if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) {
			v_warn("Trap rate exceeded\n");
			goto err_exit;
		} else {
			adapter->trap_count++;
		}
	} else {
		adapter->trap_count = 0;
	}

	class = &port->class_port_info;

	/* Build the address handle from the class port info trap settings */
	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(ibp, port->port_num);
	rdma_ah_set_sl(&ah_attr,
		       GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd));
	rdma_ah_set_port_num(&ah_attr, port->port_num);
	trap_lid = be32_to_cpu(class->trap_lid);

	/*
	 * The class port info is programmed by the EM; a zero trap LID
	 * means no valid trap destination has been configured yet.
	 */
	if (!trap_lid) {
		c_err("%s: Invalid dlid\n", __func__);
		goto err_exit;
	}

	rdma_ah_set_dlid(&ah_attr, trap_lid);
	ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
	if (IS_ERR(ah)) {
		c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
		c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
		      rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr),
		      rdma_ah_get_port_num(&ah_attr));
		goto err_exit;
	}

	if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL,
			 &pkey_idx) < 0) {
		c_err("%s:full key not found, defaulting to partial\n",
		      __func__);
		if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL,
				 &pkey_idx) < 0)
			pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
				      IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf)) {
		c_err("%s:Couldn't allocate send buf\n", __func__);
		goto err_sndbuf;
	}

	send_buf->ah = ah;

	/* Fill in the common MAD header */
	trap_mad = send_buf->mad;
	trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
	trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA;
	trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION;
	trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP;
	port->tid++;
	trap_mad->mad_hdr.tid = cpu_to_be64(port->tid);
	trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE;

	/* Vendor OUI */
	trap_mad->oui[0] = INTEL_OUI_1;
	trap_mad->oui[1] = INTEL_OUI_2;
	trap_mad->oui[2] = INTEL_OUI_3;

	/* Notice header */
	trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1;
	trap_mad->notice.oui_1 = INTEL_OUI_1;
	trap_mad->notice.oui_2 = INTEL_OUI_2;
	trap_mad->notice.oui_3 = INTEL_OUI_3;
	trap_mad->notice.issuer_lid = cpu_to_be32(lid);

	/* Trap data */
	trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data;
	trap->fabric_id = cpu_to_be16(data->fabric_id);
	trap->veswid = cpu_to_be16(data->veswid);
	trap->veswportnum = cpu_to_be32(data->veswportnum);
	trap->opaportnum = cpu_to_be16(data->opaportnum);
	trap->veswportindex = data->veswportindex;
	trap->opcode = data->opcode;

	/* On a successful post, the send handler frees the ah and send buf */
	if (ib_post_send_mad(send_buf, NULL)) {
		ib_free_send_mad(send_buf);
	} else {
		if (adapter->trap_count)
			return;
		adapter->trap_timeout = jiffies +
					usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT);
		return;
	}

err_sndbuf:
	rdma_destroy_ah(ah, 0);
err_exit:
	v_err("Aborting trap\n");
}
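
/**
 * opa_vnic_event -- IB event handler
 * @handler: event handler registered for the port
 * @record: event record
 *
 * Propagates port active/error events to the carrier state of all vnic
 * netdevs on the affected port.
 */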
static void opa_vnic_event(struct ib_event_handler *handler,
			   struct ib_event *record)
{
	struct opa_vnic_vema_port *port =
		container_of(handler, struct opa_vnic_vema_port, event_handler);
	struct opa_vnic_ctrl_port *cport = port->cport;
	struct opa_vnic_adapter *adapter;
	unsigned long index;

	if (record->element.port_num != port->port_num)
		return;

	c_dbg("OPA_VNIC received event %d on device %s port %d\n",
	      record->event, dev_name(&record->device->dev),
	      record->element.port_num);

	if (record->event != IB_EVENT_PORT_ERR &&
	    record->event != IB_EVENT_PORT_ACTIVE)
		return;

	xa_for_each(&port->vports, index, adapter) {
		if (record->event == IB_EVENT_PORT_ACTIVE)
			netif_carrier_on(adapter->netdev);
		else
			netif_carrier_off(adapter->netdev);
	}
}
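
/**
 * vema_unregister -- Tear down the VEMA ports
 * @cport: control port
 *
 * Removes all vnic netdevs and unregisters the MAD agent and event
 * handler of every port.
 */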
static void vema_unregister(struct opa_vnic_ctrl_port *cport)
{
	struct opa_vnic_adapter *adapter;
	unsigned long index;
	int i;

	for (i = 1; i <= cport->num_ports; i++) {
		struct opa_vnic_vema_port *port = vema_get_port(cport, i);

		if (!port->mad_agent)
			continue;

		/* Lock ensures no MAD is being processed */
		mutex_lock(&port->lock);
		xa_for_each(&port->vports, index, adapter)
			opa_vnic_rem_netdev(adapter);
		mutex_unlock(&port->lock);

		ib_unregister_mad_agent(port->mad_agent);
		port->mad_agent = NULL;
		mutex_destroy(&port->lock);
		xa_destroy(&port->vports);
		ib_unregister_event_handler(&port->event_handler);
	}
}
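
/**
 * vema_register -- Register the VEMA MAD agents
 * @cport: control port
 *
 * Registers an IB event handler and a MAD agent for each port on the
 * device.
 *
 * Return: 0 on success, a negative errno if MAD agent registration
 * fails.
 */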
static int vema_register(struct opa_vnic_ctrl_port *cport)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA,
		.mgmt_class_version = OPA_MGMT_BASE_VERSION,
		.oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 }
	};
	int i;

	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	/* Register ib event handler and mad agent for each port on dev */
	for (i = 1; i <= cport->num_ports; i++) {
		struct opa_vnic_vema_port *port = vema_get_port(cport, i);
		int ret;

		port->cport = cport;
		port->port_num = i;

		INIT_IB_EVENT_HANDLER(&port->event_handler,
				      cport->ibdev, opa_vnic_event);
		ib_register_event_handler(&port->event_handler);

		xa_init(&port->vports);
		mutex_init(&port->lock);
		port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
							IB_QPT_GSI, &reg_req,
							IB_MGMT_RMPP_VERSION,
							vema_send, vema_recv,
							port, 0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			port->mad_agent = NULL;
			mutex_destroy(&port->lock);
			vema_unregister(cport);
			return ret;
		}
	}

	return 0;
}
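
/**
 * opa_vnic_ctrl_config_dev -- Set or clear the EthOnFabric capability
 * @cport: control port
 * @en: true to advertise the capability, false to clear it
 *
 * Updates the IsEthOnFabricSupported bit in the port capability mask
 * of every port on the device.
 */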
static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
{
	struct ib_port_modify pm = { 0 };
	int i;

	if (en)
		pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
	else
		pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;

	for (i = 1; i <= cport->num_ports; i++)
		ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
}
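
/**
 * opa_vnic_vema_add_one -- Handle a new ib device
 * @device: ib device pointer
 *
 * Allocates the control port plus one VEMA port per physical port and
 * registers the management agents.
 *
 * Return: 0 on success, a negative errno otherwise.
 */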
static int opa_vnic_vema_add_one(struct ib_device *device)
{
	struct opa_vnic_ctrl_port *cport;
	int rc, size = sizeof(*cport);

	if (!rdma_cap_opa_vnic(device))
		return -EOPNOTSUPP;

	size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
	cport = kzalloc(size, GFP_KERNEL);
	if (!cport)
		return -ENOMEM;

	cport->num_ports = device->phys_port_cnt;
	cport->ibdev = device;

	/* Initialize opa vnic management agent (vema) */
	rc = vema_register(cport);
	if (!rc)
		c_info("VNIC client initialized\n");

	ib_set_client_data(device, &opa_vnic_client, cport);
	opa_vnic_ctrl_config_dev(cport, true);
	return 0;
}
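
/**
 * opa_vnic_vema_rem_one -- Handle ib device removal
 * @device: ib device pointer
 * @client_data: ib client data (control port)
 *
 * Clears the EthOnFabric capability, unregisters the management agents
 * and frees the control port.
 */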
static void opa_vnic_vema_rem_one(struct ib_device *device,
				  void *client_data)
{
	struct opa_vnic_ctrl_port *cport = client_data;

	c_info("removing VNIC client\n");
	opa_vnic_ctrl_config_dev(cport, false);
	vema_unregister(cport);
	kfree(cport);
}

static int __init opa_vnic_init(void)
{
	int rc;

	rc = ib_register_client(&opa_vnic_client);
	if (rc)
		pr_err("VNIC driver register failed %d\n", rc);

	return rc;
}
module_init(opa_vnic_init);

static void opa_vnic_deinit(void)
{
	ib_unregister_client(&opa_vnic_client);
}
module_exit(opa_vnic_deinit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel OPA Virtual Network driver");