#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)

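/*
 * The values above are MAD status codes kept in network byte order so they
 * can be OR'd directly into the big-endian status word of the reply:
 * 0x0004 = unsupported class version, 0x0008 = unsupported method,
 * 0x000C = unsupported method/attribute combination, and 0x001C = invalid
 * attribute or attribute-modifier field.
 */
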
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
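
/*
 * The value returned above tells the ib_mad dispatcher both that the MAD
 * was handled (IB_MAD_RESULT_SUCCESS) and that the out_mad buffer, which
 * the handlers modify in place, should be transmitted back to the
 * requester (IB_MAD_RESULT_REPLY).
 */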

static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));
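
/*
 * The struct above mirrors the wire format of the subnet management
 * NodeInfo attribute: a 40-byte packed layout of version/type/port counts,
 * SystemImageGUID, NodeGUID, PortGUID, PartitionCap, DeviceID, Revision,
 * LocalPortNum and a 3-byte VendorID (IEEE OUI), which is why fixed-width
 * big-endian types and the packed attribute are used.
 */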

static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 vendor, majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * The chip has a single IB port, so phys_port_cnt and the device
	 * GUID can be used directly for the per-port fields.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network byte order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->ipath_vendorid;
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}

static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* The attribute modifier selects a block of 8 GUIDs. */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;
		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first entry is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

/**
 * set_overrunthreshold - set the overrun threshold in the IBC control register
 * @dd: the infinipath device
 * @n: the new threshold
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

static int get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

/**
 * set_phyerrthreshold - set the physical error threshold in the IBC control register
 * @dd: the infinipath device
 * @n: the new threshold
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns 1 if the default is SLEEP, 0 if the default is POLL.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}
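
/*
 * The IBC control bit read above maps onto PortInfo:LinkDownDefaultState:
 * the Get(PortInfo) handler below reports 1 (Sleep) when the bit is set and
 * 2 (Polling) when it is clear, and set_linkdowndefaultstate() writes the
 * bit accordingly.
 */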

static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);

	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* Map the chip link state to the PortInfo PortState values. */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/*
	 * Only one VL is supported, so the VL arbitration fields stay zero.
	 * The MTU cap depends on whether 4KB MTUs are enabled.
	 */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;

	/* HCAs ignore VLStallCount and HOQLife */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OperationalVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_Key violations are counted by hardware; report the delta. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* RespTimeValue of 3 => 4.096 usec * 2^3 = 32.768 usec */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);

	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
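
/*
 * For reference, the encodings reported above follow the PortInfo
 * conventions: link width values 1, 2 and 8 mean 1X, 4X and 12X; link
 * speed values 1, 2 and 4 mean 2.5, 5.0 and 10.0 Gbps per lane (SDR,
 * DDR, QDR); and the MTU codes IB_MTU_256..IB_MTU_4096 are 1..5.
 */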

/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* The attribute modifier selects a block of 32 16-bit P_Key entries. */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}

/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: default to SLEEP if non-zero, otherwise POLL
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set the PortInfo attributes and, if requested, change the link state.
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* LinkWidthEnabled: 0 is a no-op, 0xFF means "all supported". */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* LinkSpeedEnabled: 0 is a no-op, 0xF means "all supported". */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* LinkDownDefaultState: 0 = no change, 1 = Sleep, 2 = Polling. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0:		/* NOP */
		break;
	case 1:		/* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2:		/* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0. */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * The hardware P_Key violation counter can't be cleared, so
	 * snapshot it here and report the delta from now on.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.  Changing the port physical state only makes
	 * sense if the link is currently down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid, and the
	 * port must be in the correct state for them to take effect.
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				     IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}

/**
 * rm_pkey - drop a reference to the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY entry
 *
 * Returns 1 if this was the last reference and the hardware table entry
 * was cleared, otherwise 0.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Returns an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register was modified.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race.  Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
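
/*
 * Return-value convention used by the two helpers above (and relied on by
 * set_pkeys() below): 1 means the shared hardware table changed and the
 * partition key register must be rewritten, 0 means only a reference
 * count changed, and add_pkey() returns -EEXIST or -EBUSY when the key
 * conflicts with an existing entry or no free slot is available.
 */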

/**
 * set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the new PKEY table
 * @port: the IB port number (for the PKEY change event)
 */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;
		struct ib_event event;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev->ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
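
/*
 * Illustration of the packing above, assuming the four 16-bit slots of
 * dd->ipath_pkeys hold { 0xFFFF, 0x8001, 0, 0 }: the 64-bit value written
 * to the partition key register is 0x000000008001FFFF, i.e. slot 0 lands
 * in bits 15:0, slot 1 in bits 31:16, and so on.
 */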

static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev, u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_get_npkeys(dev->dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}

static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	p->capability_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	ib_set_cpi_resp_time(p, 18);

	return reply((struct ib_smp *) pmp);
}

/*
 * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
 * which specify the N'th counter's capabilities.  We support 5 counters
 * which only count the mandatory quantities.
 */
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
				    COUNTER_MASK(1, 1) | \
				    COUNTER_MASK(1, 2) | \
				    COUNTER_MASK(1, 3) | \
				    COUNTER_MASK(1, 4))
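
/*
 * Worked example: COUNTER_MASK(1, n) places the value 1 in the 3-bit mask
 * slot for counter n, counting down from the top of the field, so
 * COUNTER_MASK0_9 expands to cpu_to_be32(0x09248000), i.e. a mask of 1 for
 * each of the five counter_select slots this driver implements.
 */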

static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
	/*
	 * Chips with a port-sample status register derive the sample tick
	 * from the active link speed; older chips report a fixed tick.
	 */
	if (crp->cr_psstat)
		p->tick = dev->dd->ipath_link_speed_active - 1;
	else
		p->tick = 250;		/* 1 usec. */
	p->counter_width = 4;		/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 status;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	/* Only start a new sample once the previous one has completed. */
	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
		dev->pma_sample_start = be32_to_cpu(p->sample_start);
		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
		dev->pma_tag = be16_to_cpu(p->tag);
		dev->pma_counter_select[0] = p->counter_select[0];
		dev->pma_counter_select[1] = p->counter_select[1];
		dev->pma_counter_select[2] = p->counter_select[2];
		dev->pma_counter_select[3] = p->counter_select[3];
		dev->pma_counter_select[4] = p->counter_select[4];
		if (crp->cr_psstat) {
			ipath_write_creg(dev->dd, crp->cr_psinterval,
					 dev->pma_sample_interval);
			ipath_write_creg(dev->dd, crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = (crp->cr_psxmitdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
			dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = (crp->cr_psrcvdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
			dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = (crp->cr_psxmitpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
			dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = (crp->cr_psrcvpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
			dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = (crp->cr_psxmitwaitcount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
			dev->ipath_xmit_wait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be32(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					      struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* 64-bit counters */
	p->extended_width = cpu_to_be32(0x80000000);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be64(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;
	u8 port_select = p->port_select;

	ipath_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		dev->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		dev->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= dev->z_vl15_dropped;
	cntrs.vl15_dropped += dev->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
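
/*
 * The clamping above reflects the widths of the PortCounters fields: they
 * are only 8, 16 or 32 bits wide, so an accumulated value that no longer
 * fits is reported as the all-ones maximum instead of wrapping.
 */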

static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= dev->z_port_xmit_data;
	rwords -= dev->z_port_rcv_data;
	spkts -= dev->z_port_xmit_packets;
	rpkts -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		dev->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		dev->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		dev->n_vl15_dropped = 0;
		dev->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	return recv_pma_get_portcounters(pmp, ibdev, port);
}

static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout &&
	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		dev->mkeyprot = 0;
	}

	/*
	 * M_Key checking depends on PortInfo:M_KeyProtectBits and the
	 * current lease state; see the Set(PortInfo) handling above.
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      dev->mkeyprot >= 2))) {
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_TRAP_REPRESS:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
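
/*
 * Note on the M_Key logic above: with a non-zero M_Key configured, a Set()
 * request (or a Get() when mkeyprot >= 2) carrying the wrong key increments
 * MKeyViolations and is consumed without a reply; the first such violation
 * arms the lease timer (when a lease period is configured), which is
 * cleared again when a properly keyed request is processed or the lease
 * expires.
 */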

static int process_perf(struct ib_device *ibdev, u8 port_num,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * ipath_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port_num: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in: the incoming MAD header
 * @in_mad_size: size of the incoming MAD
 * @out: any outgoing MAD reply
 * @out_mad_size: size available for the reply
 * @out_mad_pkey_index: unused by this driver
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		      const struct ib_mad_hdr *in, size_t in_mad_size,
		      struct ib_mad_hdr *out, size_t *out_mad_size,
		      u16 *out_mad_pkey_index)
{
	int ret;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port_num,
				   in_mad, out_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port_num, in_mad, out_mad);
		goto bail;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}