/*
 * OPA/IB subnet management agent (SMA) support for the hfi1 driver:
 * SMP processing for node, port, partition-table, and SC/SL/VL mapping
 * attributes, plus trap generation toward the subnet manager.
 */

#include <linux/net.h>
#include <rdma/opa_addr.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))

#include "hfi.h"
#include "mad.h"
#include "trace.h"
#include "qp.h"
#include "vnic.h"

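/* the reset value from the FM is supposed to be 0xffff, handle both */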
#define OPA_LINK_WIDTH_RESET_OLD	0x0fff
#define OPA_LINK_WIDTH_RESET		0xffff

struct trap_node {
	struct list_head list;
	struct opa_mad_notice_attr data;
	__be64 tid;
	int len;
	u32 retry;
	u8 in_use;
	u8 repress;
};

static int smp_length_check(u32 data_size, u32 request_len)
{
	if (unlikely(request_len < data_size))
		return -EINVAL;

	return 0;
}

static int reply(struct ib_mad_hdr *smp)
{
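	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */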
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static inline void clear_opa_smp_data(struct opa_smp *smp)
{
	void *data = opa_get_smp_data(smp);
	size_t size = opa_get_smp_data_size(smp);

	memset(data, 0, size);
}

static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
		return ppd->pkeys[pkey_idx];

	return 0;
}

void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
{
	struct ib_event event;

	event.event = IB_EVENT_PKEY_CHANGE;
	event.device = &dd->verbs_dev.rdi.ibdev;
	event.element.port_num = port;
	ib_dispatch_event(&event);
}

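/*
 * If the port is down, clean up all pending traps.  We need to be careful
 * with the given trap, because it may be queued.
 */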
static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
{
	struct trap_node *node, *q;
	unsigned long flags;
	struct list_head trap_list;
	int i;

	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
		ibp->rvp.trap_lists[i].list_len = 0;
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);

		/*
		 * Remove all items from the list, freeing all the non-given
		 * traps.
		 */
		list_for_each_entry_safe(node, q, &trap_list, list) {
			list_del(&node->list);
			if (node != trap)
				kfree(node);
		}
	}

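	/*
	 * If this wasn't on one of the lists it would not be freed.  If it
	 * was on a list it is now safe to free.
	 */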
	kfree(trap);
}

static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
					    struct trap_node *trap)
{
	struct trap_node *node;
	struct trap_list *trap_list;
	unsigned long flags;
	unsigned long timeout;
	int found = 0;
	unsigned int queue_id;
	static int trap_count;

	queue_id = trap->data.generic_type & 0x0F;
	if (queue_id >= RVT_MAX_TRAP_LISTS) {
		trap_count++;
		pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
				   trap->data.generic_type, trap_count);
		kfree(trap);
		return NULL;
	}

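	/*
	 * Since the retry (handle timeout) does not remove a trap request
	 * from the list, all we have to do is compare the node.
	 */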
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	trap_list = &ibp->rvp.trap_lists[queue_id];

	list_for_each_entry(node, &trap_list->list, list) {
		if (node == trap) {
			node->retry++;
			found = 1;
			break;
		}
	}

	/* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN traps */
	if (!found) {
		if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
			trap_list->list_len++;
			list_add_tail(&trap->list, &trap_list->list);
		} else {
			pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
					    trap->data.generic_type);
			kfree(trap);
		}
	}

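	/*
	 * Next check to see if there is a timer pending.  If not, set it up
	 * and get the first trap from the list.
	 */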
	node = NULL;
	if (!timer_pending(&ibp->rvp.trap_timer)) {
		/*
		 * Scale the base trap timeout by the subnet timeout and
		 * convert to jiffies before arming the timer; mark the
		 * first queued trap as in use so it is not freed while
		 * the send is in flight.
		 */
		timeout = (RVT_TRAP_TIMEOUT *
			   (1UL << ibp->rvp.subnet_timeout)) / 1000;
		mod_timer(&ibp->rvp.trap_timer,
			  jiffies + usecs_to_jiffies(timeout));
		node = list_first_entry(&trap_list->list, struct trap_node,
					list);
		node->in_use = 1;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return node;
}

static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
					 struct opa_smp *smp)
{
	struct trap_list *trap_list;
	struct trap_node *trap;
	unsigned long flags;
	int i;

	if (smp->attr_id != IB_SMP_ATTR_NOTICE)
		return;

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
		trap_list = &ibp->rvp.trap_lists[i];
		trap = list_first_entry_or_null(&trap_list->list,
						struct trap_node, list);
		if (trap && trap->tid == smp->tid) {
			if (trap->in_use) {
				trap->repress = 1;
			} else {
				trap_list->list_len--;
				list_del(&trap->list);
				kfree(trap);
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
}

static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
				   struct rdma_ah_attr *attr, u32 dlid)
{
	rdma_ah_set_dlid(attr, dlid);
	rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
	if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
		grh->sgid_index = 0;
		grh->hop_limit = 1;
		grh->dgid.global.subnet_prefix =
			ibp->rvp.gid_prefix;
		grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
	}
}

static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
			      struct rvt_ah *ah, u32 dlid)
{
	struct rdma_ah_attr attr;
	struct rvt_qp *qp0;
	int ret = -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.type = ah->ibah.type;
	hfi1_update_sm_ah_attr(ibp, &attr, dlid);
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ret = rdma_modify_ah(&ah->ibah, &attr);
	rcu_read_unlock();
	return ret;
}

static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
{
	struct rdma_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ppd(ppd);
	u8 port_num = ppd->port;

	memset(&attr, 0, sizeof(attr));
	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
	hfi1_update_sm_ah_attr(ibp, &attr, dlid);
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
	rcu_read_unlock();
	return ah;
}

static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct opa_smp *smp;
	unsigned long flags;
	int pkey_idx;
	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

	agent = ibp->rvp.send_agent;
	if (!agent) {
		cleanup_traps(ibp, trap);
		return;
	}

	/* o14-3.2.1 */
	if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
		cleanup_traps(ibp, trap);
		return;
	}

	/* Add the trap to the list if necessary and see if we can send it */
	trap = check_and_add_trap(ibp, trap);
	if (!trap)
		return;

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
			__func__, hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = OPA_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = OPA_SM_CLASS_VERSION;
	smp->method = IB_MGMT_METHOD_TRAP;

	/* Only update the transaction ID for new traps */
	if (trap->tid == 0) {
		ibp->rvp.tid++;
		/* make sure that tid != 0 */
		if (ibp->rvp.tid == 0)
			ibp->rvp.tid++;
		trap->tid = cpu_to_be64(ibp->rvp.tid);
	}
	smp->tid = trap->tid;

	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */

	memcpy(smp->route.lid.data, &trap->data, trap->len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah)) {
				spin_unlock_irqrestore(&ibp->rvp.lock, flags);
				return;
			}
			send_buf->ah = ah;
			ibp->rvp.sm_ah = ibah_to_rvtah(ah);
		} else {
			spin_unlock_irqrestore(&ibp->rvp.lock, flags);
			return;
		}
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
	}

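	/*
	 * If the trap was repressed while things were getting set up, don't
	 * bother sending it.  This could happen for a retry.
	 */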
	if (trap->repress) {
		list_del(&trap->list);
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		kfree(trap);
		ib_free_send_mad(send_buf);
		return;
	}

	trap->in_use = 0;
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (ib_post_send_mad(send_buf, NULL))
		ib_free_send_mad(send_buf);
}

void hfi1_handle_trap_timer(struct timer_list *t)
{
	struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
	struct trap_node *trap = NULL;
	unsigned long flags;
	int i;

	/* Find the trap with the highest priority */
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
		trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
						struct trap_node, list);
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (trap)
		send_trap(ibp, trap);
}

static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
{
	struct trap_node *trap;

	trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
	if (!trap)
		return NULL;

	INIT_LIST_HEAD(&trap->list);
	trap->data.generic_type = type;
	trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
	trap->data.trap_num = trap_num;
	trap->data.issuer_lid = cpu_to_be32(lid);

	return trap;
}

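/*
 * Send a bad P_Key trap (ch. 14.3.8).
 */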
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
		   u32 qp1, u32 qp2, u32 lid1, u32 lid2)
{
	struct trap_node *trap;
	u32 lid = ppd_from_ibp(ibp)->lid;

	ibp->rvp.n_pkt_drops++;
	ibp->rvp.pkey_violations++;

	trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
				lid);
	if (!trap)
		return;

	/* Send violation trap */
	trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
	trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
	trap->data.ntc_257_258.key = cpu_to_be32(key);
	trap->data.ntc_257_258.sl = sl << 3;
	trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
	trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);

	trap->len = sizeof(trap->data);
	send_trap(ibp, trap);
}

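/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */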
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
	struct trap_node *trap;
	u32 lid = ppd_from_ibp(ibp)->lid;

	trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
				lid);
	if (!trap)
		return;

	/* Send violation trap */
	trap->data.ntc_256.lid = trap->data.issuer_lid;
	trap->data.ntc_256.method = mad->method;
	trap->data.ntc_256.attr_id = mad->attr_id;
	trap->data.ntc_256.attr_mod = mad->attr_mod;
	trap->data.ntc_256.mkey = mkey;
	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		trap->data.ntc_256.dr_slid = dr_slid;
		trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
			trap->data.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
		}
		trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
		       hop_cnt);
	}

	trap->len = sizeof(trap->data);

	send_trap(ibp, trap);
}

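/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */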
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct trap_node *trap;
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	trap = create_trap_node(IB_NOTICE_TYPE_INFO,
				OPA_TRAP_CHANGE_CAPABILITY,
				lid);
	if (!trap)
		return;

	trap->data.ntc_144.lid = trap->data.issuer_lid;
	trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);

	trap->len = sizeof(trap->data);
	send_trap(ibp, trap);
}

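/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */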
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
	struct trap_node *trap;
	u32 lid = ppd_from_ibp(ibp)->lid;

	trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
				lid);
	if (!trap)
		return;

	trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
	trap->data.ntc_145.lid = trap->data.issuer_lid;

	trap->len = sizeof(trap->data);
	send_trap(ibp, trap);
}

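/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */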
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
	struct trap_node *trap;
	u32 lid = ppd_from_ibp(ibp)->lid;

	trap = create_trap_node(IB_NOTICE_TYPE_INFO,
				OPA_TRAP_CHANGE_CAPABILITY,
				lid);
	if (!trap)
		return;

	trap->data.ntc_144.lid = trap->data.issuer_lid;
	trap->data.ntc_144.change_flags =
		cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

	trap->len = sizeof(trap->data);
	send_trap(ibp, trap);
}

static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
				   u8 *data, struct ib_device *ibdev,
				   u8 port, u32 *resp_len, u32 max_len)
{
	struct opa_node_description *nd;

	if (am || smp_length_check(sizeof(*nd), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	nd = (struct opa_node_description *)data;

	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

	if (resp_len)
		*resp_len += sizeof(*nd);

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct opa_node_info *ni;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	ni = (struct opa_node_info *)data;

	/* GUID 0 is illegal */
	if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
	    smp_length_check(sizeof(*ni), max_len) ||
	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
	ni->base_version = OPA_MGMT_BASE_VERSION;
	ni->class_version = OPA_SM_CLASS_VERSION;
	ni->node_type = 1;     /* channel adapter */
	ni->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	ni->system_image_guid = ib_hfi1_sys_image_guid;
	ni->node_guid = ibdev->node_guid;
	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	ni->device_id = cpu_to_be16(dd->pcidev->device);
	ni->revision = cpu_to_be32(dd->minrev);
	ni->local_port_num = port;
	ni->vendor_id[0] = dd->oui1;
	ni->vendor_id[1] = dd->oui2;
	ni->vendor_id[2] = dd->oui3;

	if (resp_len)
		*resp_len += sizeof(*ni);

	return reply((struct ib_mad_hdr *)smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    ibdev->node_guid == 0 ||
	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
	nip->base_version = OPA_MGMT_BASE_VERSION;
	nip->class_version = OPA_SM_CLASS_VERSION;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_hfi1_sys_image_guid;
	nip->node_guid = ibdev->node_guid;
	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->pcidev->device);
	nip->revision = cpu_to_be32(dd->minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = dd->oui1;
	nip->vendor_id[1] = dd->oui2;
	nip->vendor_id[2] = dd->oui3;

	return reply((struct ib_mad_hdr *)smp);
}

static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}

static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		      int mad_flags, __be64 mkey, __be32 dr_slid,
		      u8 return_path[], u8 hop_cnt)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (mad->method == IB_MGMT_METHOD_GET ||
	     mad->method == IB_MGMT_METHOD_SET ||
	     mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (mad->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
			/* fall through */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			bad_mkey(ibp, mad, mkey, dr_slid, return_path,
				 hop_cnt);
			ret = 1;
		}
	}

	return ret;
}

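/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */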
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};

static int write_lcb_cache(u32 off, u64 val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			lcb_cache[i].val = val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

void read_ltp_rtt(struct hfi1_devdata *dd)
{
	u64 reg;

	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
	else
		write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}

static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	int i;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct opa_port_info *pi = (struct opa_port_info *)data;
	u8 mtu;
	u8 credit_rate;
	u8 is_beaconing_active;
	u32 state;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 buffer_units;
	u64 tmp = 0;

	if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	pi->lid = cpu_to_be32(ppd->lid);

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pi->mkey = ibp->rvp.mkey;

	pi->subnet_prefix = ibp->rvp.gid_prefix;
	pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
	pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
	pi->sa_qp = cpu_to_be32(ppd->sa_qp);

	pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
	pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
	pi->link_width.active = cpu_to_be16(ppd->link_width_active);

	pi->link_width_downgrade.supported =
		cpu_to_be16(ppd->link_width_downgrade_supported);
	pi->link_width_downgrade.enabled =
		cpu_to_be16(ppd->link_width_downgrade_enabled);
	pi->link_width_downgrade.tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	pi->link_width_downgrade.rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);

	pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
	pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
	pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

	state = driver_lstate(ppd);

	if (start_of_sm_config && (state == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	pi->port_phys_conf = (ppd->port_type & 0xf);

	pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	pi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;

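	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */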
	smp_rmb();
	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
	pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
	pi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;

	pi->port_states.portphysstate_portstate =
		(driver_pstate(ppd) << 4) | state;

	pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;

	memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
	for (i = 0; i < ppd->vls_supported; i++) {
		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
		if ((i % 2) == 0)
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
		else
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
	}
	/* don't forget VL 15 */
	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
	pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
	pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
	pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
	pi->partenforce_filterraw |=
		(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
	if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
	if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
	pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);

	pi->vl.cap = ppd->vls_supported;
	pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
	pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
	pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

	pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;

	pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
					 OPA_PORT_LINK_MODE_OPA << 5 |
					 OPA_PORT_LINK_MODE_OPA);

	pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

	pi->port_mode = cpu_to_be16(
				ppd->is_active_optimize_enabled ?
					OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

	pi->port_packet_format.supported =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
			    OPA_PORT_PACKET_FORMAT_16B);
	pi->port_packet_format.enabled =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
			    OPA_PORT_PACKET_FORMAT_16B);

	/* Driver reports a fixed flit-interleave configuration */
	pi->flit_control.interleave = cpu_to_be16(0x1400);

	pi->link_down_reason = ppd->local_link_down_reason.sma;
	pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
	pi->port_error_action = cpu_to_be32(ppd->port_error_action);
	pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

	/* 32.768 usec. response time (guessing) */
	pi->resptimevalue = 3;

	pi->local_port_num = port;

	/* buffer info for FM */
	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

	pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
	pi->neigh_port_num = ppd->neighbor_port_number;
	pi->port_neigh_mode =
		(ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
		(ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
		(ppd->neighbor_fm_security ?
			OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

	/* HFIs shall always return VL15 credits to their
	 * neighbor in a timely manner, without any credit return pacing.
	 */
	credit_rate = 0;
	buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
	buffer_units |= (credit_rate << 6) &
				OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
	pi->buffer_units = cpu_to_be32(buffer_units);

	pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
	pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
					    << 3 | (OPA_MCAST_NR & 0x7));

	/* HFI supports a replay buffer 128 LTPs in size */
	pi->replay_depth.buffer = 0x80;
	/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

	/*
	 * this counter is 16 bits wide, but the replay_depth.wire
	 * variable is only 8 bits
	 */
	if (tmp > 0xff)
		tmp = 0xff;
	pi->replay_depth.wire = tmp;

	if (resp_len)
		*resp_len += sizeof(struct opa_port_info);

	return reply((struct ib_mad_hdr *)smp);
}

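/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */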
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd = dd->pport + port - 1;

	memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

	return 0;
}

static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_req = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	__be16 *p;
	u16 *q;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	size_t size;

	if (n_blocks_req == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_req);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

	if (smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	if (start_block + n_blocks_req > n_blocks_avail ||
	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_req, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	p = (__be16 *)data;
	q = (u16 *)data;
	/* get the real pkeys if we are requesting the first block */
	if (start_block == 0) {
		get_pkeys(dd, port, q);
		for (i = 0; i < npkeys; i++)
			p[i] = cpu_to_be16(q[i]);
		if (resp_len)
			*resp_len += size;
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	return reply((struct ib_mad_hdr *)smp);
}

enum {
	HFI_TRANSITION_DISALLOWED,
	HFI_TRANSITION_IGNORED,
	HFI_TRANSITION_ALLOWED,
	HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
	__D = HFI_TRANSITION_DISALLOWED,
	__I = HFI_TRANSITION_IGNORED,
	__A = HFI_TRANSITION_ALLOWED,
	__U = HFI_TRANSITION_UNDEFINED,
};

/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions below.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

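/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */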
static const struct {
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/* 2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	}
};

/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions below.
 */
#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

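/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */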
static const struct {
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/* 1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U},
	/* 2 */	{ __D, __I, __A, __D, __U},
	/* 3 */	{ __D, __D, __I, __A, __U},
	/* 4 */	{ __D, __D, __I, __I, __U},
	/* 5 */	{ __U, __U, __U, __U, __U},
	}
};

static int logical_transition_allowed(int old, int new)
{
	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
		pr_warn("invalid logical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORT_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into logical_state_transitions */
	old -= IB_PORT_DOWN;
	new -= IB_PORT_DOWN;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return logical_state_transitions.allowed[old][new];
}

static int physical_transition_allowed(int old, int new)
{
	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
		pr_warn("invalid physical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORTPHYSSTATE_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into physical_state_transitions */
	old -= IB_PORTPHYSSTATE_POLLING;
	new -= IB_PORTPHYSSTATE_POLLING;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return physical_state_transitions.allowed[old][new];
}

static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{
	u32 physical_old = driver_pstate(ppd);
	u32 logical_old = driver_lstate(ppd);
	int ret, logical_allowed, physical_allowed;

	ret = logical_transition_allowed(logical_old, logical_new);
	logical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid logical state transition %s -> %s\n",
			opa_lstate_name(logical_old),
			opa_lstate_name(logical_new));
		return ret;
	}

	ret = physical_transition_allowed(physical_old, physical_new);
	physical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid physical state transition %s -> %s\n",
			opa_pstate_name(physical_old),
			opa_pstate_name(physical_new));
		return ret;
	}

	if (logical_allowed == HFI_TRANSITION_IGNORED &&
	    physical_allowed == HFI_TRANSITION_IGNORED)
		return HFI_TRANSITION_IGNORED;

	/*
	 * A change request of Physical Port State from
	 * 'Offline' to 'Polling' should be ignored.
	 */
	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
	    (physical_new == IB_PORTPHYSSTATE_POLLING))
		return HFI_TRANSITION_IGNORED;

	/*
	 * Either physical_allowed or logical_allowed is
	 * HFI_TRANSITION_ALLOWED.
	 */
	return HFI_TRANSITION_ALLOWED;
}

static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state, int local_mad)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 link_state;
	int ret;

	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		/* error message emitted above */
		smp->status |= IB_SMP_INVALID_FIELD;
		return 0;
	}

	if (ret == HFI_TRANSITION_IGNORED)
		return 0;

	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
	    !(logical_state == IB_PORT_DOWN ||
	      logical_state == IB_PORT_NOP)) {
		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
			logical_state, phys_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	/*
	 * Logical state changes are summarized in OPAv1g1 spec.,
	 * Table 9-12; physical state changes are summarized in
	 * OPAv1g1 spec., Table 6.4.
	 */
	switch (logical_state) {
	case IB_PORT_NOP:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			break;
		/* fall through */
	case IB_PORT_DOWN:
		if (phys_state == IB_PORTPHYSSTATE_NOP) {
			link_state = HLS_DN_DOWNDEF;
		} else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
			link_state = HLS_DN_POLL;
			set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
					     0, OPA_LINKDOWN_REASON_FM_BOUNCE);
		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
			link_state = HLS_DN_DISABLE;
		} else {
			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
				phys_state);
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}

		if ((link_state == HLS_DN_POLL ||
		     link_state == HLS_DN_DOWNDEF)) {
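			/*
			 * Going to poll.  No matter what the current state,
			 * always move offline first, then tune and start the
			 * link.  This correctly handles a FM link bounce and
			 * a link enable.  Going offline is a no-op if already
			 * offline.
			 */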
			set_link_state(ppd, HLS_DN_OFFLINE);
			start_link(ppd);
		} else {
			set_link_state(ppd, link_state);
		}
		if (link_state == HLS_DN_DISABLE &&
		    (ppd->offline_disabled_reason >
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
		     ppd->offline_disabled_reason ==
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (link_state == HLS_DN_DISABLE && !local_mad)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		break;
	case IB_PORT_ARMED:
		ret = set_link_state(ppd, HLS_UP_ARMED);
		if (!ret)
			send_idle_sma(dd, SMA_IDLE_ARM);
		break;
	case IB_PORT_ACTIVE:
		if (ppd->neighbor_normal) {
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret == 0)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
		} else {
			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
			smp->status |= IB_SMP_INVALID_FIELD;
		}
		break;
	default:
		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
			logical_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	return 0;
}

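/**
 * __subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @am: the attribute modifier
 * @data: the data portion of the attribute
 * @ibdev: the infiniband device
 * @port: the port on the device
 * @resp_len: length of the response
 * @max_len: maximum length of the attribute
 * @local_mad: the mad was locally generated
 */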
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len, int local_mad)
{
	struct opa_port_info *pi = (struct opa_port_info *)data;
	struct ib_event event;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u8 clientrereg;
	unsigned long flags;
	u32 smlid;
	u32 lid;
	u8 ls_old, ls_new, ps_new;
	u8 vls;
	u8 msl;
	u8 crc_enabled;
	u16 lse, lwe, mtu;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	int ret, i, invalid = 0, call_set_mtu = 0;
	int call_link_downgrade_policy = 0;

	if (num_ports != 1 ||
	    smp_length_check(sizeof(*pi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	lid = be32_to_cpu(pi->lid);
	if (lid & 0xFF000000) {
		pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}

	smlid = be32_to_cpu(pi->sm_lid);
	if (smlid & 0xFF000000) {
		pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}

	clientrereg = (pi->clientrereg_subnettimeout &
			OPA_PI_MASK_CLIENT_REREGISTER);

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ls_old = driver_lstate(ppd);

	ibp->rvp.mkey = pi->mkey;
	if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
		ibp->rvp.gid_prefix = pi->subnet_prefix;
		event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&event);
	}
	ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);

	/* Must be a valid unicast LID address. */
	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
	    (hfi1_is_16B_mcast(lid))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
			lid);
	} else if (ppd->lid != lid ||
		   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
		if (ppd->lid != lid)
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);

		if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
			/* Manufacture GID from LID to support extended
			 * addresses
			 */
			ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
				be64_to_cpu(OPA_MAKE_ID(lid));
			event.event = IB_EVENT_GID_CHANGE;
			ib_dispatch_event(&event);
		}
	}

	msl = pi->smsl & OPA_PI_MASK_SMSL;
	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
		ppd->linkinit_reason =
			(pi->partenforce_filterraw &
			 OPA_PI_MASK_LINKINIT_REASON);

	/* Must be a valid unicast LID address. */
	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
	    (hfi1_is_16B_mcast(smlid))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
			if (msl != ibp->rvp.sm_sl)
				rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	if (pi->link_down_reason == 0) {
		ppd->local_link_down_reason.sma = 0;
		ppd->local_link_down_reason.latest = 0;
	}

	if (pi->neigh_link_down_reason == 0) {
		ppd->neigh_link_down_reason.sma = 0;
		ppd->neigh_link_down_reason.latest = 0;
	}

	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
	ppd->sa_qp = be32_to_cpu(pi->sa_qp);

	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
	lwe = be16_to_cpu(pi->link_width.enabled);
	if (lwe) {
		if (lwe == OPA_LINK_WIDTH_RESET ||
		    lwe == OPA_LINK_WIDTH_RESET_OLD)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if ((lwe & ~ppd->link_width_supported) == 0)
			set_link_width_enabled(ppd, lwe);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}
	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
	/* LWD.E is always applied - 0 means "disabled" */
	if (lwe == OPA_LINK_WIDTH_RESET ||
	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
		set_link_width_downgrade_enabled(ppd,
						 ppd->link_width_downgrade_supported);
	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
		/* only set and apply if something changed */
		if (lwe != ppd->link_width_downgrade_enabled) {
			set_link_width_downgrade_enabled(ppd, lwe);
			call_link_downgrade_policy = 1;
		}
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	lse = be16_to_cpu(pi->link_speed.enabled);
	if (lse) {
		if (lse & be16_to_cpu(pi->link_speed.supported))
			set_link_speed_enabled(ppd, lse);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot =
		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
			      ibp->rvp.vl_high_limit);

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	for (i = 0; i < ppd->vls_supported; i++) {
		if ((i % 2) == 0)
			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
					   4) & 0xF);
		else
			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
					  0xF);
		if (mtu == 0xffff) {
			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
				mtu,
				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
			smp->status |= IB_SMP_INVALID_FIELD;
			mtu = hfi1_max_mtu; /* use a valid MTU */
		}
		if (dd->vld[i].mtu != mtu) {
			dd_dev_info(dd,
				    "MTU change on vl %d from %d to %d\n",
				    i, dd->vld[i].mtu, mtu);
			dd->vld[i].mtu = mtu;
			call_set_mtu++;
		}
	}
	/* As per OPAV1 spec: VL15 must support and be configured
	 * for operation with a 2048 or larger MTU.
	 */
	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
	if (mtu < 2048 || mtu == 0xffff)
		mtu = 2048;
	if (dd->vld[15].mtu != mtu) {
		dd_dev_info(dd,
			    "MTU change on vl 15 from %d to %d\n",
			    dd->vld[15].mtu, mtu);
		dd->vld[15].mtu = mtu;
		call_set_mtu++;
	}
	if (call_set_mtu)
		set_mtu(ppd);

	/* Set operational VLs */
	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
	if (vls) {
		if (vls > ppd->vls_supported) {
			pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
				pi->operational_vls);
			smp->status |= IB_SMP_INVALID_FIELD;
		} else {
			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
					    vls) == -EINVAL)
				smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	if (pi->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pi->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pi->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ibp->rvp.subnet_timeout =
		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
	crc_enabled >>= 4;
	crc_enabled &= 0xf;

	if (crc_enabled != 0)
		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

	ppd->is_active_optimize_enabled =
			!!(be16_to_cpu(pi->port_mode)
					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

	ls_new = pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_STATE;
	ps_new = (pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0) {
				invalid = 1;
				smp->status |= IB_SMP_INVALID_FIELD;
			}
		}
	}

	/* Handle CLIENT_REREGISTER event b/c SM asked us for it */
	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

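	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */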
	if (!invalid) {
		ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
		if (ret)
			return ret;
	}

	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
				      max_len);

	/* restore re-reg bit per o14-12.2.1 */
	pi->clientrereg_subnettimeout |= clientrereg;

	/*
	 * Apply the new link downgrade policy.  This may result in a link
	 * bounce.  Do this after everything else so things are settled.
	 * A possible caveat: if setting the port state above failed, the
	 * policy change is still applied here.
	 */
	if (call_link_downgrade_policy)
		apply_link_downgrade_policy(ppd, 0);

	return ret;

get_only:
	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

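/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */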
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd;
	int i;
	int changed = 0;
	int update_includes_mgmt_partition = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	/*
	 * If the update does not include the management pkey, don't do it.
	 */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (pkeys[i] == LIM_MGMT_P_KEY) {
			update_includes_mgmt_partition = 1;
			break;
		}
	}

	if (!update_includes_mgmt_partition)
		return 1;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = ppd->pkeys[i];

		if (key == okey)
			continue;

		/*
		 * The SM gives us the complete PKey table; only entries
		 * that actually changed are recorded here, and the
		 * hardware is updated in one shot below.
		 */
		ppd->pkeys[i] = key;
		changed = 1;
	}

	if (changed) {
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(dd, port);
	}

	return 0;
}

static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_sent = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	u16 *p = (u16 *)data;
	__be16 *q = (__be16 *)data;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	u32 size = 0;

	if (n_blocks_sent == 0) {
		pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_sent);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);

	if (smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	if (start_block + n_blocks_sent > n_blocks_avail ||
	    n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_sent, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
		p[i] = be16_to_cpu(q[i]);

	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
					max_len);
}

#define ILLEGAL_VL 12
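/*
 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
 * for SC15, which must map to VL15). If we don't remap things this
 * way it is possible for VL15 counters to increment when we try to
 * send on a SC which is mapped to an invalid VL.
 * When getting the table convert ILLEGAL_VL back to VL15.
 */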
static void filter_sc2vlt(void *data, bool set)
{
	int i;
	u8 *pd = data;

	for (i = 0; i < OPA_MAX_SCS; i++) {
		if (i == 15)
			continue;

		if (set) {
			if ((pd[i] & 0x1f) == 0xf)
				pd[i] = ILLEGAL_VL;
		} else {
			if ((pd[i] & 0x1f) == ILLEGAL_VL)
				pd[i] = 0xf;
		}
	}
}

static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	filter_sc2vlt(data, true);

	write_csr(dd, SEND_SC2VLT0, *val++);
	write_csr(dd, SEND_SC2VLT1, *val++);
	write_csr(dd, SEND_SC2VLT2, *val++);
	write_csr(dd, SEND_SC2VLT3, *val++);
	write_seqlock_irq(&dd->sc2vl_lock);
	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
	write_sequnlock_irq(&dd->sc2vl_lock);
	return 0;
}

static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = (u64 *)data;

	*val++ = read_csr(dd, SEND_SC2VLT0);
	*val++ = read_csr(dd, SEND_SC2VLT1);
	*val++ = read_csr(dd, SEND_SC2VLT2);
	*val++ = read_csr(dd, SEND_SC2VLT3);

	filter_sc2vlt((u64 *)data, false);
	return 0;
}

static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc);
	unsigned i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		*p++ = ibp->sl_to_sc[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc);
	int i;
	u8 sc;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
		sc = *p++;
		if (ibp->sl_to_sc[i] != sc) {
			ibp->sl_to_sc[i] = sc;

			/* Put all stale qps into error state */
			hfi1_error_port_qps(ibp, i);
		}
	}

	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sc_to_sl);
	unsigned i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		*p++ = ibp->sc_to_sl[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len, u32 max_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	size_t size = ARRAY_SIZE(ibp->sc_to_sl);
	u8 *p = data;
	int i;

	if (am || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		ibp->sc_to_sl[i] = *p++;

	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
				       max_len);
}

static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	get_sc2vlt_tables(dd, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	int async_update = OPA_AM_ASYNC(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	struct hfi1_pportdata *ppd;
	int lstate;
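	/*
	 * set_sc2vlt_tables writes the information contained in *data
	 * to four 64-bit registers SendSC2VLt[0-3]. We need to make
	 * sure *max_len is not greater than the total size of the four
	 * SendSC2VLt[0-3] registers.
	 */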
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	/*
	 * it's known that async_update is 0 by this point, but include
	 * the explicit check for clarity
	 */
	if (!async_update &&
	    (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	set_sc2vlt_tables(dd, vp);

	return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
					max_len);
}

static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int size = sizeof(struct sc2vlnt);

	if (n_blocks != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);

	fm_get_table(ppd, FM_TBL_SC2VLNT, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len, u32 max_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int lstate;
	int size = sizeof(struct sc2vlnt);

	if (n_blocks != 1 || smp_length_check(size, max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	fm_set_table(ppd, FM_TBL_SC2VLNT, vp);

	return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
					 resp_len, max_len);
}

static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len, u32 max_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 lstate;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;

	if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	lstate = driver_lstate(ppd);

	if (start_of_sm_config && (lstate == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	psi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	psi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;

	psi->port_states.portphysstate_portstate =
		(driver_pstate(ppd) << 4) | (lstate & 0xf);
	psi->link_width_downgrade_tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	psi->link_width_downgrade_rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);
	if (resp_len)
		*resp_len += sizeof(struct opa_port_state_info);

	return reply((struct ib_mad_hdr *)smp);
}

static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len, u32 max_len, int local_mad)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 ls_old;
	u8 ls_new, ps_new;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
	int ret, invalid = 0;

	if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	ls_old = driver_lstate(ppd);

	ls_new = port_states_to_logical_state(&psi->port_states);
	ps_new = port_states_to_phys_state(&psi->port_states);

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0) {
				invalid = 1;
				smp->status |= IB_SMP_INVALID_FIELD;
			}
		}
	}

	if (!invalid) {
		ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
		if (ret)
			return ret;
	}

	return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
				  max_len);
}
2133
2134static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
2135 struct ib_device *ibdev, u8 port,
2136 u32 *resp_len, u32 max_len)
2137{
2138 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2139 u32 addr = OPA_AM_CI_ADDR(am);
2140 u32 len = OPA_AM_CI_LEN(am) + 1;
2141 int ret;
2142
2143 if (dd->pport->port_type != PORT_TYPE_QSFP ||
2144 smp_length_check(len, max_len)) {
2145 smp->status |= IB_SMP_INVALID_FIELD;
2146 return reply((struct ib_mad_hdr *)smp);
2147 }
2148
2149#define __CI_PAGE_SIZE BIT(7)
2150#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
2151#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
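/* 128-byte pages: e.g. addr 0x7e with len 4 crosses from page 0x00 into page 0x80 */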
2152
	/*
	 * check that addr is within spec, and
	 * addr and (addr + len - 1) are on the same "page"
	 */
2157 if (addr >= 4096 ||
2158 (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
2159 smp->status |= IB_SMP_INVALID_FIELD;
2160 return reply((struct ib_mad_hdr *)smp);
2161 }
2162
2163 ret = get_cable_info(dd, port, addr, len, data);
2164
2165 if (ret == -ENODEV) {
2166 smp->status |= IB_SMP_UNSUP_METH_ATTR;
2167 return reply((struct ib_mad_hdr *)smp);
2168 }
2169
	/*
	 * The address range for the CableInfo SMA query is wider than the
	 * memory available on the QSFP cable. We want to return a valid
	 * response, albeit zeroed out, for address ranges beyond available
	 * memory but that are within the CableInfo query spec.
	 */
2175 if (ret < 0 && ret != -ERANGE) {
2176 smp->status |= IB_SMP_INVALID_FIELD;
2177 return reply((struct ib_mad_hdr *)smp);
2178 }
2179
2180 if (resp_len)
2181 *resp_len += len;
2182
2183 return reply((struct ib_mad_hdr *)smp);
2184}
2185
2186static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2187 struct ib_device *ibdev, u8 port, u32 *resp_len,
2188 u32 max_len)
2189{
2190 u32 num_ports = OPA_AM_NPORT(am);
2191 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2192 struct hfi1_pportdata *ppd;
2193 struct buffer_control *p = (struct buffer_control *)data;
2194 int size = sizeof(struct buffer_control);
2195
2196 if (num_ports != 1 || smp_length_check(size, max_len)) {
2197 smp->status |= IB_SMP_INVALID_FIELD;
2198 return reply((struct ib_mad_hdr *)smp);
2199 }
2200
2201 ppd = dd->pport + (port - 1);
2202 fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
2203 trace_bct_get(dd, p);
2204 if (resp_len)
2205 *resp_len += size;
2206
2207 return reply((struct ib_mad_hdr *)smp);
2208}
2209
2210static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2211 struct ib_device *ibdev, u8 port, u32 *resp_len,
2212 u32 max_len)
2213{
2214 u32 num_ports = OPA_AM_NPORT(am);
2215 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2216 struct hfi1_pportdata *ppd;
2217 struct buffer_control *p = (struct buffer_control *)data;
2218
2219 if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
2220 smp->status |= IB_SMP_INVALID_FIELD;
2221 return reply((struct ib_mad_hdr *)smp);
2222 }
2223 ppd = dd->pport + (port - 1);
2224 trace_bct_set(dd, p);
2225 if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
2226 smp->status |= IB_SMP_INVALID_FIELD;
2227 return reply((struct ib_mad_hdr *)smp);
2228 }
2229
2230 return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
2231 max_len);
2232}
2233
2234static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2235 struct ib_device *ibdev, u8 port,
2236 u32 *resp_len, u32 max_len)
2237{
2238 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2239 u32 num_ports = OPA_AM_NPORT(am);
2240 u8 section = (am & 0x00ff0000) >> 16;
2241 u8 *p = data;
2242 int size = 256;
2243
2244 if (num_ports != 1 || smp_length_check(size, max_len)) {
2245 smp->status |= IB_SMP_INVALID_FIELD;
2246 return reply((struct ib_mad_hdr *)smp);
2247 }
2248
2249 switch (section) {
2250 case OPA_VLARB_LOW_ELEMENTS:
2251 fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
2252 break;
2253 case OPA_VLARB_HIGH_ELEMENTS:
2254 fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
2255 break;
2256 case OPA_VLARB_PREEMPT_ELEMENTS:
2257 fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
2258 break;
2259 case OPA_VLARB_PREEMPT_MATRIX:
2260 fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
2261 break;
2262 default:
2263 pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
2264 be32_to_cpu(smp->attr_mod));
2265 smp->status |= IB_SMP_INVALID_FIELD;
2266 size = 0;
2267 break;
2268 }
2269
2270 if (size > 0 && resp_len)
2271 *resp_len += size;
2272
2273 return reply((struct ib_mad_hdr *)smp);
2274}
2275
2276static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2277 struct ib_device *ibdev, u8 port,
2278 u32 *resp_len, u32 max_len)
2279{
2280 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2281 u32 num_ports = OPA_AM_NPORT(am);
2282 u8 section = (am & 0x00ff0000) >> 16;
2283 u8 *p = data;
2284 int size = 256;
2285
2286 if (num_ports != 1 || smp_length_check(size, max_len)) {
2287 smp->status |= IB_SMP_INVALID_FIELD;
2288 return reply((struct ib_mad_hdr *)smp);
2289 }
2290
2291 switch (section) {
2292 case OPA_VLARB_LOW_ELEMENTS:
2293 (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
2294 break;
2295 case OPA_VLARB_HIGH_ELEMENTS:
2296 (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
2297 break;
	/*
	 * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
	 * can be changed from the default values
	 */
	case OPA_VLARB_PREEMPT_ELEMENTS:
	case OPA_VLARB_PREEMPT_MATRIX:
2305 smp->status |= IB_SMP_UNSUP_METH_ATTR;
2306 break;
2307 default:
2308 pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
2309 be32_to_cpu(smp->attr_mod));
2310 smp->status |= IB_SMP_INVALID_FIELD;
2311 break;
2312 }
2313
2314 return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
2315 max_len);
2316}
2317
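/* an OPA MAD is 2048 bytes: the 24-byte common MAD header + 2024 bytes of data */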
2318struct opa_pma_mad {
2319 struct ib_mad_hdr mad_hdr;
2320 u8 data[2024];
2321} __packed;
2322
2323struct opa_port_status_req {
2324 __u8 port_num;
2325 __u8 reserved[3];
2326 __be32 vl_select_mask;
2327};
2328
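/* all data VLs (0-7) plus VL15: bits 0-7 and bit 15 */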
2329#define VL_MASK_ALL 0x00000000000080ffUL
2330
2331struct opa_port_status_rsp {
2332 __u8 port_num;
2333 __u8 reserved[3];
2334 __be32 vl_select_mask;

	/* Data counters */
2337 __be64 port_xmit_data;
2338 __be64 port_rcv_data;
2339 __be64 port_xmit_pkts;
2340 __be64 port_rcv_pkts;
2341 __be64 port_multicast_xmit_pkts;
2342 __be64 port_multicast_rcv_pkts;
2343 __be64 port_xmit_wait;
2344 __be64 sw_port_congestion;
2345 __be64 port_rcv_fecn;
2346 __be64 port_rcv_becn;
2347 __be64 port_xmit_time_cong;
2348 __be64 port_xmit_wasted_bw;
2349 __be64 port_xmit_wait_data;
2350 __be64 port_rcv_bubble;
2351 __be64 port_mark_fecn;

	/* Error counters */
2353 __be64 port_rcv_constraint_errors;
2354 __be64 port_rcv_switch_relay_errors;
2355 __be64 port_xmit_discards;
2356 __be64 port_xmit_constraint_errors;
2357 __be64 port_rcv_remote_physical_errors;
2358 __be64 local_link_integrity_errors;
2359 __be64 port_rcv_errors;
2360 __be64 excessive_buffer_overruns;
2361 __be64 fm_config_errors;
2362 __be32 link_error_recovery;
2363 __be32 link_downed;
2364 u8 uncorrectable_errors;
2365
2366 u8 link_quality_indicator;
2367 u8 res2[6];
2368 struct _vls_pctrs {
2369
2370 __be64 port_vl_xmit_data;
2371 __be64 port_vl_rcv_data;
2372 __be64 port_vl_xmit_pkts;
2373 __be64 port_vl_rcv_pkts;
2374 __be64 port_vl_xmit_wait;
2375 __be64 sw_port_vl_congestion;
2376 __be64 port_vl_rcv_fecn;
2377 __be64 port_vl_rcv_becn;
2378 __be64 port_xmit_time_cong;
2379 __be64 port_vl_xmit_wasted_bw;
2380 __be64 port_vl_xmit_wait_data;
2381 __be64 port_vl_rcv_bubble;
2382 __be64 port_vl_mark_fecn;
2383 __be64 port_vl_xmit_discards;
	} vls[0]; /* real array size defined by # bits set in vl_select_mask */
2385};
2386
2387enum counter_selects {
2388 CS_PORT_XMIT_DATA = (1 << 31),
2389 CS_PORT_RCV_DATA = (1 << 30),
2390 CS_PORT_XMIT_PKTS = (1 << 29),
2391 CS_PORT_RCV_PKTS = (1 << 28),
2392 CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
2393 CS_PORT_MCAST_RCV_PKTS = (1 << 26),
2394 CS_PORT_XMIT_WAIT = (1 << 25),
2395 CS_SW_PORT_CONGESTION = (1 << 24),
2396 CS_PORT_RCV_FECN = (1 << 23),
2397 CS_PORT_RCV_BECN = (1 << 22),
2398 CS_PORT_XMIT_TIME_CONG = (1 << 21),
2399 CS_PORT_XMIT_WASTED_BW = (1 << 20),
2400 CS_PORT_XMIT_WAIT_DATA = (1 << 19),
2401 CS_PORT_RCV_BUBBLE = (1 << 18),
2402 CS_PORT_MARK_FECN = (1 << 17),
2403 CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
2404 CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
2405 CS_PORT_XMIT_DISCARDS = (1 << 14),
2406 CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
2407 CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
2408 CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
2409 CS_PORT_RCV_ERRORS = (1 << 10),
2410 CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
2411 CS_FM_CONFIG_ERRORS = (1 << 8),
2412 CS_LINK_ERROR_RECOVERY = (1 << 7),
2413 CS_LINK_DOWNED = (1 << 6),
2414 CS_UNCORRECTABLE_ERRORS = (1 << 5),
2415};
2416
2417struct opa_clear_port_status {
2418 __be64 port_select_mask[4];
2419 __be32 counter_select_mask;
2420};
2421
2422struct opa_aggregate {
2423 __be16 attr_id;
2424 __be16 err_reqlength;
2425 __be32 attr_mod;
2426 u8 data[0];
2427};
2428
2429#define MSK_LLI 0x000000f0
2430#define MSK_LLI_SFT 4
2431#define MSK_LER 0x0000000f
2432#define MSK_LER_SFT 0
2433#define ADD_LLI 8
2434#define ADD_LER 2
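/*
 * Resolution fields for DataPortCounters: LocalLinkIntegrity resolution
 * in bits 7:4, LinkErrorRecovery resolution in bits 3:0; nonzero values
 * are biased by ADD_LLI/ADD_LER before being used as right-shift counts.
 */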
2435
2436
2437struct opa_port_data_counters_msg {
2438 __be64 port_select_mask[4];
2439 __be32 vl_select_mask;
2440 __be32 resolution;
2441
2442
2443 struct _port_dctrs {
2444 u8 port_number;
2445 u8 reserved2[3];
2446 __be32 link_quality_indicator;

		/* Data counters */
2449 __be64 port_xmit_data;
2450 __be64 port_rcv_data;
2451 __be64 port_xmit_pkts;
2452 __be64 port_rcv_pkts;
2453 __be64 port_multicast_xmit_pkts;
2454 __be64 port_multicast_rcv_pkts;
2455 __be64 port_xmit_wait;
2456 __be64 sw_port_congestion;
2457 __be64 port_rcv_fecn;
2458 __be64 port_rcv_becn;
2459 __be64 port_xmit_time_cong;
2460 __be64 port_xmit_wasted_bw;
2461 __be64 port_xmit_wait_data;
2462 __be64 port_rcv_bubble;
2463 __be64 port_mark_fecn;
2464
2465 __be64 port_error_counter_summary;
2466
2467
2468 struct _vls_dctrs {
2469
2470 __be64 port_vl_xmit_data;
2471 __be64 port_vl_rcv_data;
2472 __be64 port_vl_xmit_pkts;
2473 __be64 port_vl_rcv_pkts;
2474 __be64 port_vl_xmit_wait;
2475 __be64 sw_port_vl_congestion;
2476 __be64 port_vl_rcv_fecn;
2477 __be64 port_vl_rcv_becn;
2478 __be64 port_xmit_time_cong;
2479 __be64 port_vl_xmit_wasted_bw;
2480 __be64 port_vl_xmit_wait_data;
2481 __be64 port_vl_rcv_bubble;
2482 __be64 port_vl_mark_fecn;
2483 } vls[0];
2484
2485 } port[1];
2486};
2487
2488struct opa_port_error_counters64_msg {
	/*
	 * The request carries only the first two fields; the response
	 * carries the entire structure.
	 */
2493 __be64 port_select_mask[4];
2494 __be32 vl_select_mask;
2495
2496
2497 __be32 reserved1;
2498 struct _port_ectrs {
2499 u8 port_number;
2500 u8 reserved2[7];
2501 __be64 port_rcv_constraint_errors;
2502 __be64 port_rcv_switch_relay_errors;
2503 __be64 port_xmit_discards;
2504 __be64 port_xmit_constraint_errors;
2505 __be64 port_rcv_remote_physical_errors;
2506 __be64 local_link_integrity_errors;
2507 __be64 port_rcv_errors;
2508 __be64 excessive_buffer_overruns;
2509 __be64 fm_config_errors;
2510 __be32 link_error_recovery;
2511 __be32 link_downed;
2512 u8 uncorrectable_errors;
2513 u8 reserved3[7];
2514 struct _vls_ectrs {
2515 __be64 port_vl_xmit_discards;
2516 } vls[0];
2517
2518 } port[1];
2519};
2520
2521struct opa_port_error_info_msg {
2522 __be64 port_select_mask[4];
2523 __be32 error_info_select_mask;
2524 __be32 reserved1;
2525 struct _port_ei {
2526 u8 port_number;
2527 u8 reserved2[7];

		/* PortRcvErrorInfo */
2530 struct {
2531 u8 status_and_code;
2532 union {
2533 u8 raw[17];
2534 struct {
2535
2536 u8 packet_flit1[8];
2537 u8 packet_flit2[8];
2538 u8 remaining_flit_bits12;
2539 } ei1to12;
2540 struct {
2541 u8 packet_bytes[8];
2542 u8 remaining_flit_bits;
2543 } ei13;
2544 } ei;
2545 u8 reserved3[6];
2546 } __packed port_rcv_ei;

		/* ExcessiveBufferOverrunInfo */
2549 struct {
2550 u8 status_and_sc;
2551 u8 reserved4[7];
2552 } __packed excessive_buffer_overrun_ei;

		/* PortXmitConstraintErrorInfo */
2555 struct {
2556 u8 status;
2557 u8 reserved5;
2558 __be16 pkey;
2559 __be32 slid;
2560 } __packed port_xmit_constraint_ei;

		/* PortRcvConstraintErrorInfo */
2563 struct {
2564 u8 status;
2565 u8 reserved6;
2566 __be16 pkey;
2567 __be32 slid;
2568 } __packed port_rcv_constraint_ei;

		/* PortRcvSwitchRelayErrorInfo */
2571 struct {
2572 u8 status_and_code;
2573 u8 reserved7[3];
2574 __u32 error_info;
2575 } __packed port_rcv_switch_relay_ei;

		/* UncorrectableErrorInfo */
2578 struct {
2579 u8 status_and_code;
2580 u8 reserved8;
2581 } __packed uncorrectable_ei;

		/* FMConfigErrorInfo */
2584 struct {
2585 u8 status_and_code;
2586 u8 error_info;
2587 } __packed fm_config_ei;
2588 __u32 reserved9;
2589 } port[1];
2590};
2591
/* opa_port_error_info_msg error_info_select_mask bit definitions */
2593enum error_info_selects {
2594 ES_PORT_RCV_ERROR_INFO = (1 << 31),
2595 ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
2596 ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
2597 ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
2598 ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
2599 ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
2600 ES_FM_CONFIG_ERROR_INFO = (1 << 25)
2601};
2602
2603static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2604 struct ib_device *ibdev, u32 *resp_len)
2605{
2606 struct opa_class_port_info *p =
2607 (struct opa_class_port_info *)pmp->data;
2608
2609 memset(pmp->data, 0, sizeof(pmp->data));
2610
2611 if (pmp->mad_hdr.attr_mod != 0)
2612 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2613
2614 p->base_version = OPA_MGMT_BASE_VERSION;
2615 p->class_version = OPA_SM_CLASS_VERSION;
2616
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
	 * sec.
	 */
2619 p->cap_mask2_resp_time = cpu_to_be32(18);
2620
2621 if (resp_len)
2622 *resp_len += sizeof(*p);
2623
2624 return reply((struct ib_mad_hdr *)pmp);
2625}
2626
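/*
 * On non-Bx silicon, clamp the reported PortXmitWait to the sum of the
 * per-VL xmit wait counters, saturating if the summation wraps.
 */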
2627static void a0_portstatus(struct hfi1_pportdata *ppd,
2628 struct opa_port_status_rsp *rsp)
2629{
2630 if (!is_bx(ppd->dd)) {
2631 unsigned long vl;
2632 u64 sum_vl_xmit_wait = 0;
2633 unsigned long vl_all_mask = VL_MASK_ALL;
2634
2635 for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
2636 u64 tmp = sum_vl_xmit_wait +
2637 read_port_cntr(ppd, C_TX_WAIT_VL,
2638 idx_from_vl(vl));
2639 if (tmp < sum_vl_xmit_wait) {
				/* we wrapped */
2641 sum_vl_xmit_wait = (u64)~0;
2642 break;
2643 }
2644 sum_vl_xmit_wait = tmp;
2645 }
2646 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2647 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2648 }
2649}
2650
/**
 * tx_link_width - convert link width bitmask to an integer width
 * @link_width: width bitmask of the active link
 *
 * Returns the integer width corresponding to the most significant
 * width bit set in @link_width, or LINK_WIDTH_DEFAULT when no width
 * bit is set; e.g. a mask with both the 2X and 3X bits set yields 3.
 */
2660u16 tx_link_width(u16 link_width)
2661{
2662 int n = LINK_WIDTH_DEFAULT;
2663 u16 tx_width = n;
2664
2665 while (link_width && n) {
2666 if (link_width & (1 << (n - 1))) {
2667 tx_width = n;
2668 break;
2669 }
2670 n--;
2671 }
2672
2673 return tx_width;
2674}
2675
/**
 * get_xmit_wait_counters - convert xmit wait counts to flit times
 * @ppd: info of physical Hfi port
 * @link_width: width of active link
 * @link_speed: speed of active link
 * @vl: VL0-VL7 or VL15 for a PortVLXmitWait counter, or C_VL_COUNT
 *      for the whole-port PortXmitWait counter
 *
 * Reads the current wait counter, converts the delta since the last
 * read from TXE cycle times to flit times for the previously active
 * link width and the given speed, and accumulates the result per VL.
 *
 * Return: the accumulated, converted wait count for @vl
 */
2693u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
2694 u16 link_width, u16 link_speed, int vl)
2695{
2696 u64 port_vl_xmit_wait_curr;
2697 u64 delta_vl_xmit_wait;
2698 u64 xmit_wait_val;
2699
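	/* vl may be a data VL index, or C_VL_COUNT for the whole port */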
2700 if (vl > C_VL_COUNT)
2701 return 0;
2702 if (vl < C_VL_COUNT)
2703 port_vl_xmit_wait_curr =
2704 read_port_cntr(ppd, C_TX_WAIT_VL, vl);
2705 else
2706 port_vl_xmit_wait_curr =
2707 read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL);
2708
2709 xmit_wait_val =
2710 port_vl_xmit_wait_curr -
2711 ppd->port_vl_xmit_wait_last[vl];
2712 delta_vl_xmit_wait =
2713 convert_xmit_counter(xmit_wait_val,
2714 ppd->prev_link_width,
2715 link_speed);
2716
2717 ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait;
2718 ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr;
2719 ppd->prev_link_width = link_width;
2720
2721 return ppd->vl_xmit_flit_cnt[vl];
2722}
2723
2724static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2725 struct ib_device *ibdev,
2726 u8 port, u32 *resp_len)
2727{
2728 struct opa_port_status_req *req =
2729 (struct opa_port_status_req *)pmp->data;
2730 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2731 struct opa_port_status_rsp *rsp;
2732 unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
2733 unsigned long vl;
2734 size_t response_data_size;
2735 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2736 u8 port_num = req->port_num;
2737 u8 num_vls = hweight64(vl_select_mask);
2738 struct _vls_pctrs *vlinfo;
2739 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2740 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2741 int vfi;
2742 u64 tmp, tmp2;
2743 u16 link_width;
2744 u16 link_speed;
2745
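	/* the response carries one _vls_pctrs block per VL selected */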
2746 response_data_size = struct_size(rsp, vls, num_vls);
2747 if (response_data_size > sizeof(pmp->data)) {
2748 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2749 return reply((struct ib_mad_hdr *)pmp);
2750 }
2751
2752 if (nports != 1 || (port_num && port_num != port) ||
2753 num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2754 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2755 return reply((struct ib_mad_hdr *)pmp);
2756 }
2757
2758 memset(pmp->data, 0, sizeof(pmp->data));
2759
2760 rsp = (struct opa_port_status_rsp *)pmp->data;
2761 if (port_num)
2762 rsp->port_num = port_num;
2763 else
2764 rsp->port_num = port;
2765
2766 rsp->port_rcv_constraint_errors =
2767 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2768 CNTR_INVALID_VL));
2769
2770 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2771
2772 rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
2773 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2774 CNTR_INVALID_VL));
2775 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2776 CNTR_INVALID_VL));
2777 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2778 CNTR_INVALID_VL));
2779 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2780 CNTR_INVALID_VL));
2781 rsp->port_multicast_xmit_pkts =
2782 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2783 CNTR_INVALID_VL));
2784 rsp->port_multicast_rcv_pkts =
2785 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2786 CNTR_INVALID_VL));
2787
	/*
	 * Convert PortXmitWait counter from TXE cycle times
	 * to flit times.
	 */
2791 link_width =
2792 tx_link_width(ppd->link_width_downgrade_tx_active);
2793 link_speed = get_link_speed(ppd->link_speed_active);
2794 rsp->port_xmit_wait =
2795 cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
2796 link_speed, C_VL_COUNT));
2797 rsp->port_rcv_fecn =
2798 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2799 rsp->port_rcv_becn =
2800 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2801 rsp->port_xmit_discards =
2802 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2803 CNTR_INVALID_VL));
2804 rsp->port_xmit_constraint_errors =
2805 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2806 CNTR_INVALID_VL));
2807 rsp->port_rcv_remote_physical_errors =
2808 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2809 CNTR_INVALID_VL));
2810 rsp->local_link_integrity_errors =
2811 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2812 CNTR_INVALID_VL));
2813 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2814 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2815 CNTR_INVALID_VL);
2816 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
		/* overflow/wrapped */
2818 rsp->link_error_recovery = cpu_to_be32(~0);
2819 } else {
2820 rsp->link_error_recovery = cpu_to_be32(tmp2);
2821 }
2822 rsp->port_rcv_errors =
2823 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2824 rsp->excessive_buffer_overruns =
2825 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2826 rsp->fm_config_errors =
2827 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2828 CNTR_INVALID_VL));
2829 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2830 CNTR_INVALID_VL));

	/* UncorrectableErrors is an 8-bit field; saturate at 0xff */
2833 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2834 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2835
2836 vlinfo = &rsp->vls[0];
2837 vfi = 0;
2838
	/*
	 * The vl_select_mask has been checked above, and we know
	 * that it contains only entries which represent valid VLs.
	 * So in the for_each_set_bit() loop below, we don't need
	 * any additional checks for vl.
	 */
2843 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
2844 memset(vlinfo, 0, sizeof(*vlinfo));
2845
2846 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2847 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2848
2849 rsp->vls[vfi].port_vl_rcv_pkts =
2850 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2851 idx_from_vl(vl)));
2852
2853 rsp->vls[vfi].port_vl_xmit_data =
2854 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2855 idx_from_vl(vl)));
2856
2857 rsp->vls[vfi].port_vl_xmit_pkts =
2858 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2859 idx_from_vl(vl)));
2860
		/*
		 * Convert PortVlXmitWait counter from TXE cycle
		 * times to flit times.
		 */
2864 rsp->vls[vfi].port_vl_xmit_wait =
2865 cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
2866 link_speed,
2867 idx_from_vl(vl)));
2868
2869 rsp->vls[vfi].port_vl_rcv_fecn =
2870 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2871 idx_from_vl(vl)));
2872
2873 rsp->vls[vfi].port_vl_rcv_becn =
2874 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2875 idx_from_vl(vl)));
2876
2877 rsp->vls[vfi].port_vl_xmit_discards =
2878 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2879 idx_from_vl(vl)));
2880 vlinfo++;
2881 vfi++;
2882 }
2883
2884 a0_portstatus(ppd, rsp);
2885
2886 if (resp_len)
2887 *resp_len += response_data_size;
2888
2889 return reply((struct ib_mad_hdr *)pmp);
2890}
2891
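/*
 * PortErrorCounterSummary: sum of the port's error counters, with
 * LocalLinkIntegrityErrors and LinkErrorRecovery scaled down by the
 * requested resolution shifts (res_lli/res_ler).
 */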
2892static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2893 u8 res_lli, u8 res_ler)
2894{
2895 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2896 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2897 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2898 u64 error_counter_summary = 0, tmp;
2899
2900 error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2901 CNTR_INVALID_VL);
2902
2903 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2904 CNTR_INVALID_VL);
2905 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2906 CNTR_INVALID_VL);
2907 error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2908 CNTR_INVALID_VL);
2909
2910 error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
2911 CNTR_INVALID_VL) >> res_lli);
2912
2913 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2914 tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2915 error_counter_summary += (tmp >> res_ler);
2916 error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2917 CNTR_INVALID_VL);
2918 error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2919 error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2920 CNTR_INVALID_VL);
2921
2922 error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2923 CNTR_INVALID_VL);
2924 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2925
2926 error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2927
2928 return error_counter_summary;
2929}
2930
2931static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
2932{
2933 if (!is_bx(ppd->dd)) {
2934 unsigned long vl;
2935 u64 sum_vl_xmit_wait = 0;
2936 unsigned long vl_all_mask = VL_MASK_ALL;
2937
2938 for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
2939 u64 tmp = sum_vl_xmit_wait +
2940 read_port_cntr(ppd, C_TX_WAIT_VL,
2941 idx_from_vl(vl));
2942 if (tmp < sum_vl_xmit_wait) {
				/* we wrapped */
2944 sum_vl_xmit_wait = (u64)~0;
2945 break;
2946 }
2947 sum_vl_xmit_wait = tmp;
2948 }
2949 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2950 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2951 }
2952}
2953
2954static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
2955 struct _port_dctrs *rsp)
2956{
2957 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2958
2959 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2960 CNTR_INVALID_VL));
2961 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2962 CNTR_INVALID_VL));
2963 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2964 CNTR_INVALID_VL));
2965 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2966 CNTR_INVALID_VL));
2967 rsp->port_multicast_xmit_pkts =
2968 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2969 CNTR_INVALID_VL));
2970 rsp->port_multicast_rcv_pkts =
2971 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2972 CNTR_INVALID_VL));
2973}
2974
2975static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2976 struct ib_device *ibdev,
2977 u8 port, u32 *resp_len)
2978{
2979 struct opa_port_data_counters_msg *req =
2980 (struct opa_port_data_counters_msg *)pmp->data;
2981 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2982 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2983 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2984 struct _port_dctrs *rsp;
2985 struct _vls_dctrs *vlinfo;
2986 size_t response_data_size;
2987 u32 num_ports;
2988 u8 lq, num_vls;
2989 u8 res_lli, res_ler;
2990 u64 port_mask;
2991 u8 port_num;
2992 unsigned long vl;
2993 unsigned long vl_select_mask;
2994 int vfi;
2995 u16 link_width;
2996 u16 link_speed;
2997
2998 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2999 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
3000 vl_select_mask = be32_to_cpu(req->vl_select_mask);
3001 res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
3002 res_lli = res_lli ? res_lli + ADD_LLI : 0;
3003 res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
3004 res_ler = res_ler ? res_ler + ADD_LER : 0;
3005
3006 if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
3007 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3008 return reply((struct ib_mad_hdr *)pmp);
3009 }
3010
3011
3012 response_data_size = struct_size(req, port[0].vls, num_vls);
3013
3014 if (response_data_size > sizeof(pmp->data)) {
3015 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3016 return reply((struct ib_mad_hdr *)pmp);
3017 }
3018
	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
3023 port_mask = be64_to_cpu(req->port_select_mask[3]);
3024 port_num = find_first_bit((unsigned long *)&port_mask,
3025 sizeof(port_mask) * 8);
3026
3027 if (port_num != port) {
3028 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3029 return reply((struct ib_mad_hdr *)pmp);
3030 }
3031
3032 rsp = &req->port[0];
3033 memset(rsp, 0, sizeof(*rsp));
3034
3035 rsp->port_number = port;
3036
	/*
	 * Note that link_quality_indicator is a 32 bit quantity in
	 * 'datacounters' queries (as opposed to 'portinfo' queries,
	 * where it's a byte).
	 */
3041 hfi1_read_link_quality(dd, &lq);
3042 rsp->link_quality_indicator = cpu_to_be32((u32)lq);
3043 pma_get_opa_port_dctrs(ibdev, rsp);
3044
	/*
	 * Convert PortXmitWait counter from TXE cycle times
	 * to flit times.
	 */
3049 link_width =
3050 tx_link_width(ppd->link_width_downgrade_tx_active);
3051 link_speed = get_link_speed(ppd->link_speed_active);
3052 rsp->port_xmit_wait =
3053 cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
3054 link_speed, C_VL_COUNT));
3055 rsp->port_rcv_fecn =
3056 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
3057 rsp->port_rcv_becn =
3058 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
3059 rsp->port_error_counter_summary =
3060 cpu_to_be64(get_error_counter_summary(ibdev, port,
3061 res_lli, res_ler));
3062
3063 vlinfo = &rsp->vls[0];
3064 vfi = 0;
3065
	/*
	 * The vl_select_mask has been checked above, and we know
	 * that it contains only entries which represent valid VLs.
	 * So in the for_each_set_bit() loop below, we don't need
	 * any additional checks for vl.
	 */
3070 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
3071 memset(vlinfo, 0, sizeof(*vlinfo));
3072
3073 rsp->vls[vfi].port_vl_xmit_data =
3074 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
3075 idx_from_vl(vl)));
3076
3077 rsp->vls[vfi].port_vl_rcv_data =
3078 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
3079 idx_from_vl(vl)));
3080
3081 rsp->vls[vfi].port_vl_xmit_pkts =
3082 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
3083 idx_from_vl(vl)));
3084
3085 rsp->vls[vfi].port_vl_rcv_pkts =
3086 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
3087 idx_from_vl(vl)));
3088
		/*
		 * Convert PortVlXmitWait counter from TXE cycle
		 * times to flit times.
		 */
3093 rsp->vls[vfi].port_vl_xmit_wait =
3094 cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
3095 link_speed,
3096 idx_from_vl(vl)));
3097
3098 rsp->vls[vfi].port_vl_rcv_fecn =
3099 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
3100 idx_from_vl(vl)));
3101 rsp->vls[vfi].port_vl_rcv_becn =
3102 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
3103 idx_from_vl(vl)));
3104
		/*
		 * The remaining per-VL counters are not reported in this
		 * response and stay zero from the memset above.
		 */
3114 vlinfo++;
3115 vfi++;
3116 }
3117
3118 a0_datacounters(ppd, rsp);
3119
3120 if (resp_len)
3121 *resp_len += response_data_size;
3122
3123 return reply((struct ib_mad_hdr *)pmp);
3124}
3125
3126static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
3127 struct ib_device *ibdev, u8 port)
3128{
3129 struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
3130 pmp->data;
3131 struct _port_dctrs rsp;
3132
3133 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3134 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3135 goto bail;
3136 }
3137
3138 memset(&rsp, 0, sizeof(rsp));
3139 pma_get_opa_port_dctrs(ibdev, &rsp);
3140
3141 p->port_xmit_data = rsp.port_xmit_data;
3142 p->port_rcv_data = rsp.port_rcv_data;
3143 p->port_xmit_packets = rsp.port_xmit_pkts;
3144 p->port_rcv_packets = rsp.port_rcv_pkts;
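	/* unicast packet counts are not tracked separately; report zero */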
3145 p->port_unicast_xmit_packets = 0;
3146 p->port_unicast_rcv_packets = 0;
3147 p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
3148 p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
3149
3150bail:
3151 return reply((struct ib_mad_hdr *)pmp);
3152}
3153
3154static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
3155 struct _port_ectrs *rsp, u8 port)
3156{
3157 u64 tmp, tmp2;
3158 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3159 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3160 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3161
3162 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
3163 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3164 CNTR_INVALID_VL);
3165 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
		/* overflow/wrapped */
3167 rsp->link_error_recovery = cpu_to_be32(~0);
3168 } else {
3169 rsp->link_error_recovery = cpu_to_be32(tmp2);
3170 }
3171
3172 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
3173 CNTR_INVALID_VL));
3174 rsp->port_rcv_errors =
3175 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
3176 rsp->port_rcv_remote_physical_errors =
3177 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
3178 CNTR_INVALID_VL));
3179 rsp->port_rcv_switch_relay_errors = 0;
3180 rsp->port_xmit_discards =
3181 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
3182 CNTR_INVALID_VL));
3183 rsp->port_xmit_constraint_errors =
3184 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
3185 CNTR_INVALID_VL));
3186 rsp->port_rcv_constraint_errors =
3187 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
3188 CNTR_INVALID_VL));
3189 rsp->local_link_integrity_errors =
3190 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
3191 CNTR_INVALID_VL));
3192 rsp->excessive_buffer_overruns =
3193 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
3194}
3195
3196static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
3197 struct ib_device *ibdev,
3198 u8 port, u32 *resp_len)
3199{
3200 size_t response_data_size;
3201 struct _port_ectrs *rsp;
3202 u8 port_num;
3203 struct opa_port_error_counters64_msg *req;
3204 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3205 u32 num_ports;
3206 u8 num_pslm;
3207 u8 num_vls;
3208 struct hfi1_ibport *ibp;
3209 struct hfi1_pportdata *ppd;
3210 struct _vls_ectrs *vlinfo;
3211 unsigned long vl;
3212 u64 port_mask, tmp;
3213 unsigned long vl_select_mask;
3214 int vfi;
3215
3216 req = (struct opa_port_error_counters64_msg *)pmp->data;
3217
3218 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3219
3220 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3221 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
3222
3223 if (num_ports != 1 || num_ports != num_pslm) {
3224 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3225 return reply((struct ib_mad_hdr *)pmp);
3226 }
3227
3228 response_data_size = struct_size(req, port[0].vls, num_vls);
3229
3230 if (response_data_size > sizeof(pmp->data)) {
3231 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3232 return reply((struct ib_mad_hdr *)pmp);
3233 }
3234
	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
3238 port_mask = be64_to_cpu(req->port_select_mask[3]);
3239 port_num = find_first_bit((unsigned long *)&port_mask,
3240 sizeof(port_mask) * 8);
3241
3242 if (port_num != port) {
3243 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3244 return reply((struct ib_mad_hdr *)pmp);
3245 }
3246
3247 rsp = &req->port[0];
3248
3249 ibp = to_iport(ibdev, port_num);
3250 ppd = ppd_from_ibp(ibp);
3251
3252 memset(rsp, 0, sizeof(*rsp));
3253 rsp->port_number = port_num;
3254
3255 pma_get_opa_port_ectrs(ibdev, rsp, port_num);
3256
3257 rsp->port_rcv_remote_physical_errors =
3258 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
3259 CNTR_INVALID_VL));
3260 rsp->fm_config_errors =
3261 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
3262 CNTR_INVALID_VL));
3263 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
	/* UncorrectableErrors is an 8-bit field; saturate at 0xff */
3265 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
3266 rsp->port_rcv_errors =
3267 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
3268 vlinfo = &rsp->vls[0];
3269 vfi = 0;
3270 vl_select_mask = be32_to_cpu(req->vl_select_mask);
3271 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
3272 memset(vlinfo, 0, sizeof(*vlinfo));
3273 rsp->vls[vfi].port_vl_xmit_discards =
3274 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3275 idx_from_vl(vl)));
3276 vlinfo += 1;
3277 vfi++;
3278 }
3279
3280 if (resp_len)
3281 *resp_len += response_data_size;
3282
3283 return reply((struct ib_mad_hdr *)pmp);
3284}
3285
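/*
 * IB PortCounters fields are narrower than the 64-bit counters they
 * are derived from, so each value is saturated to its 8- or 16-bit
 * field rather than truncated.
 */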
3286static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
3287 struct ib_device *ibdev, u8 port)
3288{
3289 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
3290 pmp->data;
3291 struct _port_ectrs rsp;
3292 u64 temp_link_overrun_errors;
3293 u64 temp_64;
3294 u32 temp_32;
3295
3296 memset(&rsp, 0, sizeof(rsp));
3297 pma_get_opa_port_ectrs(ibdev, &rsp, port);
3298
3299 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3300 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3301 goto bail;
3302 }
3303
3304 p->symbol_error_counter = 0;
3305
3306 temp_32 = be32_to_cpu(rsp.link_error_recovery);
3307 if (temp_32 > 0xFFUL)
3308 p->link_error_recovery_counter = 0xFF;
3309 else
3310 p->link_error_recovery_counter = (u8)temp_32;
3311
3312 temp_32 = be32_to_cpu(rsp.link_downed);
3313 if (temp_32 > 0xFFUL)
3314 p->link_downed_counter = 0xFF;
3315 else
3316 p->link_downed_counter = (u8)temp_32;
3317
3318 temp_64 = be64_to_cpu(rsp.port_rcv_errors);
3319 if (temp_64 > 0xFFFFUL)
3320 p->port_rcv_errors = cpu_to_be16(0xFFFF);
3321 else
3322 p->port_rcv_errors = cpu_to_be16((u16)temp_64);
3323
3324 temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
3325 if (temp_64 > 0xFFFFUL)
3326 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
3327 else
3328 p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
3329
3330 temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
3331 p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
3332
3333 temp_64 = be64_to_cpu(rsp.port_xmit_discards);
3334 if (temp_64 > 0xFFFFUL)
3335 p->port_xmit_discards = cpu_to_be16(0xFFFF);
3336 else
3337 p->port_xmit_discards = cpu_to_be16((u16)temp_64);
3338
3339 temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
3340 if (temp_64 > 0xFFUL)
3341 p->port_xmit_constraint_errors = 0xFF;
3342 else
3343 p->port_xmit_constraint_errors = (u8)temp_64;
3344
3345 temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
3346 if (temp_64 > 0xFFUL)
		p->port_rcv_constraint_errors = 0xFF;
3348 else
3349 p->port_rcv_constraint_errors = (u8)temp_64;
3350
	/* link_overrun_errors: LLI in bits 7:4, buffer overruns in bits 3:0 */
3352 temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
3353 if (temp_64 > 0xFUL)
3354 temp_64 = 0xFUL;
3355
3356 temp_link_overrun_errors = temp_64 << 4;
3357
3358 temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
3359 if (temp_64 > 0xFUL)
3360 temp_64 = 0xFUL;
3361 temp_link_overrun_errors |= temp_64;
3362
3363 p->link_overrun_errors = (u8)temp_link_overrun_errors;
3364
3365 p->vl15_dropped = 0;
3366
3367bail:
3368 return reply((struct ib_mad_hdr *)pmp);
3369}
3370
3371static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
3372 struct ib_device *ibdev,
3373 u8 port, u32 *resp_len)
3374{
3375 size_t response_data_size;
3376 struct _port_ei *rsp;
3377 struct opa_port_error_info_msg *req;
3378 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3379 u64 port_mask;
3380 u32 num_ports;
3381 u8 port_num;
3382 u8 num_pslm;
3383 u64 reg;
3384
3385 req = (struct opa_port_error_info_msg *)pmp->data;
3386 rsp = &req->port[0];
3387
3388 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3389 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3390
3391 memset(rsp, 0, sizeof(*rsp));
3392
3393 if (num_ports != 1 || num_ports != num_pslm) {
3394 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3395 return reply((struct ib_mad_hdr *)pmp);
3396 }
3397
3398
3399 response_data_size = sizeof(struct opa_port_error_info_msg);
3400
3401 if (response_data_size > sizeof(pmp->data)) {
3402 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3403 return reply((struct ib_mad_hdr *)pmp);
3404 }
3405

	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
3410 port_mask = be64_to_cpu(req->port_select_mask[3]);
3411 port_num = find_first_bit((unsigned long *)&port_mask,
3412 sizeof(port_mask) * 8);
3413
3414 if (port_num != port) {
3415 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3416 return reply((struct ib_mad_hdr *)pmp);
3417 }
3418 rsp->port_number = port;
3419
	/* PortRcvErrorInfo */
3421 rsp->port_rcv_ei.status_and_code =
3422 dd->err_info_rcvport.status_and_code;
3423 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
3424 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
3425 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
3426 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
3427
	/* ExcessiveBufferOverrunInfo */
3429 reg = read_csr(dd, RCV_ERR_INFO);
3430 if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
		/*
		 * if the RcvExcessBufferOverrun bit is set, save SC of
		 * first pkt that encountered an excess buffer overrun
		 */
3435 u8 tmp = (u8)reg;
3436
3437 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
3438 tmp <<= 2;
3439 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
		/* set the status bit */
3441 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
3442 }
3443
3444 rsp->port_xmit_constraint_ei.status =
3445 dd->err_info_xmit_constraint.status;
3446 rsp->port_xmit_constraint_ei.pkey =
3447 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
3448 rsp->port_xmit_constraint_ei.slid =
3449 cpu_to_be32(dd->err_info_xmit_constraint.slid);
3450
3451 rsp->port_rcv_constraint_ei.status =
3452 dd->err_info_rcv_constraint.status;
3453 rsp->port_rcv_constraint_ei.pkey =
3454 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
3455 rsp->port_rcv_constraint_ei.slid =
3456 cpu_to_be32(dd->err_info_rcv_constraint.slid);
3457
	/* UncorrectableErrorInfo */
3459 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
3460
	/* FMConfigErrorInfo */
3462 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
3463
3464 if (resp_len)
3465 *resp_len += response_data_size;
3466
3467 return reply((struct ib_mad_hdr *)pmp);
3468}
3469
3470static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
3471 struct ib_device *ibdev,
3472 u8 port, u32 *resp_len)
3473{
3474 struct opa_clear_port_status *req =
3475 (struct opa_clear_port_status *)pmp->data;
3476 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3477 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3478 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3479 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3480 u64 portn = be64_to_cpu(req->port_select_mask[3]);
3481 u32 counter_select = be32_to_cpu(req->counter_select_mask);
3482 unsigned long vl_select_mask = VL_MASK_ALL;
3483 unsigned long vl;
3484
3485 if ((nports != 1) || (portn != 1 << port)) {
3486 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3487 return reply((struct ib_mad_hdr *)pmp);
3488 }
3489
	/*
	 * only counters returned by pma_get_opa_portstatus() are
	 * handled, so when pma_get_opa_portstatus() gets a fix,
	 * the corresponding change should be made here as well.
	 */
3495 if (counter_select & CS_PORT_XMIT_DATA)
3496 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
3497
3498 if (counter_select & CS_PORT_RCV_DATA)
3499 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
3500
3501 if (counter_select & CS_PORT_XMIT_PKTS)
3502 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3503
3504 if (counter_select & CS_PORT_RCV_PKTS)
3505 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
3506
3507 if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
3508 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3509
3510 if (counter_select & CS_PORT_MCAST_RCV_PKTS)
3511 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
3512
3513 if (counter_select & CS_PORT_XMIT_WAIT) {
3514 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
3515 ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0;
3516 ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0;
3517 }
3518
	/* ignore cs_sw_portCongestion for HFIs */
3520 if (counter_select & CS_PORT_RCV_FECN)
3521 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
3522
3523 if (counter_select & CS_PORT_RCV_BECN)
3524 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
3525
	/* cs_port_xmit_time_cong, cs_port_xmit_wasted_bw and
	 * cs_port_xmit_wait_data are not handled here
	 */
3529 if (counter_select & CS_PORT_RCV_BUBBLE)
3530 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
3531
	/* ignore cs_port_mark_fecn for HFIs */

3537 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
3538 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
3539
	/* port_rcv_switch_relay_errors is 0 for HFIs */
3541 if (counter_select & CS_PORT_XMIT_DISCARDS)
3542 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
3543
3544 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
3545 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
3546
3547 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
3548 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
3549
3550 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
3551 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3552
3553 if (counter_select & CS_LINK_ERROR_RECOVERY) {
3554 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3555 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3556 CNTR_INVALID_VL, 0);
3557 }
3558
3559 if (counter_select & CS_PORT_RCV_ERRORS)
3560 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3561
3562 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
3563 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3564 dd->rcv_ovfl_cnt = 0;
3565 }
3566
3567 if (counter_select & CS_FM_CONFIG_ERRORS)
3568 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
3569
3570 if (counter_select & CS_LINK_DOWNED)
3571 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
3572
3573 if (counter_select & CS_UNCORRECTABLE_ERRORS)
3574 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
3575
3576 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
3577 if (counter_select & CS_PORT_XMIT_DATA)
3578 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3579
3580 if (counter_select & CS_PORT_RCV_DATA)
3581 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3582
3583 if (counter_select & CS_PORT_XMIT_PKTS)
3584 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3585
3586 if (counter_select & CS_PORT_RCV_PKTS)
3587 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3588
3589 if (counter_select & CS_PORT_XMIT_WAIT) {
3590 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3591 ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0;
3592 ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0;
3593 }
3594
		/* sw_port_vl_congestion is 0 for HFIs */
3596 if (counter_select & CS_PORT_RCV_FECN)
3597 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3598
3599 if (counter_select & CS_PORT_RCV_BECN)
3600 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3601
		/* per-VL xmit time-cong, wasted-bw and wait-data selects are
		 * not handled here
		 */
3605 if (counter_select & CS_PORT_RCV_BUBBLE)
3606 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3607
		/* port_vl_mark_fecn is 0 for HFIs */

		/* clear per-VL xmit discards when PortXmitDiscards is selected */
		if (counter_select & CS_PORT_XMIT_DISCARDS)
			write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
					idx_from_vl(vl), 0);
3614 }
3615
3616 if (resp_len)
3617 *resp_len += sizeof(*req);
3618
3619 return reply((struct ib_mad_hdr *)pmp);
3620}
3621
3622static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3623 struct ib_device *ibdev,
3624 u8 port, u32 *resp_len)
3625{
3626 struct _port_ei *rsp;
3627 struct opa_port_error_info_msg *req;
3628 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3629 u64 port_mask;
3630 u32 num_ports;
3631 u8 port_num;
3632 u8 num_pslm;
3633 u32 error_info_select;
3634
3635 req = (struct opa_port_error_info_msg *)pmp->data;
3636 rsp = &req->port[0];
3637
3638 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3639 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3640
3641 memset(rsp, 0, sizeof(*rsp));
3642
3643 if (num_ports != 1 || num_ports != num_pslm) {
3644 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3645 return reply((struct ib_mad_hdr *)pmp);
3646 }
3647
	/*
	 * The bit set in the mask needs to be consistent with the
	 * port the request came in on.
	 */
3652 port_mask = be64_to_cpu(req->port_select_mask[3]);
3653 port_num = find_first_bit((unsigned long *)&port_mask,
3654 sizeof(port_mask) * 8);
3655
3656 if (port_num != port) {
3657 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3658 return reply((struct ib_mad_hdr *)pmp);
3659 }
3660
3661 error_info_select = be32_to_cpu(req->error_info_select_mask);
3662
	/* PortRcvErrorInfo */
	if (error_info_select & ES_PORT_RCV_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3667
	/* ExcessiveBufferOverrunInfo */
	if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
		/*
		 * status bit is essentially kept in the h/w - bit 5 of
		 * RCV_ERR_INFO
		 */
		write_csr(dd, RCV_ERR_INFO,
			  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3676
3677 if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3678 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3679
3680 if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3681 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3682
	/* UncorrectableErrorInfo */
	if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;

	/* FMConfigErrorInfo */
	if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
		/* turn off status bit */
		dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3692
3693 if (resp_len)
3694 *resp_len += sizeof(*req);
3695
3696 return reply((struct ib_mad_hdr *)pmp);
3697}
3698
3699struct opa_congestion_info_attr {
3700 __be16 congestion_info;
3701 u8 control_table_cap;
3702 u8 congestion_log_length;
3703} __packed;
3704
3705static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3706 struct ib_device *ibdev, u8 port,
3707 u32 *resp_len, u32 max_len)
3708{
3709 struct opa_congestion_info_attr *p =
3710 (struct opa_congestion_info_attr *)data;
3711 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3712 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3713
3714 if (smp_length_check(sizeof(*p), max_len)) {
3715 smp->status |= IB_SMP_INVALID_FIELD;
3716 return reply((struct ib_mad_hdr *)smp);
3717 }
3718
3719 p->congestion_info = 0;
3720 p->control_table_cap = ppd->cc_max_table_entries;
3721 p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3722
3723 if (resp_len)
3724 *resp_len += sizeof(*p);
3725
3726 return reply((struct ib_mad_hdr *)smp);
3727}
3728
3729static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3730 u8 *data, struct ib_device *ibdev,
3731 u8 port, u32 *resp_len, u32 max_len)
3732{
3733 int i;
3734 struct opa_congestion_setting_attr *p =
3735 (struct opa_congestion_setting_attr *)data;
3736 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3737 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3738 struct opa_congestion_setting_entry_shadow *entries;
3739 struct cc_state *cc_state;
3740
3741 if (smp_length_check(sizeof(*p), max_len)) {
3742 smp->status |= IB_SMP_INVALID_FIELD;
3743 return reply((struct ib_mad_hdr *)smp);
3744 }
3745
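	/* cc_state is RCU protected; read it under rcu_read_lock() */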
3746 rcu_read_lock();
3747
3748 cc_state = get_cc_state(ppd);
3749
3750 if (!cc_state) {
3751 rcu_read_unlock();
3752 return reply((struct ib_mad_hdr *)smp);
3753 }
3754
3755 entries = cc_state->cong_setting.entries;
3756 p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3757 p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3758 for (i = 0; i < OPA_MAX_SLS; i++) {
3759 p->entries[i].ccti_increase = entries[i].ccti_increase;
3760 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3761 p->entries[i].trigger_threshold =
3762 entries[i].trigger_threshold;
3763 p->entries[i].ccti_min = entries[i].ccti_min;
3764 }
3765
3766 rcu_read_unlock();
3767
3768 if (resp_len)
3769 *resp_len += sizeof(*p);
3770
3771 return reply((struct ib_mad_hdr *)smp);
3772}
3773
/*
 * Apply congestion control information stored in the ppd to the
 * active structure.
 */
3778static void apply_cc_state(struct hfi1_pportdata *ppd)
3779{
3780 struct cc_state *old_cc_state, *new_cc_state;
3781
3782 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3783 if (!new_cc_state)
3784 return;
3785
	/*
	 * Hold the lock for updating *and* to prevent ppd information
	 * from changing during the update.
	 */
3790 spin_lock(&ppd->cc_state_lock);
3791
3792 old_cc_state = get_cc_state_protected(ppd);
3793 if (!old_cc_state) {
		/* never active, or shutting down */
3795 spin_unlock(&ppd->cc_state_lock);
3796 kfree(new_cc_state);
3797 return;
3798 }
3799
3800 *new_cc_state = *old_cc_state;
3801
3802 if (ppd->total_cct_entry)
3803 new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3804 else
3805 new_cc_state->cct.ccti_limit = 0;
3806
3807 memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3808 ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3809
3810 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3811 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3812 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3813 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3814
3815 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3816
3817 spin_unlock(&ppd->cc_state_lock);
3818
3819 kfree_rcu(old_cc_state, rcu);
3820}
3821
3822static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3823 struct ib_device *ibdev, u8 port,
3824 u32 *resp_len, u32 max_len)
3825{
3826 struct opa_congestion_setting_attr *p =
3827 (struct opa_congestion_setting_attr *)data;
3828 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3829 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3830 struct opa_congestion_setting_entry_shadow *entries;
3831 int i;
3832
3833 if (smp_length_check(sizeof(*p), max_len)) {
3834 smp->status |= IB_SMP_INVALID_FIELD;
3835 return reply((struct ib_mad_hdr *)smp);
3836 }
3837
	/*
	 * Save details from packet into the ppd.  Hold the cc_state_lock so
	 * our information is consistent with anyone trying to apply the state.
	 */
3842 spin_lock(&ppd->cc_state_lock);
3843 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3844
3845 entries = ppd->congestion_entries;
3846 for (i = 0; i < OPA_MAX_SLS; i++) {
3847 entries[i].ccti_increase = p->entries[i].ccti_increase;
3848 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3849 entries[i].trigger_threshold =
3850 p->entries[i].trigger_threshold;
3851 entries[i].ccti_min = p->entries[i].ccti_min;
3852 }
3853 spin_unlock(&ppd->cc_state_lock);
3854
	/* now apply the information */
3856 apply_cc_state(ppd);
3857
3858 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3859 resp_len, max_len);
3860}
3861
3862static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3863 u8 *data, struct ib_device *ibdev,
3864 u8 port, u32 *resp_len, u32 max_len)
3865{
3866 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3867 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3868 struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3869 u64 ts;
3870 int i;
3871
3872 if (am || smp_length_check(sizeof(*cong_log), max_len)) {
3873 smp->status |= IB_SMP_INVALID_FIELD;
3874 return reply((struct ib_mad_hdr *)smp);
3875 }
3876
3877 spin_lock_irq(&ppd->cc_log_lock);
3878
3879 cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3880 cong_log->congestion_flags = 0;
3881 cong_log->threshold_event_counter =
3882 cpu_to_be16(ppd->threshold_event_counter);
3883 memcpy(cong_log->threshold_cong_event_map,
3884 ppd->threshold_cong_event_map,
3885 sizeof(cong_log->threshold_cong_event_map));

	/* timestamps are kept in units of 1.024 us (ns / 1024) */
3887 ts = ktime_get_ns() / 1024;
3888 cong_log->current_time_stamp = cpu_to_be32(ts);
3889 for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3890 struct opa_hfi1_cong_log_event_internal *cce =
3891 &ppd->cc_events[ppd->cc_mad_idx++];
3892 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3893 ppd->cc_mad_idx = 0;
3894
		/*
		 * Treat entries whose 32-bit timestamp is old enough to
		 * have wrapped more than once as empty and skip them.
		 */
3899 if ((ts - cce->timestamp) / 2 > U32_MAX)
3900 continue;
3901 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3902 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3903 &cce->rqpn, 3);
3904 cong_log->events[i].sl_svc_type_cn_entry =
3905 ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3906 cong_log->events[i].remote_lid_cn_entry =
3907 cpu_to_be32(cce->rlid);
3908 cong_log->events[i].timestamp_cn_entry =
3909 cpu_to_be32(cce->timestamp);
3910 }
3911
	/*
	 * Reset threshold_cong_event_map, and threshold_event_counter
	 * to 0 when log is read.
	 */
3916 memset(ppd->threshold_cong_event_map, 0x0,
3917 sizeof(ppd->threshold_cong_event_map));
3918 ppd->threshold_event_counter = 0;
3919
3920 spin_unlock_irq(&ppd->cc_log_lock);
3921
3922 if (resp_len)
3923 *resp_len += sizeof(struct opa_hfi1_cong_log);
3924
3925 return reply((struct ib_mad_hdr *)smp);
3926}
3927
3928static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3929 struct ib_device *ibdev, u8 port,
3930 u32 *resp_len, u32 max_len)
3931{
3932 struct ib_cc_table_attr *cc_table_attr =
3933 (struct ib_cc_table_attr *)data;
3934 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3935 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3936 u32 start_block = OPA_AM_START_BLK(am);
3937 u32 n_blocks = OPA_AM_NBLK(am);
3938 struct ib_cc_table_entry_shadow *entries;
3939 int i, j;
3940 u32 sentry, eentry;
3941 struct cc_state *cc_state;
3942 u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3943
	/* sanity check n_blocks, start_block */
3945 if (n_blocks == 0 || smp_length_check(size, max_len) ||
3946 start_block + n_blocks > ppd->cc_max_table_entries) {
3947 smp->status |= IB_SMP_INVALID_FIELD;
3948 return reply((struct ib_mad_hdr *)smp);
3949 }
3950
3951 rcu_read_lock();
3952
3953 cc_state = get_cc_state(ppd);
3954
3955 if (!cc_state) {
3956 rcu_read_unlock();
3957 return reply((struct ib_mad_hdr *)smp);
3958 }
3959
3960 sentry = start_block * IB_CCT_ENTRIES;
3961 eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3962
3963 cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3964
3965 entries = cc_state->cct.entries;
3966
3967
3968 for (j = 0, i = sentry; i < eentry; j++, i++)
3969 cc_table_attr->ccti_entries[j].entry =
3970 cpu_to_be16(entries[i].entry);
3971
3972 rcu_read_unlock();
3973
3974 if (resp_len)
3975 *resp_len += size;
3976
3977 return reply((struct ib_mad_hdr *)smp);
3978}
3979
3980static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3981 struct ib_device *ibdev, u8 port,
3982 u32 *resp_len, u32 max_len)
3983{
3984 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
3985 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3986 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3987 u32 start_block = OPA_AM_START_BLK(am);
3988 u32 n_blocks = OPA_AM_NBLK(am);
3989 struct ib_cc_table_entry_shadow *entries;
3990 int i, j;
3991 u32 sentry, eentry;
3992 u16 ccti_limit;
3993 u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3994
	/* sanity check n_blocks, start_block */
3996 if (n_blocks == 0 || smp_length_check(size, max_len) ||
3997 start_block + n_blocks > ppd->cc_max_table_entries) {
3998 smp->status |= IB_SMP_INVALID_FIELD;
3999 return reply((struct ib_mad_hdr *)smp);
4000 }
4001
4002 sentry = start_block * IB_CCT_ENTRIES;
4003 eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
4004 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
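	/*
	 * e.g. with 64-entry blocks: start_block 0, n_blocks 2 and
	 * ccti_limit 95 give sentry = 0 and eentry = 64 + 31 + 1 = 96,
	 * i.e. entries 0-95 are copied.
	 */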
4005
	/* sanity check ccti_limit */
4007 ccti_limit = be16_to_cpu(p->ccti_limit);
4008 if (ccti_limit + 1 > eentry) {
4009 smp->status |= IB_SMP_INVALID_FIELD;
4010 return reply((struct ib_mad_hdr *)smp);
4011 }
4012
	/*
	 * Save details from packet into the ppd.  Hold the cc_state_lock so
	 * our information is consistent with anyone trying to apply the state.
	 */
4017 spin_lock(&ppd->cc_state_lock);
4018 ppd->total_cct_entry = ccti_limit + 1;
4019 entries = ppd->ccti_entries;
4020 for (j = 0, i = sentry; i < eentry; j++, i++)
4021 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
4022 spin_unlock(&ppd->cc_state_lock);
4023
	/* now apply the information */
4025 apply_cc_state(ppd);
4026
4027 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
4028 max_len);
4029}
4030
4031struct opa_led_info {
4032 __be32 rsvd_led_mask;
4033 __be32 rsvd;
4034};
4035
4036#define OPA_LED_SHIFT 31
4037#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
4038
4039static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
4040 struct ib_device *ibdev, u8 port,
4041 u32 *resp_len, u32 max_len)
4042{
4043 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
4044 struct hfi1_pportdata *ppd = dd->pport;
4045 struct opa_led_info *p = (struct opa_led_info *)data;
4046 u32 nport = OPA_AM_NPORT(am);
4047 u32 is_beaconing_active;
4048
4049 if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
4050 smp->status |= IB_SMP_INVALID_FIELD;
4051 return reply((struct ib_mad_hdr *)smp);
4052 }
4053
	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
4059 smp_rmb();
4060 is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
4061 p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
4062
4063 if (resp_len)
4064 *resp_len += sizeof(struct opa_led_info);
4065
4066 return reply((struct ib_mad_hdr *)smp);
4067}
4068
4069static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
4070 struct ib_device *ibdev, u8 port,
4071 u32 *resp_len, u32 max_len)
4072{
4073 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
4074 struct opa_led_info *p = (struct opa_led_info *)data;
4075 u32 nport = OPA_AM_NPORT(am);
4076 int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
4077
4078 if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
4079 smp->status |= IB_SMP_INVALID_FIELD;
4080 return reply((struct ib_mad_hdr *)smp);
4081 }
4082
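	/* beacon with a 2000 ms on / 1500 ms off cycle while enabled */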
4083 if (on)
4084 hfi1_start_led_override(dd->pport, 2000, 1500);
4085 else
4086 shutdown_led_override(dd->pport);
4087
4088 return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
4089 max_len);
4090}
4091
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
			    u8 *data, struct ib_device *ibdev, u8 port,
			    u32 *resp_len, u32 max_len)
{
	int ret;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);

	switch (attr_id) {
	case IB_SMP_ATTR_NODE_DESC:
		ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_NODE_INFO:
		ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_PORT_INFO:
		ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_PKEY_TABLE:
		ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SL_TO_SC_MAP:
		ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_SL_MAP:
		ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
		ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
		ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
						resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_PORT_STATE_INFO:
		ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
					 resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
		ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
					 resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_CABLE_INFO:
		ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
						resp_len, max_len);
		break;
	case IB_SMP_ATTR_VL_ARB_TABLE:
		ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
					    resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_CONGESTION_INFO:
		ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
		ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
						  port, resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
		ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
						   port, resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
		ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_LED_INFO:
		ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_SM_INFO:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return IB_MAD_RESULT_SUCCESS;
		/* FALLTHROUGH */
	default:
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		ret = reply((struct ib_mad_hdr *)smp);
		break;
	}
	return ret;
}
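
/*
 * subn_set_opa_sma - dispatch an OPA SubnSet to the handler for the
 * given attribute ID. SM_INFO handling mirrors subn_get_opa_sma();
 * unsupported attributes fail with IB_SMP_UNSUP_METH_ATTR.
 */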
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
			    u8 *data, struct ib_device *ibdev, u8 port,
			    u32 *resp_len, u32 max_len, int local_mad)
{
	int ret;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);

	switch (attr_id) {
	case IB_SMP_ATTR_PORT_INFO:
		ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
					      resp_len, max_len, local_mad);
		break;
	case IB_SMP_ATTR_PKEY_TABLE:
		ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SL_TO_SC_MAP:
		ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_SL_MAP:
		ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
		ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
					       resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
		ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
						resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_PORT_STATE_INFO:
		ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
					 resp_len, max_len, local_mad);
		break;
	case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
		ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
					 resp_len, max_len);
		break;
	case IB_SMP_ATTR_VL_ARB_TABLE:
		ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
					    resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
		ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
						  port, resp_len, max_len);
		break;
	case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
		ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_LED_INFO:
		ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
					      resp_len, max_len);
		break;
	case IB_SMP_ATTR_SM_INFO:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return IB_MAD_RESULT_SUCCESS;
		/* FALLTHROUGH */
	default:
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		ret = reply((struct ib_mad_hdr *)smp);
		break;
	}
	return ret;
}
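
/* Set the error bit (bit 15) in an aggregate segment's err_reqlength */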
static inline void set_aggr_error(struct opa_aggregate *ag)
{
	ag->err_reqlength |= cpu_to_be16(0x8000);
}
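
/*
 * subn_get_opa_aggregate - process a SubnGet(Aggregate): walk up to
 * 117 embedded attribute segments, zero each segment's payload, and
 * invoke the normal Get handler on it, stopping at the first segment
 * whose handler reports an error.
 */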
static int subn_get_opa_aggregate(struct opa_smp *smp,
				  struct ib_device *ibdev, u8 port,
				  u32 *resp_len)
{
	int i;
	u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
	u8 *next_smp = opa_get_smp_data(smp);

	if (num_attr < 1 || num_attr > 117) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < num_attr; i++) {
		struct opa_aggregate *agg;
		size_t agg_data_len;
		size_t agg_size;
		u32 am;

		agg = (struct opa_aggregate *)next_smp;
		agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
		agg_size = sizeof(*agg) + agg_data_len;
		am = be32_to_cpu(agg->attr_mod);

		*resp_len += agg_size;

		if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
			smp->status |= IB_SMP_INVALID_FIELD;
			return reply((struct ib_mad_hdr *)smp);
		}

		/* zero the payload for this segment */
		memset(next_smp + sizeof(*agg), 0, agg_data_len);

		(void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
				       ibdev, port, NULL, (u32)agg_data_len);

		if (smp->status & IB_SMP_INVALID_FIELD)
			break;
		if (smp->status & ~IB_SMP_DIRECTION) {
			set_aggr_error(agg);
			return reply((struct ib_mad_hdr *)smp);
		}
		next_smp += agg_size;
	}

	return reply((struct ib_mad_hdr *)smp);
}
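
/*
 * subn_set_opa_aggregate - process a SubnSet(Aggregate): walk the
 * embedded attribute segments and invoke the normal Set handler on
 * each, stopping at the first segment that fails.
 */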
static int subn_set_opa_aggregate(struct opa_smp *smp,
				  struct ib_device *ibdev, u8 port,
				  u32 *resp_len, int local_mad)
{
	int i;
	u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
	u8 *next_smp = opa_get_smp_data(smp);

	if (num_attr < 1 || num_attr > 117) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < num_attr; i++) {
		struct opa_aggregate *agg;
		size_t agg_data_len;
		size_t agg_size;
		u32 am;

		agg = (struct opa_aggregate *)next_smp;
		agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
		agg_size = sizeof(*agg) + agg_data_len;
		am = be32_to_cpu(agg->attr_mod);

		*resp_len += agg_size;

		if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
			smp->status |= IB_SMP_INVALID_FIELD;
			return reply((struct ib_mad_hdr *)smp);
		}

		(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
				       ibdev, port, NULL, (u32)agg_data_len,
				       local_mad);

		if (smp->status & IB_SMP_INVALID_FIELD)
			break;
		if (smp->status & ~IB_SMP_DIRECTION) {
			set_aggr_error(agg);
			return reply((struct ib_mad_hdr *)smp);
		}
		next_smp += agg_size;
	}

	return reply((struct ib_mad_hdr *)smp);
}

/*
 * OPAv1 specifies that, on the transition to link up, these counters
 * are cleared:
 *   PortRcvErrors [*]
 *   LinkErrorRecovery
 *   LocalLinkIntegrityErrors
 *   ExcessiveBufferOverruns [*]
 *
 * [*] Error info associated with these counters is retained, but the
 * error info status is reset to 0.
 */
void clear_linkup_counters(struct hfi1_devdata *dd)
{
	/* PortRcvErrors */
	write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
	dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
	/* LinkErrorRecovery */
	write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
	write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
	/* LocalLinkIntegrityErrors */
	write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
	/* ExcessiveBufferOverruns */
	write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
	dd->rcv_ovfl_cnt = 0;
	dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
}
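
/*
 * is_full_mgmt_pkey_in_table - return 1 if FULL_MGMT_P_KEY is present
 * in this port's pkey table, 0 otherwise.
 */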
static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
{
	unsigned int i;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
			return 1;

	return 0;
}

/*
 * is_local_mad() returns 1 if 'mad' is sent from, and destined to the
 * local node, 0 otherwise.
 */
static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
			const struct ib_wc *in_wc)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	const struct opa_smp *smp = (const struct opa_smp *)mad;

	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		return (smp->hop_cnt == 0 &&
			smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
			smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
	}

	return (in_wc->slid == ppd->lid);
}

/*
 * opa_local_smp_check() should only be called on MADs for which
 * is_local_mad() returns true. It applies the SMP checks that are
 * specific to SMPs which are sent from, and destined to this node.
 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
 * otherwise.
 *
 * SMPs which arrive from other nodes are instead checked by
 * opa_smp_check().
 */
static int opa_local_smp_check(struct hfi1_ibport *ibp,
			       const struct ib_wc *in_wc)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 pkey;

	if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
		return 1;

	pkey = ppd->pkeys[in_wc->pkey_index];

	/*
	 * We need to do the "node-local" checks specified in OPAv1,
	 * rev 0.90, section 9.10.26, which are:
	 *   - pkey is 0x7fff, or 0xffff
	 *   - Source QPN == 0 || Destination QPN == 0
	 *   - the MAD header's management class is either
	 *     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
	 *     IB_MGMT_CLASS_SUBN_LID_ROUTED
	 *   - SLID != 0
	 *
	 * However, we know (and so don't need to check again) that,
	 * for local SMPs, the MAD stack passes MADs with:
	 *   - Source QPN of 0
	 *   - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
	 *   - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
	 *     our own port's lid
	 */
	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
		return 0;
	ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
	return 1;
}

/**
 * hfi1_pkey_validation_pma - validate the pkey of an incoming PMA MAD
 * @ibp: IB port data
 * @in_mad: MAD packet with header and data
 * @in_wc: Work completion data such as source LID, port number, etc.
 *
 * These are all the possible logic rules for validating a pkey:
 *
 * a) If pkey is neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
 *    and this is NOT a self-originated packet:
 *     Drop the MAD packet, as it should always be part of the
 *     management partition unless it is self-originated.
 *
 * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is in the
 *    pkey table:
 *     The packet is coming from a management node, and the receiving
 *     node is also a management node, so it is safe for the packet to
 *     go through.
 *
 * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in
 *    the pkey table:
 *     Drop the packet, as LIM_MGMT_P_KEY should always be in the pkey
 *     table. This could be an FM misconfiguration.
 *
 * d) If pkey_index -> LIM_MGMT_P_KEY, and FULL_MGMT_P_KEY is NOT in
 *    the pkey table:
 *     It is safe for the packet to go through, since a non-management
 *     node is talking to another non-management node.
 *
 * e) If pkey_index -> LIM_MGMT_P_KEY, and FULL_MGMT_P_KEY is in the
 *    pkey table:
 *     Drop the packet, because a non-management node is talking to a
 *     management node, which could be an attack.
 *
 * For the implementation, these rules can be simplified to only
 * checking for (a) and (e). There is no need to check for rule (b) as
 * the packet does not need to be dropped. Rule (c) is not possible in
 * the driver, as LIM_MGMT_P_KEY is always in the pkey table.
 *
 * Return: 0 if the pkey is okay, -EINVAL otherwise.
 */
static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
				    const struct opa_mad *in_mad,
				    const struct ib_wc *in_wc)
{
	u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);

	/* Rule (a) from above */
	if (!is_local_mad(ibp, in_mad, in_wc) &&
	    pkey_value != LIM_MGMT_P_KEY &&
	    pkey_value != FULL_MGMT_P_KEY)
		return -EINVAL;

	/* Rule (e) from above */
	if (pkey_value == LIM_MGMT_P_KEY &&
	    is_full_mgmt_pkey_in_table(ibp))
		return -EINVAL;

	return 0;
}
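
/*
 * process_subn_opa - process an OPA subnet management MAD. The M_Key
 * is checked before any attribute is touched, and the method/attribute
 * pair is then dispatched to the Get/Set/Aggregate handlers above.
 */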
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
			    u8 port, const struct opa_mad *in_mad,
			    struct opa_mad *out_mad,
			    u32 *resp_len, int local_mad)
{
	struct opa_smp *smp = (struct opa_smp *)out_mad;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *data;
	u32 am, data_size;
	__be16 attr_id;
	int ret;

	*out_mad = *in_mad;
	data = opa_get_smp_data(smp);
	data_size = (u32)opa_get_smp_data_size(smp);

	am = be32_to_cpu(smp->attr_mod);
	attr_id = smp->attr_id;
	if (smp->class_version != OPA_SM_CLASS_VERSION) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_mad_hdr *)smp);
		return ret;
	}
	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
			 smp->route.dr.dr_slid, smp->route.dr.return_path,
			 smp->hop_cnt);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void)check_mkey(to_iport(ibdev, port_num),
					 (struct ib_mad_hdr *)smp, 0,
					 smp->mkey, smp->route.dr.dr_slid,
					 smp->route.dr.return_path,
					 smp->hop_cnt);
		ret = IB_MAD_RESULT_FAILURE;
		return ret;
	}

	*resp_len = opa_get_smp_header_size(smp);

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (attr_id) {
		default:
			clear_opa_smp_data(smp);
			ret = subn_get_opa_sma(attr_id, smp, am, data,
					       ibdev, port, resp_len,
					       data_size);
			break;
		case OPA_ATTRIB_ID_AGGREGATE:
			ret = subn_get_opa_aggregate(smp, ibdev, port,
						     resp_len);
			break;
		}
		break;
	case IB_MGMT_METHOD_SET:
		switch (attr_id) {
		default:
			ret = subn_set_opa_sma(attr_id, smp, am, data,
					       ibdev, port, resp_len,
					       data_size, local_mad);
			break;
		case OPA_ATTRIB_ID_AGGREGATE:
			ret = subn_set_opa_aggregate(smp, ibdev, port,
						     resp_len, local_mad);
			break;
		}
		break;
	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses as
		 * well as requests, so we are done processing a response.
		 * If we get a request, return UNSUPPORTED METHOD.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	case IB_MGMT_METHOD_TRAP_REPRESS:
		subn_handle_opa_trap_repress(ibp, smp);
		/* Always successful */
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_mad_hdr *)smp);
		break;
	}

	return ret;
}
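
/*
 * process_subn - process an IBA subnet management MAD. Only
 * SubnGet(NodeInfo) is handled on this device; other Get attributes
 * are rejected with IB_SMP_UNSUP_METH_ATTR.
 */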
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_mad_hdr *)smp);
		return ret;
	}

	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
			 smp->mkey, (__force __be32)smp->dr_slid,
			 smp->return_path, smp->hop_cnt);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void)check_mkey(to_iport(ibdev, port_num),
					 (struct ib_mad_hdr *)smp, 0,
					 smp->mkey,
					 (__force __be32)smp->dr_slid,
					 smp->return_path, smp->hop_cnt);
		ret = IB_MAD_RESULT_FAILURE;
		return ret;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			break;
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)smp);
			break;
		}
		break;
	}

	return ret;
}
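
/*
 * process_perf - process an IBA performance management MAD (class
 * version 1): PortCounters, PortCountersExt and ClassPortInfo Gets,
 * plus the response bookkeeping required by the ib_mad module.
 */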
static int process_perf(struct ib_device *ibdev, u8 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	struct ib_class_port_info *cpi = (struct ib_class_port_info *)
					 &pmp->data;
	int ret = IB_MAD_RESULT_FAILURE;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_mad_hdr *)pmp);
		return ret;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_ib_portcounters(pmp, ibdev, port);
			break;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
			break;
		case IB_PMA_CLASS_PORT_INFO:
			cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;

	case IB_MGMT_METHOD_SET:
		if (pmp->mad_hdr.attr_id) {
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
		}
		break;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses as
		 * well as requests, so we are done processing a response.
		 * If we get a request, return UNSUPPORTED METHOD.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		break;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_mad_hdr *)pmp);
		break;
	}

	return ret;
}
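
/*
 * process_perf_opa - process an OPA performance management MAD,
 * dispatching Gets/Sets of the OPA PM attributes (class port info,
 * port status, data and error counters, error info) to their pma_*
 * handlers.
 */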
static int process_perf_opa(struct ib_device *ibdev, u8 port,
			    const struct opa_mad *in_mad,
			    struct opa_mad *out_mad, u32 *resp_len)
{
	struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;

	if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		return reply((struct ib_mad_hdr *)pmp);
	}

	*resp_len = sizeof(pmp->mad_hdr);

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
			break;
		case OPA_PM_ATTRIB_ID_PORT_STATUS:
			ret = pma_get_opa_portstatus(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
			ret = pma_get_opa_datacounters(pmp, ibdev, port,
						       resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
			ret = pma_get_opa_porterrors(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_INFO:
			ret = pma_get_opa_errorinfo(pmp, ibdev, port,
						    resp_len);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
			ret = pma_set_opa_portstatus(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_INFO:
			ret = pma_set_opa_errorinfo(pmp, ibdev, port,
						    resp_len);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses as
		 * well as requests, so we are done processing a response.
		 * If we get a request, return UNSUPPORTED METHOD.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		break;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_mad_hdr *)pmp);
		break;
	}

	return ret;
}
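
/*
 * hfi1_process_opa_mad - process an incoming OPA MAD. Local SMPs and
 * PMA MADs are pkey-checked before processing, and the size of any
 * reply is rounded up to an 8-byte boundary.
 */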
static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
				u8 port, const struct ib_wc *in_wc,
				const struct ib_grh *in_grh,
				const struct opa_mad *in_mad,
				struct opa_mad *out_mad, size_t *out_mad_size,
				u16 *out_mad_pkey_index)
{
	int ret;
	int pkey_idx;
	int local_mad = 0;
	u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
	struct hfi1_ibport *ibp = to_iport(ibdev, port);

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
			hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}
	*out_mad_pkey_index = (u16)pkey_idx;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		local_mad = is_local_mad(ibp, in_mad, in_wc);
		if (local_mad) {
			ret = opa_local_smp_check(ibp, in_wc);
			if (ret)
				return IB_MAD_RESULT_FAILURE;
		}
		ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
				       out_mad, &resp_len, local_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
		if (ret)
			return IB_MAD_RESULT_FAILURE;

		ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	if (ret & IB_MAD_RESULT_REPLY)
		*out_mad_size = round_up(resp_len, 8);
	else if (ret & IB_MAD_RESULT_SUCCESS)
		*out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);

	return ret;
}
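
/*
 * hfi1_process_ib_mad - process an incoming IBA (base version 1) MAD,
 * dispatching subnet and performance management classes to their
 * handlers; all other classes report success without a reply.
 */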
static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
			       const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad *in_mad,
			       struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		break;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	}

	return ret;
}

/**
 * hfi1_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: size of the outgoing MAD reply
 * @out_mad_pkey_index: used to pass back the packet key index
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad *in_mad, struct ib_mad *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	switch (in_mad->mad_hdr.base_version) {
	case OPA_MGMT_BASE_VERSION:
		return hfi1_process_opa_mad(ibdev, mad_flags, port,
					    in_wc, in_grh,
					    (struct opa_mad *)in_mad,
					    (struct opa_mad *)out_mad,
					    out_mad_size,
					    out_mad_pkey_index);
	case IB_MGMT_BASE_VERSION:
		return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
					   in_grh, in_mad, out_mad);
	default:
		break;
	}

	return IB_MAD_RESULT_FAILURE;
}