#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"

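/*
 * Dispatch table for building the UD request header: indexed by
 * priv->hdr_type (set in hfi1_make_ud_req() from the destination LID)
 * to select the 9B (IB-style) or 16B (OPA extended) builder.
 */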
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};

/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

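	/*
	 * Loopback is only legal between QPs of the same transport type
	 * and only while the receiver can accept packets; GSI was
	 * normalized to UD above because GSI is simply UD on QP1.
	 */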
	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}

	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
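	/*
	 * Walk the sender's SGE list and copy each fragment into the
	 * receiver's posted buffer (qp->r_sge), stepping through the MR
	 * segment arrays as each SGE is consumed.
	 */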
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
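	/* 16B (bypass) packets carry the pkey in the LRH, not in the BTH */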
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}

void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	/* Pad the payload to a dword boundary; nwords includes the ICRC. */
	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
	/* header size in dwords: LRH + BTH + DETH = (8 + 12 + 8) / 4 */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

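	/*
	 * SMPs are hard-wired to SC15/VL15; every other QP type uses the
	 * SL-to-SC mapping from the address handle.
	 */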
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}

void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u16 len, pkey;
	u8 l4, sc5;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	/* header size in dwords: 16B LRH + BTH + DETH = (16 + 12 + 8) / 4 */
	ps->s_txreq->hdr_dwords = 9;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;

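	/*
	 * A GRH is only inserted when the AH requests one and the
	 * destination is multicast; the 16B L4 type is IB_GLOBAL with a
	 * GRH and IB_LOCAL without.
	 */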
	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);

		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));

	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;

			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}

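	/* Not a local loopback: build the header and send on the wire. */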
	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than what is specified in
 * the Verbs API. The full/limited management pkeys (FULL_MGMT_P_KEY
 * and LIM_MGMT_P_KEY) are treated specially: a lookup of the full key
 * may be satisfied by the limited key's index when the full key is
 * not in the table.
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff, return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match... */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_16b_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		/* Swap the source and destination GIDs for the reply */
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
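	/*
	 * Note: the "1, 0" arguments above set the 16B header's BECN and
	 * FECN bits respectively; BECN=1 is what marks this packet as a
	 * congestion notification.
	 */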

	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
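	/* No send context or no PIO credits means the CNP is simply dropped */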
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	/* BECN bit marks this packet as a congestion notification */
	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/* SMPs are only expected on SC15; reject anything else */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *    - accept if the port is running an SM
	 *    - drop MAD if it's an SMA trap
	 *    - pkey == FULL_MGMT_P_KEY =>
	 *        reply with unsupported method
	 *    - pkey != FULL_MGMT_P_KEY =>
	 *        increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	bool dlid_is_permissive;
	bool slid_is_permissive;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
	qkey = ib_get_qkey(ohdr);
	src_qp = ib_get_sqpn(ohdr);

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];

	process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for bogus packets received from SW is not
				 * an error.
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey))
			return; /* Silent drop */

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

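	/*
	 * UD traffic always arrives as single-packet messages: only
	 * SEND_ONLY and SEND_ONLY_WITH_IMMEDIATE are valid here.
	 */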
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}

	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		hfi1_copy_sge(&qp->r_sge, packet->grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;

		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
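	/*
	 * 16B headers carry the SC but no SL, so the completion's SL is
	 * recovered from the SC-to-SL table.
	 */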
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;

	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}