#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

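/* Port mgmt change event handling: pull the P_Key/GUID table block pointer
 * and the per-entry change mask out of a PORT_MGMT_CHANGE EQE.
 */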
#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc,
		 const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

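	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */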
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
	rdma_ah_set_dlid(&ah_attr, lid);
	rdma_ah_set_sl(&ah_attr, sl);
	rdma_ah_set_port_num(&ah_attr, port_num);

	new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
				&ah_attr, 0);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

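/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */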
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			/* cache sl to vl mapping changes for use in
			 * filling QP1 LRH VL field when sending packets
			 */
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
			    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
				return;
			if (!mlx4_is_slave(dev->dev)) {
				union sl2vl_tbl_to_u64 sl2vl64;
				int jj;

				for (jj = 0; jj < 8; jj++) {
					sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
					pr_debug("port %u, sl2vl[%d] = %02x\n",
						 port_num, jj, sl2vl64.sl8[jj]);
				}
				atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d, port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}

static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;

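		/*
		 * The SM address handle may be replaced by update_sm_ah() at
		 * any time, so copy the MAD and post it while holding sm_lock.
		 */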
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}

static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
				union ib_gid *dgid)
{
	int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
	enum rdma_network_type net_type;

	if (version == 4)
		net_type = RDMA_NETWORK_IPV4;
	else if (version == 6)
		net_type = RDMA_NETWORK_IPV6;
	else
		return -EINVAL;

	return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					 sgid, dgid);
}

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	const struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct rdma_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI) {
		pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
		return -EINVAL;
	}

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

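	/* compute P_Key index to put in tunnel header for slave */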
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret) {
			pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 wc->pkey_index, ret);
			return -EINVAL;
		}

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret) {
			pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 cached_pkey, ret);
			return -EINVAL;
		}
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

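	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */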
	memset(&attr, 0, sizeof attr);
	attr.type = rdma_ah_find_type(&dev->ib_dev, port);

	rdma_ah_set_port_num(&attr, port);
	if (is_eth) {
		union ib_gid sgid;
		union ib_gid dgid;

		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
			return -EINVAL;
		rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
	}
	ah = rdma_create_ah(tun_ctx->pd, &attr, 0);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto end;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

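	/* adjust tunnel data */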
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.remote_qkey = IB_QP_SET_QKEY;
	wr.remote_qpn = dqpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;
out:
	spin_lock(&tun_qp->tx_lock);
	tun_qp->tx_ix_tail++;
	spin_unlock(&tun_qp->tx_lock);
	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
end:
	rdma_destroy_ah(ah, 0);
	return ret;
}

static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err, other_port;
	int slave = -1;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		union ib_gid dgid;
		union ib_gid sgid;

		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
			return -EINVAL;
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
		if (err && mlx4_is_mf_bonded(dev->dev)) {
			other_port = (port == 1) ? 2 : 1;
			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
			if (!err) {
				port = other_port;
				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
					 slave, grh->dgid.raw, port, other_port);
			}
		}
		if (err) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

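	/* See if the slave id is encoded in a response mad */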
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		if (grh->dgid.global.interface_id ==
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
		    grh->dgid.global.subnet_prefix == cpu_to_be64(
			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
			slave = 0;
		} else {
			slave = mlx4_ib_find_real_gid(ibdev, port,
						      grh->dgid.global.interface_id);
			if (slave < 0) {
				mlx4_ib_warn(ibdev, "failed matching grh\n");
				return -ENOENT;
			}
		}
	}

	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF, drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d for slave:%d\n",
				 mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}

	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = ib_lid_cpu16(pattr.lid);

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void edit_counter(struct mlx4_counter *cnt, void *counters,
			 __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)counters;

		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
				     (be64_to_cpu(cnt->tx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
				     (be64_to_cpu(cnt->rx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
				     be64_to_cpu(cnt->tx_frames));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
				     be64_to_cpu(cnt->rx_frames));
		break;
	}
	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
		break;
	}
	}
}

static int iboe_process_mad_port_info(void *out_mad)
{
	struct ib_class_port_info cpi = {};

	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	memcpy(out_mad, &cpi, sizeof(cpi));
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			    const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_counter counter_stats;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct counter_index *tmp_counter;
	int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
		return iboe_process_mad_port_info((void *)(out_mad->data + 40));

	memset(&counter_stats, 0, sizeof(counter_stats));
	mutex_lock(&dev->counters_table[port_num - 1].mutex);
	list_for_each_entry(tmp_counter,
			    &dev->counters_table[port_num - 1].counters_list,
			    list) {
		err = mlx4_get_counter_stats(dev->dev,
					     tmp_counter->index,
					     &counter_stats, 0);
		if (err) {
			err = IB_MAD_RESULT_FAILURE;
			stats_avail = 0;
			break;
		}
		stats_avail = 1;
	}
	mutex_unlock(&dev->counters_table[port_num - 1].mutex);
	if (stats_avail) {
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40),
				     in_mad->mad_hdr.attr_id);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

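	/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
	 * queries, should be called only by VFs and for that specific purpose
	 */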
	if (link == IB_LINK_LAYER_INFINIBAND) {
		if (mlx4_is_slave(dev->dev) &&
		    (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		     (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
		      in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
		      in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
			return iboe_process_mad(ibdev, mad_flags, port_num,
						in_wc, in_grh, in, out);

		return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				      in, out);
	}

	if (link == IB_LINK_LAYER_ETHERNET)
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in, out);

	return -EINVAL;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0);
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL, 0);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			rdma_destroy_ah(dev->sm_ah[p], 0);
	}
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}

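	/* Update the sl to vl table from inside client rereg
	 * only if in secure-host mode (snooping is not possible)
	 * and the sl-to-vl change event is not generated by FW.
	 */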
	if (!mlx4_is_slave(dev->dev) &&
	    dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
		if (mlx4_is_master(dev->dev))
			/* already in work queue from mlx4_ib_event queueing
			 * mlx4_handle_port_mgmt_change_event, which calls
			 * this procedure. Therefore, call sl2vl_update directly.
			 */
			mlx4_ib_sl2vl_update(dev, port_num);
		else
			mlx4_sched_ib_sl2vl_update_work(dev, port_num);
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* each EQE table block covers four 8-entry GUIDInfo attribute blocks */
	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version  = 1;
		in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method        = IB_MGMT_METHOD_GET;
		in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		 * the other changed attributes so that MADs can be sent to the SM
		 */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			if (mlx4_is_master(dev->dev)) {
				union ib_gid gid;
				int err = 0;

				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
				else
					gid.global.subnet_prefix =
						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
				if (err) {
					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
						port, err);
				} else {
					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
						 port,
						 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
						 be64_to_cpu(gid.global.subnet_prefix));
					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
						     be64_to_cpu(gid.global.subnet_prefix));
				}
			}
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/* if master, notify all slaves */
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify relevant slaves */
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;

	case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
		/* cache sl to vl mapping changes for use in
		 * filling QP1 LRH VL field when sending packets
		 */
		if (!mlx4_is_slave(dev->dev)) {
			union sl2vl_tbl_to_u64 sl2vl64;
			int jj;

			for (jj = 0; jj < 8; jj++) {
				sl2vl64.sl8[jj] =
					eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
				pr_debug("port %u, sl2vl[%d] = %02x\n",
					 port, jj, sl2vl64.sl8[jj]);
			}
			atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
			eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wi_wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	const struct ib_recv_wr *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->pd->local_dma_lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	const struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	int ret;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
	if (!ah)
		return -ENOMEM;

	ah->device = sqp_ctx->pd->device;
	ah->pd = sqp_ctx->pd;

	/* create ah */
	ret = mlx4_ib_create_ah_slave(ah, attr,
				      rdma_ah_retrieve_grh(attr)->sgid_index,
				      s_mac, vlan_id);
	if (ret)
		goto out;

	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_WIRE_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	kfree(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.pkey_index = wire_pkey_ix;
	wr.remote_qkey = qkey;
	wr.remote_qpn = remote_qpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;

	spin_lock(&sqp->tx_lock);
	sqp->tx_ix_tail++;
	spin_unlock(&sqp->tx_lock);
	sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
	kfree(ah);
	return ret;
}

static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}

static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		grh->sgid_index = slave;
	else
		grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
}

static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct rdma_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;
	u16 vlan_id;
	u8 qos;
	u8 *dmac;
	int sts;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: belongs to another slave\n",
			     wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d class:%d slave:%d\n",
				     *slave_id, tunnel->mad.mad_hdr.mgmt_class,
				     slave);
			return;
		} else
			*slave_id = slave;
		break;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d for slave:%d\n",
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

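	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields
	 */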
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;

	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
	port = mlx4_slave_convert_port(dev->dev, slave, port);
	if (port < 0)
		return;
	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
	ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);

	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
	dmac = rdma_ah_retrieve_dmac(&ah_attr);
	if (dmac)
		memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
					&vlan_id, &qos))
		rdma_ah_set_sl(&ah_attr, qos);

	sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
				   is_proxy_qp0(dev, wc->src_qp, slave) ?
				   IB_QPT_SMI : IB_QPT_GSI,
				   be16_to_cpu(tunnel->hdr.pkey_index),
				   be32_to_cpu(tunnel->hdr.remote_qpn),
				   be32_to_cpu(tunnel->hdr.qkey),
				   &ah_attr, wc->smac, vlan_id, &tunnel->mad);
	if (sts)
		pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
			 slave, sts);
}

static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;
	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kcalloc(nmbr_bufs,
			       sizeof(struct mlx4_ib_buf),
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(nmbr_bufs,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
			kfree(tun_qp->ring[i].addr);
			goto err;
		}
	}

	for (i = 0; i < nmbr_bufs; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev,
					 tun_qp->tx_ring[i].buf.map)) {
			kfree(tun_qp->tx_ring[i].buf.addr);
			goto tx_err;
		}
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	i = nmbr_bufs;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;
	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel buf:%lld\n",
					       wc.wr_id);
				break;
			case IB_WC_SEND:
				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* no recovery is attempted here -- just report the fatal QP event */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;
	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
	qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	ret = 0;
	if (create_tun)
		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
					      ctx->port, IB_DEFAULT_PKEY_FULL,
					      &attr.pkey_index);
	if (ret || !create_tun)
		attr.pkey_index =
			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error (err = %d, i = %d)\n",
			       ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

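/*
 * IB MAD completion callback for real SQPs: demux received wire MADs to
 * the owning slave and recycle send buffers on send completions.
 */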
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				kfree(sqp->tx_ring[wc.wr_id &
				      (MLX4_NUM_WIRE_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_WIRE_BUFS - 1)))
					pr_err("Failed reposting SQP buf:%lld\n",
					       wc.wr_id);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				kfree(sqp->tx_ring[wc.wr_id &
				      (MLX4_NUM_WIRE_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}

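/* Allocate a (not yet active) demux PV context for one slave/port pair. */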
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

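/*
 * Create the CQ, PD and QPs used to proxy MADs for one slave/port:
 * tunnel QPs when create_tun is set, otherwise the real special QPs.
 * QP0 resources exist only when the port link layer is InfiniBand
 * (ctx->has_smi).
 */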
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;
	struct ib_cq_init_attr cq_attr = {};
	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (ctx->state != DEMUX_PV_STATE_DOWN)
		return -EEXIST;

	ctx->state = DEMUX_PV_STATE_STARTING;

	if (rdma_port_get_link_layer(ibdev, ctx->port) ==
	    IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * nmbr_bufs;
	if (ctx->has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	ctx->cq = ib_create_cq(ctx->ib_dev,
			       create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
			       NULL, ctx, &cq_attr);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_pd;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
	ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}

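/* Tear down everything set up by create_pv_resources() for this context. */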
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

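/*
 * Bring the paravirt MAD resources for one slave/port up (do_init != 0)
 * or down; the master function additionally owns the real special QPs.
 */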
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);

		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
}

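/*
 * Set up the per-port demux context: one PV context per active slave,
 * the mcg paravirt state, and the tunnel/wire/up-down workqueues.
 */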
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0;
	     i < min(dev->dev->caps.sqp_demux,
		     (u16)(dev->dev->persist->num_vfs + 1));
	     i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev->dev, i);

		if (!test_bit(port - 1, actv_ports.ports))
			continue;

		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_mcg;
		}
	}

	ret = mlx4_ib_mcg_port_init(ctx);
	if (ret) {
		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
		goto err_mcg;
	}

	snprintf(name, sizeof(name), "mlx4_ibt%d", port);
	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
	ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->wi_wq) {
		pr_err("Failed to create wire WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wiwq;
	}

	snprintf(name, sizeof(name), "mlx4_ibud%d", port);
	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wi_wq);
	ctx->wi_wq = NULL;

err_wiwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}

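/* Destroy the master's real special QP resources (wire QP0/QP1). */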
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

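/* Flush and free everything allocated by mlx4_ib_alloc_demux_ctx(). */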
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;

	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		flush_workqueue(ctx->wi_wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wi_wq);
		destroy_workqueue(ctx->wq);
	}
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;

	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
}

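/*
 * Initialize SR-IOV MAD demultiplexing.  Slaves just operate in qp1
 * tunnel mode; the master assigns node GUIDs to all slaves, starts the
 * alias GUID service and sysfs, and creates a demux context per port.
 */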
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))
			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
		else
			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to init alias guid service\n");
		goto paravirt_err;
	}
	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;

		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		atomic64_set(&dev->sriov.demux[i].subnet_prefix,
			     be64_to_cpu(gid.global.subnet_prefix));
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto free_pv;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

free_pv:
	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
	while (--i >= 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}

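/* Undo mlx4_ib_init_sriov(): flush and free all per-port SR-IOV state. */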
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}
