/*
 * Core RDMA verbs helpers: protection domains, address handles, queue
 * pairs, completion queues, memory regions and the related resource
 * tracking shared by all InfiniBand/RoCE/iWARP device drivers.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
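
/*
 * Example (sketch, not compiled): a minimal async event handler that uses
 * ib_event_msg() for logging. The handler name and context are hypothetical;
 * registration goes through the standard INIT_IB_EVENT_HANDLER() /
 * ib_register_event_handler() core API.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("%s: async event: %s\n",
 *			dev_name(&event->device->dev),
 *			ib_event_msg(event->event));
 *	}
 */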

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);

__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return   1;
	case IB_RATE_5_GBPS:   return   2;
	case IB_RATE_10_GBPS:  return   4;
	case IB_RATE_20_GBPS:  return   8;
	case IB_RATE_30_GBPS:  return  12;
	case IB_RATE_40_GBPS:  return  16;
	case IB_RATE_60_GBPS:  return  24;
	case IB_RATE_80_GBPS:  return  32;
	case IB_RATE_120_GBPS: return  48;
	case IB_RATE_14_GBPS:  return   6;
	case IB_RATE_56_GBPS:  return  22;
	case IB_RATE_112_GBPS: return  45;
	case IB_RATE_168_GBPS: return  67;
	case IB_RATE_25_GBPS:  return  10;
	case IB_RATE_100_GBPS: return  40;
	case IB_RATE_200_GBPS: return  80;
	case IB_RATE_300_GBPS: return 120;
	case IB_RATE_28_GBPS:  return  11;
	case IB_RATE_50_GBPS:  return  20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default:	       return  -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
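
/*
 * Example (sketch): ib_rate_to_mult() and mult_to_ib_rate() are inverses for
 * the defined encodings, so a rate can be round-tripped through its 2.5 Gbps
 * multiple, while ib_rate_to_mbps() returns the actual signalling rate:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	// 40
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// IB_RATE_100_GBPS
 *	int mbps = ib_rate_to_mbps(rate);		// 103125, not 100000
 */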

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	enum rdma_transport_type lt;

	if (device->ops.get_link_layer)
		return device->ops.get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

/**
 * __ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->ops.alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_set_task(&pd->res, caller);
	rdma_restrack_kadd(&pd->res);

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
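
/*
 * Example (sketch): kernel ULPs normally go through the ib_alloc_pd() wrapper
 * macro, which supplies KBUILD_MODNAME as @caller:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... use pd->local_dma_lkey for local SGEs ...
 *	ib_dealloc_pd(pd);
 */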

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->ops.dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	rdma_restrack_del(&pd->res);

	/* dealloc_pd returning void would be cleaner; no driver should
	 * actually fail here.
	 */
	ret = pd->device->ops.dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);

/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest:	Pointer to destination ah_attr. Contents of this will be replaced.
 * @src:	Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{
	*dest = *src;
	if (dest->grh.sgid_attr)
		rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);

/**
 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
 * @old:	Pointer to existing ah_attr which needs to be replaced.
 *		old is assumed to be valid or zero'd
 * @new:	Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * the reference to the replaced ah_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);

/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest:	Pointer to destination ah_attr to copy to.
 *		dest is assumed to be valid or zero'd
 * @src:	Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. It also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
	rdma_destroy_ah_attr(dest);
	*dest = *src;
	src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);

/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		return -EINVAL;

	if (ah_attr->grh.sgid_attr) {
		/*
		 * Make sure the passed sgid_attr is consistent with the
		 * parameters
		 */
		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
			return -EINVAL;
	}
	return 0;
}

/*
 * If the ah requires a GRH then ensure that the sgid_attr pointer is valid.
 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{
	const struct ib_gid_attr *sgid_attr;
	struct ib_global_route *grh;
	int ret;

	*old_sgid_attr = ah_attr->grh.sgid_attr;

	ret = rdma_check_ah_attr(device, ah_attr);
	if (ret)
		return ret;

	if (!(ah_attr->ah_flags & IB_AH_GRH))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);
	if (grh->sgid_attr)
		return 0;

	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
	grh->sgid_attr = sgid_attr;
	return 0;
}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{
	/*
	 * Fill didn't change the ah_attr (no GRH, or the caller already
	 * supplied an sgid_attr), so there is nothing to undo.
	 */
	if (ah_attr->grh.sgid_attr == old_sgid_attr)
		return;

	/*
	 * Otherwise fill looked up a fresh sgid_attr and took a reference on
	 * it; drop that reference again.
	 */
	rdma_destroy_ah_attr(ah_attr);
}

static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
		      const struct ib_gid_attr *old_attr)
{
	if (old_attr)
		rdma_put_gid_attr(old_attr);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
		return ah_attr->grh.sgid_attr;
	}
	return NULL;
}

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);

	if (!pd->device->ops.create_ah)
		return ERR_PTR(-EOPNOTSUPP);

	ah = pd->device->ops.create_ah(pd, ah_attr, flags, udata);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		ah->type    = ah_attr->type;
		ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);

		atomic_inc(&pd->usecnt);
	}

	return ah;
}

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int ret;

	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = _rdma_create_ah(pd, ah_attr, flags, NULL);

	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
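
/*
 * Example (sketch, IB link layer): fill a minimal rdma_ah_attr by hand and
 * create an AH from it; dlid and the port number are placeholder values.
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(pd->device, 1);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, 1);
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */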

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves the destination mac address for an ah attribute of the RoCE
 * link layer.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns a valid address handle or an ERR_PTR on failure.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int err;

	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);

out:
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);

	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx = context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	/* Reject the entry if the VLAN-ness of its netdev does not match the
	 * request (0xffff means "no VLAN"), or if both are VLAN devices but
	 * the VLAN ids differ.
	 */
	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
		       u16 vlan_id, const union ib_gid *sgid,
		       enum ib_gid_type gid_type)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				       &context);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in  src_in;
	struct sockaddr_in  dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);

/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attr must have valid port_num, sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int hop_limit = 0xff;
	int ret = 0;

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return ret;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);

	grh->hop_limit = hop_limit;
	return ret;
}

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	const struct ib_gid_attr *sgid_attr;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	might_sleep();

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		sgid_attr = get_sgid_attr_from_eth(device, port_num,
						   vlan_id, &dgid,
						   gid_type);
		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
		if (ret)
			rdma_destroy_ah_attr(ah_attr);

		return ret;
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if ((wc->wc_flags & IB_WC_GRH) == 0)
			return 0;

		if (dgid.global.interface_id !=
		    cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
			sgid_attr = rdma_find_gid_by_port(
				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
		} else
			sgid_attr = rdma_get_gid_attr(device, port_num, 0);

		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);

/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:	Pointer to AH attribute structure
 * @dgid:	Destination GID
 * @flow_label:	Flow label
 * @hop_limit:	Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:	Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called when it is done with the AH attribute.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
			traffic_class);
	attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);

/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
	if (ah_attr->grh.sgid_attr) {
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
		ah_attr->grh.sgid_attr = NULL;
	}
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
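
/*
 * Example (sketch): a UD responder can reply to a received datagram by
 * building an AH directly from the polled completion; wc and grh come from
 * the completed receive (the GRH occupies the first 40 bytes of the receive
 * buffer when IB_WC_GRH is set).
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (!IS_ERR(ah)) {
 *		// ... post a UD send using ah, wc.src_qp and the remote qkey ...
 *		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 *	}
 */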

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *old_sgid_attr;
	int ret;

	if (ah->type != ah_attr->type)
		return -EINVAL;

	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ret;

	ret = ah->device->ops.modify_ah ?
		ah->device->ops.modify_ah(ah, ah_attr) :
		-EOPNOTSUPP;

	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	ah_attr->grh.sgid_attr = NULL;

	return ah->device->ops.query_ah ?
		ah->device->ops.query_ah(ah, ah_attr) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
	struct ib_pd *pd;
	int ret;

	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

	pd = ah->pd;
	ret = ah->device->ops.destroy_ah(ah, flags);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (sgid_attr)
			rdma_put_gid_attr(sgid_attr);
	}

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (ib_srq_has_cq(srq->srq_type)) {
			srq->ext.cq = srq_init_attr->ext.cq;
			atomic_inc(&srq->ext.cq->usecnt);
		}
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
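
/*
 * Example (sketch): create a basic SRQ and attach it to QPs later via
 * qp_init_attr->srq; the queue sizes are placeholders and must respect
 * device->attrs.max_srq_wr / max_srq_sge.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */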

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->ops.modify_srq ?
		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
					    NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->ops.query_srq ?
		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;

	ret = srq->device->ops.destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	/* Fan the event out to every QP opened on top of the real QP. */
	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->ops.destroy_qp(real_qp);
	return qp;
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
	if (IS_ERR(qp))
		return qp;

	ret = ib_create_qp_security(qp, device);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}

	qp->real_qp    = qp;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
	qp->port = 0;

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
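
/*
 * Example (sketch): create an RC QP bound to existing send/recv CQs; the
 * capacities are placeholders and must respect device->attrs limits.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq = scq,
 *		.recv_cq = rcq,
 *		.cap = {
 *			.max_send_wr  = 128,
 *			.max_recv_wr  = 128,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */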

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
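
/*
 * Example (sketch): a driver's modify_qp handler typically validates the
 * requested transition before touching hardware:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */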

/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device:	Device to consider
 * @ah_attr:	address handle attribute which describes the
 *		source and destination parameters
 *
 * ib_resolve_eth_dmac() resolves the destination mac address and, for
 * unicast destinations, the L3 hop limit. It returns 0 on success or an
 * appropriate error code; on success the necessary ah_attr fields are
 * initialized.
 */
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->roce.dmac);
		}
	} else {
		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
	}
	return ret;
}

static bool is_qp_type_connected(const struct ib_qp *qp)
{
	return (qp->qp_type == IB_QPT_UC ||
		qp->qp_type == IB_QPT_RC ||
		qp->qp_type == IB_QPT_XRC_INI ||
		qp->qp_type == IB_QPT_XRC_TGT);
}

/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	const struct ib_gid_attr *old_sgid_attr_av;
	const struct ib_gid_attr *old_sgid_attr_alt_av;
	int ret;

	if (attr_mask & IB_QP_AV) {
		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
					  &old_sgid_attr_av);
		if (ret)
			return ret;
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		/*
		 * FIXME: This does not track the migration state, so if the
		 * user loads a new alternate path after the HW has migrated
		 * from primary to alternate we will keep the wrong
		 * references. This is OK for IB because the reference
		 * counting does not serve any functional purpose.
		 */
		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
					  &old_sgid_attr_alt_av);
		if (ret)
			goto out_av;

		/*
		 * Today the core code can only handle alternate paths and APM
		 * for IB. Ban them in roce mode.
		 */
		if (!(rdma_protocol_ib(qp->device,
				       attr->alt_ah_attr.port_num) &&
		      rdma_protocol_ib(qp->device, port))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * If the user provided the qp_attr then we have to resolve it. Kernel
	 * users have to provide already resolved rdma_ah_attr's.
	 */
	if (udata && (attr_mask & IB_QP_AV) &&
	    attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
	    is_qp_type_connected(qp)) {
		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
		if (ret)
			goto out;
	}

	if (rdma_ib_or_roce(qp->device, port)) {
		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s rq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->rq_psn &= 0xffffff;
		}

		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s sq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->sq_psn &= 0xffffff;
		}
	}

	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (ret)
		goto out;

	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_AV)
		qp->av_sgid_attr =
			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
			&attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
	if (attr_mask & IB_QP_ALT_PATH)
		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
	if (attr_mask & IB_QP_AV)
		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
	return ret;
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	if (!dev->ops.get_netdev)
		return -EOPNOTSUPP;

	netdev = dev->ops.get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	dev_put(netdev);

	if (!rc) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
			netdev_speed);
	}

	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
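
/*
 * Example (sketch): move a freshly created RC QP from RESET to INIT; per
 * qp_state_table above, this transition requires PKEY_INDEX, PORT and
 * ACCESS_FLAGS in addition to IB_QP_STATE. The values are placeholders.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */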

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	qp_attr->ah_attr.grh.sgid_attr = NULL;
	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;

	return qp->device->ops.query_qp ?
		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
					 qp_init_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	if (qp->qp_sec)
		ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	struct ib_qp_security *sec;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;
	sec  = qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	rdma_restrack_del(&qp->res);
	ret = qp->device->ops.destroy_qp(qp);
	if (!ret) {
		if (alt_path_sgid_attr)
			rdma_put_gid_attr(alt_path_sgid_attr);
		if (av_sgid_attr)
			rdma_put_gid_attr(av_sgid_attr);
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
		if (sec)
			ib_destroy_qp_security_end(sec);
	} else {
		if (sec)
			ib_destroy_qp_security_abort(sec);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller)
{
	struct ib_cq *cq;

	cq = device->ops.create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
		cq->res.type = RDMA_RESTRACK_CQ;
		rdma_restrack_set_task(&cq->res, caller);
		rdma_restrack_kadd(&cq->res);
	}

	return cq;
}
EXPORT_SYMBOL(__ib_create_cq);
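
/*
 * Example (sketch): kernel users normally call the ib_create_cq() wrapper
 * macro, which passes KBUILD_MODNAME as @caller; my_comp_handler and my_ctx
 * are hypothetical.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */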

int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->ops.modify_cq ?
		cq->device->ops.modify_cq(cq, cq_count,
					  cq_period) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	rdma_restrack_del(&cq->res);
	return cq->device->ops.destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->ops.resize_cq ?
		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	struct ib_dm *dm = mr->dm;
	int ret;

	rdma_restrack_del(&mr->res);
	ret = mr->device->ops.dereg_mr(mr);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (dm)
			atomic_dec(&dm->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->ops.alloc_mr)
		return ERR_PTR(-EOPNOTSUPP);

	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->dm      = NULL;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
		mr->res.type = RDMA_RESTRACK_MR;
		rdma_restrack_kadd(&mr->res);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->ops.alloc_fmr)
		return ERR_PTR(-EOPNOTSUPP);

	fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->ops.unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->ops.dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.attach_mcast)
		return -EOPNOTSUPP;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.detach_mcast)
		return -EOPNOTSUPP;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
	struct ib_xrcd *xrcd;

	if (!device->ops.alloc_xrcd)
		return ERR_PTR(-EOPNOTSUPP);

	xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(__ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->ops.dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If WQ creation succeeds, then the WQ will always be at least as large as
 * the requested size.
 * Return value: valid WQ or error.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->ops.create_wq)
		return ERR_PTR(-EOPNOTSUPP);

	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);

/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->ops.destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->ops.modify_wq)
		return -EOPNOTSUPP;

	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);

/*
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @init_attr: A list of initial attributes required to create the
 * Indirection Table.
 *
 * Note: The lifetime of init_attr->ind_tbl must not be shorter than that of
 * the created ib_rwq_ind_table object; the caller is responsible for its
 * memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->ops.create_rwq_ind_table)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->ops.create_rwq_ind_table(device,
							 init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	if (!mr->device->ops.check_mr_status)
		return -EOPNOTSUPP;

	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->ops.set_vf_link_state)
		return -EOPNOTSUPP;

	return device->ops.set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->ops.get_vf_config)
		return -EOPNOTSUPP;

	return device->ops.get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->ops.get_vf_stats)
		return -EOPNOTSUPP;

	return device->ops.get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->ops.set_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region's page list.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the total byte length of sg_nents exceeds mr->max_num_sg *
 *   page_size, then only up to mr->max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
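
/*
 * Illustrative sketch: registering a DMA-mapped scatterlist with an MR
 * allocated via ib_alloc_mr(). A short map is legal, so callers should
 * check that all entries were consumed.
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;
 *	if (n < sg_nents)
 *		return -EINVAL;	// or register the remainder separately
 */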

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
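
/*
 * Illustrative sketch: a driver's ->map_mr_sg() typically wraps
 * ib_sg_to_pages() with a set_page callback that appends each page
 * address to a driver-private list ("my_mr", "to_my_mr" and
 * "my_set_page" are hypothetical names).
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages >= mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 *	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_set_page);
 */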

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a dummy send WR to the SQ after moving the QP to the error state,
 * then wait for its completion.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe = &sdrain.cqe, },
			.opcode = IB_WR_RDMA_WRITE,
		},
	};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_post_send(qp, &swr.wr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}

/*
 * Post a dummy recv WR to the RQ after moving the QP to the error state,
 * then wait for its completion.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_post_recv(qp, &rwr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_sq)
		qp->device->ops.drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_rq)
		qp->device->ops.drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * If the device has provider-specific drain functions, then call those.
 * Otherwise call the generic drain functions __ib_drain_sq() and
 * __ib_drain_rq().  The RQ is only drained if the QP does not use an SRQ.
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
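
/*
 * Illustrative sketch: a typical teardown sequence for a QP whose CQs
 * were allocated with ib_alloc_cq(). Draining before destroying ensures
 * no completion handler runs against freed context.
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */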

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *))
{
	struct rdma_netdev_alloc_params params;
	struct net_device *netdev;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return ERR_PTR(-EOPNOTSUPP);

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return ERR_PTR(rc);

	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
				  setup, params.txqs, params.rxqs);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	return netdev;
}
EXPORT_SYMBOL(rdma_alloc_netdev);

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev)
{
	struct rdma_netdev_alloc_params params;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return -EOPNOTSUPP;

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return rc;

	return params.initialize_rdma_netdev(device, port_num,
					     netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);