#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
#include <rdma/lag.h>

#include "core_priv.h"
#include <trace/events/rdma_core.h>

static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory bind operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
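
/*
 * IB encodes the static link rate as a multiple of the 2.5 Gb/s (SDR 1X)
 * base rate. The helpers below convert between the ib_rate enum, that
 * multiplier, and the raw signalling rate in Mb/s.
 */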
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 1;
	case IB_RATE_5_GBPS:   return 2;
	case IB_RATE_10_GBPS:  return 4;
	case IB_RATE_20_GBPS:  return 8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	case IB_RATE_14_GBPS:  return 6;
	case IB_RATE_56_GBPS:  return 22;
	case IB_RATE_112_GBPS: return 45;
	case IB_RATE_168_GBPS: return 67;
	case IB_RATE_25_GBPS:  return 10;
	case IB_RATE_100_GBPS: return 40;
	case IB_RATE_200_GBPS: return 80;
	case IB_RATE_300_GBPS: return 120;
	case IB_RATE_28_GBPS:  return 11;
	case IB_RATE_50_GBPS:  return 20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;
	if (node_type == RDMA_NODE_UNSPECIFIED)
		return RDMA_TRANSPORT_UNSPECIFIED;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u32 port_num)
{
	enum rdma_transport_type lt;

	if (device->ops.get_link_layer)
		return device->ops.get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
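
/**
 * __ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */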
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;
	int ret;

	pd = rdma_zalloc_drv_obj(device, ib_pd);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
	rdma_restrack_set_name(&pd->res, caller);

	ret = device->ops.alloc_pd(pd, NULL);
	if (ret) {
		rdma_restrack_put(&pd->res);
		kfree(pd);
		return ERR_PTR(ret);
	}
	rdma_restrack_add(&pd->res);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->type = IB_MR_TYPE_DMA;
		mr->uobject = NULL;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
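
/**
 * ib_dealloc_pd_user - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */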
int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	ret = pd->device->ops.dealloc_pd(pd, udata);
	if (ret)
		return ret;

	rdma_restrack_del(&pd->res);
	kfree(pd);
	return ret;
}
EXPORT_SYMBOL(ib_dealloc_pd_user);
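
/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest: Pointer to destination ah_attr. Contents of the destination
 *        pointer are assumed to be invalid and attributes are overwritten.
 * @src: Pointer to source ah_attr.
 */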
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{
	*dest = *src;
	if (dest->grh.sgid_attr)
		rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);
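
/**
 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
 * @old: Pointer to existing ah_attr which needs to be replaced.
 *       old is assumed to be valid or zero'd
 * @new: Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * the reference to the replaced ah_attr.
 */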
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);
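
/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest: Pointer to destination ah_attr to copy to.
 *        dest is assumed to be valid or zero'd
 * @src: Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. It then transfers ownership of internal references from src
 * to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */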
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
	rdma_destroy_ah_attr(dest);
	*dest = *src;
	src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);
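
/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */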
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		return -EINVAL;

	if (ah_attr->grh.sgid_attr) {
		/*
		 * Make sure the passed sgid_attr is consistent with the
		 * parameters
		 */
		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
			return -EINVAL;
	}
	return 0;
}
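
/*
 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 */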
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{
	const struct ib_gid_attr *sgid_attr;
	struct ib_global_route *grh;
	int ret;

	*old_sgid_attr = ah_attr->grh.sgid_attr;

	ret = rdma_check_ah_attr(device, ah_attr);
	if (ret)
		return ret;

	if (!(ah_attr->ah_flags & IB_AH_GRH))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);
	if (grh->sgid_attr)
		return 0;

	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
	grh->sgid_attr = sgid_attr;
	return 0;
}

static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{
	/*
	 * Fill didn't change anything, the caller retains ownership of
	 * whatever it passed
	 */
	if (ah_attr->grh.sgid_attr == old_sgid_attr)
		return;

	/*
	 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
	 * caller doesn't see any change in the rdma_ah_attr. If we get here
	 * old_sgid_attr is NULL.
	 */
	rdma_destroy_ah_attr(ah_attr);
}

static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
		      const struct ib_gid_attr *old_attr)
{
	if (old_attr)
		rdma_put_gid_attr(old_attr);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
		return ah_attr->grh.sgid_attr;
	}
	return NULL;
}

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata,
				     struct net_device *xmit_slave)
{
	struct rdma_ah_init_attr init_attr = {};
	struct ib_device *device = pd->device;
	struct ib_ah *ah;
	int ret;

	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);

	if (!udata && !device->ops.create_ah)
		return ERR_PTR(-EOPNOTSUPP);

	ah = rdma_zalloc_drv_obj_gfp(
		device, ib_ah,
		(flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->device = device;
	ah->pd = pd;
	ah->type = ah_attr->type;
	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
	init_attr.ah_attr = ah_attr;
	init_attr.flags = flags;
	init_attr.xmit_slave = xmit_slave;

	if (udata)
		ret = device->ops.create_user_ah(ah, &init_attr, udata);
	else
		ret = device->ops.create_ah(ah, &init_attr, NULL);
	if (ret) {
		kfree(ah);
		return ERR_PTR(ret);
	}

	atomic_inc(&pd->usecnt);
	return ah;
}
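
/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */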
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct net_device *slave;
	struct ib_ah *ah;
	int ret;

	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);
	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
					   GFP_KERNEL : GFP_ATOMIC);
	if (IS_ERR(slave)) {
		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
		return ERR_CAST(slave);
	}
	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
	rdma_lag_put_ah_roce_slave(slave);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
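
/**
 * rdma_create_user_ah - Creates an address handle for the given address
 *   vector, for use by userspace consumers.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input/output buffer information needed by the
 *   provider driver.
 *
 * Resolves the destination MAC address for an ah_attr of RoCE type before
 * creating the address handle.
 */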
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int err;

	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
			     udata, NULL);

out:
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);

int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * a temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);

	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);

static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u32 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}

struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx = context;
	u16 vlan_id = 0xffff;
	int ret;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
	if (ret)
		return false;

	return ctx->vlan_id == vlan_id;
}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
		       u16 vlan_id, const union ib_gid *sgid,
		       enum ib_gid_type gid_type)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				       &context);
}

int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB ||
		   net_type == RDMA_NETWORK_ROCE_V1) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
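
/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attr must have valid port_num, sgid_index.
 */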
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int hop_limit = 0xff;
	int ret = 0;

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return ret;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);

	grh->hop_limit = hop_limit;
	return ret;
}
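
/*
 * This function initializes address handle attributes from the incoming
 * packet. The incoming packet has a dgid of the receiver node on which this
 * code is getting called, and sgid contains the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is
 * used and sgid is used as the source GID.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */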
int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	const struct ib_gid_attr *sgid_attr;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	might_sleep();

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		sgid_attr = get_sgid_attr_from_eth(device, port_num,
						   vlan_id, &dgid,
						   gid_type);
		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
		if (ret)
			rdma_destroy_ah_attr(ah_attr);

		return ret;
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if ((wc->wc_flags & IB_WC_GRH) == 0)
			return 0;

		if (dgid.global.interface_id !=
		    cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
			sgid_attr = rdma_find_gid_by_port(
				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
		} else {
			sgid_attr = rdma_get_gid_attr(device, port_num, 0);
		}

		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
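
/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:	Pointer to AH attribute structure
 * @dgid:	Destination GID
 * @flow_label:	Flow label
 * @hop_limit:	Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:	Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 * calling this function.
 */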
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
			traffic_class);
	attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
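
/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */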
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
	if (ah_attr->grh.sgid_attr) {
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
		ah_attr->grh.sgid_attr = NULL;
	}
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u32 port_num)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *old_sgid_attr;
	int ret;

	if (ah->type != ah_attr->type)
		return -EINVAL;

	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ret;

	ret = ah->device->ops.modify_ah ?
		ah->device->ops.modify_ah(ah, ah_attr) :
		-EOPNOTSUPP;

	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	ah_attr->grh.sgid_attr = NULL;

	return ah->device->ops.query_ah ?
		ah->device->ops.query_ah(ah, ah_attr) :
		-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
	struct ib_pd *pd;
	int ret;

	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

	pd = ah->pd;

	ret = ah->device->ops.destroy_ah(ah, flags);
	if (ret)
		return ret;

	atomic_dec(&pd->usecnt);
	if (sgid_attr)
		rdma_put_gid_attr(sgid_attr);

	kfree(ah);
	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah_user);
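
/**
 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 * @uobject: uobject pointer if this is not a kernel SRQ
 * @udata: udata pointer if this is not a kernel SRQ
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If SRQ creation succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */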
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata)
{
	struct ib_srq *srq;
	int ret;

	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	srq->device = pd->device;
	srq->pd = pd;
	srq->event_handler = srq_init_attr->event_handler;
	srq->srq_context = srq_init_attr->srq_context;
	srq->srq_type = srq_init_attr->srq_type;
	srq->uobject = uobject;

	if (ib_srq_has_cq(srq->srq_type)) {
		srq->ext.cq = srq_init_attr->ext.cq;
		atomic_inc(&srq->ext.cq->usecnt);
	}
	if (srq->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
		if (srq->ext.xrc.xrcd)
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
	}
	atomic_inc(&pd->usecnt);

	rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
	rdma_restrack_parent_name(&srq->res, &pd->res);

	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
	if (ret) {
		rdma_restrack_put(&srq->res);
		atomic_dec(&srq->pd->usecnt);
		if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
		if (ib_srq_has_cq(srq->srq_type))
			atomic_dec(&srq->ext.cq->usecnt);
		kfree(srq);
		return ERR_PTR(ret);
	}

	rdma_restrack_add(&srq->res);

	return srq;
}
EXPORT_SYMBOL(ib_create_srq_user);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->ops.modify_srq ?
		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
					    NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->ops.query_srq ?
		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	ret = srq->device->ops.destroy_srq(srq, udata);
	if (ret)
		return ret;

	atomic_dec(&srq->pd->usecnt);
	if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
	if (ib_srq_has_cq(srq->srq_type))
		atomic_dec(&srq->ext.cq->usecnt);
	rdma_restrack_del(&srq->res);
	kfree(srq);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq_user);
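
/* Queue pairs */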
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event,
					event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	down_read(&xrcd->tgt_qps_rwsem);
	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
	if (!real_qp) {
		up_read(&xrcd->tgt_qps_rwsem);
		return ERR_PTR(-EINVAL);
	}
	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
			  qp_open_attr->qp_context);
	up_read(&xrcd->tgt_qps_rwsem);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
					struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;
	int err;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (IS_ERR(qp))
		return qp;

	err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
			      real_qp, GFP_KERNEL));
	if (err) {
		ib_close_qp(qp);
		return ERR_PTR(err);
	}
	return qp;
}

static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
			       struct ib_qp_init_attr *attr,
			       struct ib_udata *udata,
			       struct ib_uqp_object *uobj, const char *caller)
{
	struct ib_udata dummy = {};
	struct ib_qp *qp;
	int ret;

	if (!dev->ops.create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->device = dev;
	qp->pd = pd;
	qp->uobject = uobj;
	qp->real_qp = qp;

	qp->qp_type = attr->qp_type;
	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
	qp->srq = attr->srq;
	qp->event_handler = attr->event_handler;
	qp->port = attr->port_num;
	qp->qp_context = attr->qp_context;

	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;

	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
	ret = dev->ops.create_qp(qp, attr, udata);
	if (ret)
		goto err_create;

	/*
	 * Some drivers overwrite send_cq and recv_cq internally while
	 * creating the QP; restore the caller's values here.
	 */
	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;

	ret = ib_create_qp_security(qp, dev);
	if (ret)
		goto err_security;

	rdma_restrack_add(&qp->res);
	return qp;

err_security:
	qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
err_create:
	rdma_restrack_put(&qp->res);
	kfree(qp);
	return ERR_PTR(ret);
}
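
/**
 * ib_create_qp_user - Creates a QP associated with the specified protection
 *   domain.
 * @dev: IB device
 * @pd: The protection domain associated with the QP.
 * @attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 * @udata: User data
 * @uobj: uverbs object
 * @caller: caller's build-time module name
 */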
struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
				struct ib_qp_init_attr *attr,
				struct ib_udata *udata,
				struct ib_uqp_object *uobj, const char *caller)
{
	struct ib_qp *qp, *xrc_qp;

	if (attr->qp_type == IB_QPT_XRC_TGT)
		qp = create_qp(dev, pd, attr, NULL, NULL, caller);
	else
		qp = create_qp(dev, pd, attr, udata, uobj, NULL);
	if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
		return qp;

	xrc_qp = create_xrc_qp_user(qp, attr);
	if (IS_ERR(xrc_qp)) {
		ib_destroy_qp(qp);
		return xrc_qp;
	}

	xrc_qp->uobject = uobj;
	return xrc_qp;
}
EXPORT_SYMBOL(ib_create_qp_user);

void ib_qp_usecnt_inc(struct ib_qp *qp)
{
	if (qp->pd)
		atomic_inc(&qp->pd->usecnt);
	if (qp->send_cq)
		atomic_inc(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_inc(&qp->recv_cq->usecnt);
	if (qp->srq)
		atomic_inc(&qp->srq->usecnt);
	if (qp->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_inc);

void ib_qp_usecnt_dec(struct ib_qp *qp)
{
	if (qp->rwq_ind_tbl)
		atomic_dec(&qp->rwq_ind_tbl->usecnt);
	if (qp->srq)
		atomic_dec(&qp->srq->usecnt);
	if (qp->recv_cq)
		atomic_dec(&qp->recv_cq->usecnt);
	if (qp->send_cq)
		atomic_dec(&qp->send_cq->usecnt);
	if (qp->pd)
		atomic_dec(&qp->pd->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_dec);

struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
				  struct ib_qp_init_attr *qp_init_attr,
				  const char *caller)
{
	struct ib_device *device = pd->device;
	struct ib_qp *qp;
	int ret;

	/*
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
	if (IS_ERR(qp))
		return qp;

	ib_qp_usecnt_inc(qp);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret)
			goto err;
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);
	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
		qp->integrity_en = true;

	return qp;

err:
	ib_destroy_qp(qp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_qp_kernel);

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
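
/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device:	Device to consider
 * @ah_attr:	address handle attribute which describes the
 *		destination and source addresses
 *
 * This function resolves the destination mac address and L3 hop limit. It
 * returns 0 on success or an appropriate error code. It initializes the
 * necessary ah_attr fields when the call is successful.
 */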
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->roce.dmac);
		}
	} else {
		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
	}
	return ret;
}

static bool is_qp_type_connected(const struct ib_qp *qp)
{
	return (qp->qp_type == IB_QPT_UC ||
		qp->qp_type == IB_QPT_RC ||
		qp->qp_type == IB_QPT_XRC_INI ||
		qp->qp_type == IB_QPT_XRC_TGT);
}
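
/*
 * IB core internal function to perform QP attributes modification.
 */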
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	const struct ib_gid_attr *old_sgid_attr_av;
	const struct ib_gid_attr *old_sgid_attr_alt_av;
	int ret;

	attr->xmit_slave = NULL;
	if (attr_mask & IB_QP_AV) {
		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
					  &old_sgid_attr_av);
		if (ret)
			return ret;

		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
		    is_qp_type_connected(qp)) {
			struct net_device *slave;

			/*
			 * If the user provided the qp_attr then we have to
			 * resolve it. Kernel users have to provide already
			 * resolved rdma_ah_attr's.
			 */
			if (udata) {
				ret = ib_resolve_eth_dmac(qp->device,
							  &attr->ah_attr);
				if (ret)
					goto out_av;
			}
			slave = rdma_lag_get_ah_roce_slave(qp->device,
							   &attr->ah_attr,
							   GFP_KERNEL);
			if (IS_ERR(slave)) {
				ret = PTR_ERR(slave);
				goto out_av;
			}
			attr->xmit_slave = slave;
		}
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		/*
		 * FIXME: This does not track the migration state, so if the
		 * user loads a new alternate path after the HW has migrated
		 * from primary to alternate we will keep the wrong
		 * references. This is OK for IB because the reference
		 * counting does not serve any functional purpose.
		 */
		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
					  &old_sgid_attr_alt_av);
		if (ret)
			goto out_av;

		/*
		 * Today the core code can only handle alternate paths and APM
		 * for IB. Ban them in roce mode.
		 */
		if (!(rdma_protocol_ib(qp->device,
				       attr->alt_ah_attr.port_num) &&
		      rdma_protocol_ib(qp->device, port))) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (rdma_ib_or_roce(qp->device, port)) {
		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s rq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->rq_psn &= 0xffffff;
		}

		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s sq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->sq_psn &= 0xffffff;
		}
	}

	/*
	 * Bind this qp to a counter automatically based on the rdma counter
	 * rules. This is only set in RST2INIT with port specified.
	 */
	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
		rdma_counter_bind_qp_auto(qp, attr->port_num);

	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (ret)
		goto out;

	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_AV)
		qp->av_sgid_attr =
			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
			&attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
	if (attr_mask & IB_QP_ALT_PATH)
		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
	if (attr_mask & IB_QP_AV) {
		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
	}
	return ret;
}
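
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * Returns 0 on success and the appropriate error code on failure.
 */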
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);

int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	netdev = ib_device_get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	dev_put(netdev);

	if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
			netdev_speed);
	}

	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	qp_attr->ah_attr.grh.sgid_attr = NULL;
	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;

	return qp->device->ops.query_qp ?
		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
					 qp_init_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);

	atomic_dec(&real_qp->usecnt);
	if (qp->qp_sec)
		ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;
	down_write(&xrcd->tgt_qps_rwsem);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
	else
		real_qp = NULL;
	up_write(&xrcd->tgt_qps_rwsem);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
	}

	return 0;
}

int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{
	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
	struct ib_qp_security *sec;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	sec = qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	rdma_counter_unbind_qp(qp, true);
	ret = qp->device->ops.destroy_qp(qp, udata);
	if (ret) {
		if (sec)
			ib_destroy_qp_security_abort(sec);
		return ret;
	}

	if (alt_path_sgid_attr)
		rdma_put_gid_attr(alt_path_sgid_attr);
	if (av_sgid_attr)
		rdma_put_gid_attr(av_sgid_attr);

	ib_qp_usecnt_dec(qp);
	if (sec)
		ib_destroy_qp_security_end(sec);

	rdma_restrack_del(&qp->res);
	kfree(qp);
	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp_user);
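
/* Completion queues */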
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller)
{
	struct ib_cq *cq;
	int ret;

	cq = rdma_zalloc_drv_obj(device, ib_cq);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->device = device;
	cq->uobject = NULL;
	cq->comp_handler = comp_handler;
	cq->event_handler = event_handler;
	cq->cq_context = cq_context;
	atomic_set(&cq->usecnt, 0);

	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
	rdma_restrack_set_name(&cq->res, caller);

	ret = device->ops.create_cq(cq, cq_attr, NULL);
	if (ret) {
		rdma_restrack_put(&cq->res);
		kfree(cq);
		return ERR_PTR(ret);
	}

	rdma_restrack_add(&cq->res);
	return cq;
}
EXPORT_SYMBOL(__ib_create_cq);

int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	if (cq->shared)
		return -EOPNOTSUPP;

	return cq->device->ops.modify_cq ?
		cq->device->ops.modify_cq(cq, cq_count,
					  cq_period) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);

int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
	int ret;

	if (WARN_ON_ONCE(cq->shared))
		return -EOPNOTSUPP;

	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	ret = cq->device->ops.destroy_cq(cq, udata);
	if (ret)
		return ret;

	rdma_restrack_del(&cq->res);
	kfree(cq);
	return ret;
}
EXPORT_SYMBOL(ib_destroy_cq_user);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	if (cq->shared)
		return -EOPNOTSUPP;

	return cq->device->ops.resize_cq ?
		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_resize_cq);
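
/* Memory regions */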
struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			     u64 virt_addr, int access_flags)
{
	struct ib_mr *mr;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			return ERR_PTR(-EINVAL);
		}
	}

	mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
					 access_flags, NULL);

	if (IS_ERR(mr))
		return mr;

	mr->device = pd->device;
	mr->pd = pd;
	mr->dm = NULL;
	atomic_inc(&pd->usecnt);

	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
	rdma_restrack_parent_name(&mr->res, &pd->res);
	rdma_restrack_add(&mr->res);

	return mr;
}
EXPORT_SYMBOL(ib_reg_user_mr);

int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		 u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	if (!pd->device->ops.advise_mr)
		return -EOPNOTSUPP;

	if (!num_sge)
		return 0;

	return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
					 NULL);
}
EXPORT_SYMBOL(ib_advise_mr);

int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
	struct ib_pd *pd = mr->pd;
	struct ib_dm *dm = mr->dm;
	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
	int ret;

	trace_mr_dereg(mr);
	rdma_restrack_del(&mr->res);
	ret = mr->device->ops.dereg_mr(mr, udata);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (dm)
			atomic_dec(&dm->usecnt);
		kfree(sig_attrs);
	}

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr_user);
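
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */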
struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->ops.alloc_mr) {
		mr = ERR_PTR(-EOPNOTSUPP);
		goto out;
	}

	if (mr_type == IB_MR_TYPE_INTEGRITY) {
		WARN_ON_ONCE(1);
		mr = ERR_PTR(-EINVAL);
		goto out;
	}

	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
	if (IS_ERR(mr))
		goto out;

	mr->device = pd->device;
	mr->pd = pd;
	mr->dm = NULL;
	mr->uobject = NULL;
	atomic_inc(&pd->usecnt);
	mr->need_inval = false;
	mr->type = mr_type;
	mr->sig_attrs = NULL;

	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
	rdma_restrack_parent_name(&mr->res, &pd->res);
	rdma_restrack_add(&mr->res);
out:
	trace_mr_alloc(pd, mr_type, max_num_sg, mr);
	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
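
/**
 * ib_alloc_mr_integrity() - Allocates an integrity memory region
 * @pd:                      protection domain associated with the region
 * @max_num_data_sg:         maximum data sg entries available for registration
 * @max_num_meta_sg:         maximum metadata sg entries available for
 *                           registration
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg,
 * also the integrity page/sg lists must not exceed max_num_meta_sg.
 */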
struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg)
{
	struct ib_mr *mr;
	struct ib_sig_attrs *sig_attrs;

	if (!pd->device->ops.alloc_mr_integrity ||
	    !pd->device->ops.map_mr_sg_pi) {
		mr = ERR_PTR(-EOPNOTSUPP);
		goto out;
	}

	if (!max_num_meta_sg) {
		mr = ERR_PTR(-EINVAL);
		goto out;
	}

	sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
	if (!sig_attrs) {
		mr = ERR_PTR(-ENOMEM);
		goto out;
	}

	mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
						max_num_meta_sg);
	if (IS_ERR(mr)) {
		kfree(sig_attrs);
		goto out;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->dm = NULL;
	mr->uobject = NULL;
	atomic_inc(&pd->usecnt);
	mr->need_inval = false;
	mr->type = IB_MR_TYPE_INTEGRITY;
	mr->sig_attrs = sig_attrs;

	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
	rdma_restrack_parent_name(&mr->res, &pd->res);
	rdma_restrack_add(&mr->res);
out:
	trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
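
/* Multicast groups */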
2313
2314
2315
2316static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2317{
2318 struct ib_qp_init_attr init_attr = {};
2319 struct ib_qp_attr attr = {};
2320 int num_eth_ports = 0;
2321 unsigned int port;
2322
2323
2324
2325
2326 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2327 if (attr.qp_state >= IB_QPS_INIT) {
2328 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2329 IB_LINK_LAYER_INFINIBAND)
2330 return true;
2331 goto lid_check;
2332 }
2333 }
2334
2335
2336 rdma_for_each_port(qp->device, port)
2337 if (rdma_port_get_link_layer(qp->device, port) !=
2338 IB_LINK_LAYER_INFINIBAND)
2339 num_eth_ports++;
2340
	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
2345 if (num_eth_ports)
2346 return true;
2347
	/* If all the ports are IB, we can check according to the IB spec. */
2349lid_check:
2350 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2351 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2352}
2353
2354int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2355{
2356 int ret;
2357
2358 if (!qp->device->ops.attach_mcast)
2359 return -EOPNOTSUPP;
2360
2361 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2362 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2363 return -EINVAL;
2364
2365 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2366 if (!ret)
2367 atomic_inc(&qp->usecnt);
2368 return ret;
2369}
2370EXPORT_SYMBOL(ib_attach_mcast);
2371
2372int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2373{
2374 int ret;
2375
2376 if (!qp->device->ops.detach_mcast)
2377 return -EOPNOTSUPP;
2378
2379 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2380 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2381 return -EINVAL;
2382
2383 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2384 if (!ret)
2385 atomic_dec(&qp->usecnt);
2386 return ret;
2387}
2388EXPORT_SYMBOL(ib_detach_mcast);
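
/*
 * Illustrative sketch: attaching a UD QP to a multicast group and
 * detaching it again on teardown. The group GID/LID would normally come
 * from an SA join; the function name is hypothetical.
 */
static inline int example_mcast_attach(struct ib_qp *qp,
				       union ib_gid *mgid, u16 mlid)
{
	int ret;

	/* qp must be IB_QPT_UD and mgid a multicast GID, or -EINVAL. */
	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic ... */

	return ib_detach_mcast(qp, mgid, mlid);
}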
2389
/**
 * ib_alloc_xrcd_user - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @inode: inode to connect XRCD
 * @udata: Valid user data or NULL for kernel object
 */
2396struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2397 struct inode *inode, struct ib_udata *udata)
2398{
2399 struct ib_xrcd *xrcd;
2400 int ret;
2401
2402 if (!device->ops.alloc_xrcd)
2403 return ERR_PTR(-EOPNOTSUPP);
2404
2405 xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2406 if (!xrcd)
2407 return ERR_PTR(-ENOMEM);
2408
2409 xrcd->device = device;
2410 xrcd->inode = inode;
2411 atomic_set(&xrcd->usecnt, 0);
2412 init_rwsem(&xrcd->tgt_qps_rwsem);
2413 xa_init(&xrcd->tgt_qps);
2414
2415 ret = device->ops.alloc_xrcd(xrcd, udata);
2416 if (ret)
2417 goto err;
2418 return xrcd;
2419err:
2420 kfree(xrcd);
2421 return ERR_PTR(ret);
2422}
2423EXPORT_SYMBOL(ib_alloc_xrcd_user);
2424
/**
 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 */
2430int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2431{
2432 int ret;
2433
2434 if (atomic_read(&xrcd->usecnt))
2435 return -EBUSY;
2436
2437 WARN_ON(!xa_empty(&xrcd->tgt_qps));
2438 ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2439 if (ret)
2440 return ret;
2441 kfree(xrcd);
2442 return ret;
2443}
2444EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2445
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values
 * allocated on return.
 */
2460struct ib_wq *ib_create_wq(struct ib_pd *pd,
2461 struct ib_wq_init_attr *wq_attr)
2462{
2463 struct ib_wq *wq;
2464
2465 if (!pd->device->ops.create_wq)
2466 return ERR_PTR(-EOPNOTSUPP);
2467
2468 wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2469 if (!IS_ERR(wq)) {
2470 wq->event_handler = wq_attr->event_handler;
2471 wq->wq_context = wq_attr->wq_context;
2472 wq->wq_type = wq_attr->wq_type;
2473 wq->cq = wq_attr->cq;
2474 wq->device = pd->device;
2475 wq->pd = pd;
2476 wq->uobject = NULL;
2477 atomic_inc(&pd->usecnt);
2478 atomic_inc(&wq_attr->cq->usecnt);
2479 atomic_set(&wq->usecnt, 0);
2480 }
2481 return wq;
2482}
2483EXPORT_SYMBOL(ib_create_wq);
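
/*
 * Illustrative sketch: creating a receive WQ, e.g. as one member of an
 * RSS indirection table. The sizes are hypothetical; the attribute
 * fields are from struct ib_wq_init_attr in rdma/ib_verbs.h.
 */
static inline struct ib_wq *example_create_rq_wq(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type	= IB_WQT_RQ,	/* receive work queue */
		.max_wr		= 256,		/* requested depth */
		.max_sge	= 1,		/* sges per receive */
		.cq		= cq,		/* completion queue to use */
	};

	return ib_create_wq(pd, &wq_attr);
}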
2484
/**
 * ib_destroy_wq_user - Destroys the specified user WQ.
 * @wq: The WQ to destroy.
 * @udata: Valid user data
 */
2490int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2491{
2492 struct ib_cq *cq = wq->cq;
2493 struct ib_pd *pd = wq->pd;
2494 int ret;
2495
2496 if (atomic_read(&wq->usecnt))
2497 return -EBUSY;
2498
2499 ret = wq->device->ops.destroy_wq(wq, udata);
2500 if (ret)
2501 return ret;
2502
2503 atomic_dec(&pd->usecnt);
2504 atomic_dec(&cq->usecnt);
2505 return ret;
2506}
2507EXPORT_SYMBOL(ib_destroy_wq_user);
2508
2509int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2510 struct ib_mr_status *mr_status)
2511{
2512 if (!mr->device->ops.check_mr_status)
2513 return -EOPNOTSUPP;
2514
2515 return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2516}
2517EXPORT_SYMBOL(ib_check_mr_status);
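
/*
 * Illustrative sketch: checking an integrity MR after I/O completes.
 * The field names follow struct ib_mr_status; the helper name and the
 * error printout are hypothetical.
 */
static inline int example_check_pi_status(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("PI error at offset %llu (expected %x, actual %x)\n",
		       mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	return 0;
}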
2518
2519int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2520 int state)
2521{
2522 if (!device->ops.set_vf_link_state)
2523 return -EOPNOTSUPP;
2524
2525 return device->ops.set_vf_link_state(device, vf, port, state);
2526}
2527EXPORT_SYMBOL(ib_set_vf_link_state);
2528
2529int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2530 struct ifla_vf_info *info)
2531{
2532 if (!device->ops.get_vf_config)
2533 return -EOPNOTSUPP;
2534
2535 return device->ops.get_vf_config(device, vf, port, info);
2536}
2537EXPORT_SYMBOL(ib_get_vf_config);
2538
2539int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2540 struct ifla_vf_stats *stats)
2541{
2542 if (!device->ops.get_vf_stats)
2543 return -EOPNOTSUPP;
2544
2545 return device->ops.get_vf_stats(device, vf, port, stats);
2546}
2547EXPORT_SYMBOL(ib_get_vf_stats);
2548
2549int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2550 int type)
2551{
2552 if (!device->ops.set_vf_guid)
2553 return -EOPNOTSUPP;
2554
2555 return device->ops.set_vf_guid(device, vf, port, guid, type);
2556}
2557EXPORT_SYMBOL(ib_set_vf_guid);
2558
2559int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2560 struct ifla_vf_guid *node_guid,
2561 struct ifla_vf_guid *port_guid)
2562{
2563 if (!device->ops.get_vf_guid)
2564 return -EOPNOTSUPP;
2565
2566 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2567}
2568EXPORT_SYMBOL(ib_get_vf_guid);
2569
/**
 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
 *     information) and set an appropriate memory region for registration.
 * @mr:             memory region
 * @data_sg:        dma mapped scatterlist for data
 * @data_sg_nents:  number of entries in data_sg
 * @data_sg_offset: offset in bytes into data_sg
 * @meta_sg:        dma mapped scatterlist for metadata
 * @meta_sg_nents:  number of entries in meta_sg
 * @meta_sg_offset: offset in bytes into meta_sg
 * @page_size:      page vector desired page size
 *
 * Constraints:
 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
 *
 * Return: 0 on success.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
2589int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2590 int data_sg_nents, unsigned int *data_sg_offset,
2591 struct scatterlist *meta_sg, int meta_sg_nents,
2592 unsigned int *meta_sg_offset, unsigned int page_size)
2593{
2594 if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2595 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2596 return -EOPNOTSUPP;
2597
2598 mr->page_size = page_size;
2599
2600 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2601 data_sg_offset, meta_sg,
2602 meta_sg_nents, meta_sg_offset);
2603}
2604EXPORT_SYMBOL(ib_map_mr_sg_pi);
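
/*
 * Illustrative sketch: registering data + protection buffers through an
 * integrity MR. sig_attrs was configured when the MR was allocated, and
 * both scatterlists are assumed to be DMA mapped already; the function
 * name is hypothetical.
 */
static inline int example_map_pi(struct ib_mr *mr,
				 struct scatterlist *data_sg, int data_nents,
				 struct scatterlist *prot_sg, int prot_nents)
{
	/* A NULL offset pointer means both lists start at offset 0. */
	return ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			       prot_sg, prot_nents, NULL, PAGE_SIZE);
}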
2605
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set the resulting page vector on the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
2632int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2633 unsigned int *sg_offset, unsigned int page_size)
2634{
2635 if (unlikely(!mr->device->ops.map_mr_sg))
2636 return -EOPNOTSUPP;
2637
2638 mr->page_size = page_size;
2639
2640 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2641}
2642EXPORT_SYMBOL(ib_map_mr_sg);
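
/*
 * Illustrative sketch: the classic FRWR flow built on ib_map_mr_sg().
 * The caller has already DMA-mapped "sg"; the function name and the
 * access-flag choice are hypothetical.
 */
static inline int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				   struct scatterlist *sg, int sg_nents,
				   struct ib_cqe *reg_cqe)
{
	struct ib_reg_wr reg_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* partial mapping */

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.wr.wr_cqe = reg_cqe;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}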
2643
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of the given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
2666int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2667 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2668{
2669 struct scatterlist *sg;
2670 u64 last_end_dma_addr = 0;
2671 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2672 unsigned int last_page_off = 0;
2673 u64 page_mask = ~((u64)mr->page_size - 1);
2674 int i, ret;
2675
2676 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2677 return -EINVAL;
2678
2679 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2680 mr->length = 0;
2681
2682 for_each_sg(sgl, sg, sg_nents, i) {
2683 u64 dma_addr = sg_dma_address(sg) + sg_offset;
2684 u64 prev_addr = dma_addr;
2685 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2686 u64 end_dma_addr = dma_addr + dma_len;
2687 u64 page_addr = dma_addr & page_mask;
2688
		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
2694 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
2696 if (last_end_dma_addr != dma_addr)
2697 break;
2698
			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
2704 goto next_page;
2705 }
2706
2707 do {
2708 ret = set_page(mr, page_addr);
2709 if (unlikely(ret < 0)) {
2710 sg_offset = prev_addr - sg_dma_address(sg);
2711 mr->length += prev_addr - dma_addr;
2712 if (sg_offset_p)
2713 *sg_offset_p = sg_offset;
2714 return i || sg_offset ? i : ret;
2715 }
2716 prev_addr = page_addr;
2717next_page:
2718 page_addr += mr->page_size;
2719 } while (page_addr < end_dma_addr);
2720
2721 mr->length += dma_len;
2722 last_end_dma_addr = end_dma_addr;
2723 last_page_off = end_dma_addr & ~page_mask;
2724
2725 sg_offset = 0;
2726 }
2727
2728 if (sg_offset_p)
2729 *sg_offset_p = 0;
2730 return i;
2731}
2732EXPORT_SYMBOL(ib_sg_to_pages);
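
/*
 * Illustrative sketch: how a driver typically plugs into ib_sg_to_pages()
 * from its ->map_mr_sg() hook. struct example_mr and its fields are
 * hypothetical stand-ins for a driver's private MR structure.
 */
struct example_mr {
	struct ib_mr	ibmr;
	__le64		*pages;		/* device-format page list */
	u32		npages;
	u32		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_le64(addr);
	return 0;
}

static inline int example_map_mr_sg(struct ib_mr *ibmr,
				    struct scatterlist *sg, int sg_nents,
				    unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      example_set_page);
}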
2733
2734struct ib_drain_cqe {
2735 struct ib_cqe cqe;
2736 struct completion done;
2737};
2738
2739static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2740{
2741 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2742 cqe);
2743
2744 complete(&cqe->done);
2745}
2746
/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
2750static void __ib_drain_sq(struct ib_qp *qp)
2751{
2752 struct ib_cq *cq = qp->send_cq;
2753 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2754 struct ib_drain_cqe sdrain;
2755 struct ib_rdma_wr swr = {
2756 .wr = {
2757 .next = NULL,
2758 { .wr_cqe = &sdrain.cqe, },
2759 .opcode = IB_WR_RDMA_WRITE,
2760 },
2761 };
2762 int ret;
2763
2764 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2765 if (ret) {
2766 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2767 return;
2768 }
2769
2770 sdrain.cqe.done = ib_drain_qp_done;
2771 init_completion(&sdrain.done);
2772
2773 ret = ib_post_send(qp, &swr.wr, NULL);
2774 if (ret) {
2775 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2776 return;
2777 }
2778
2779 if (cq->poll_ctx == IB_POLL_DIRECT)
2780 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2781 ib_process_cq_direct(cq, -1);
2782 else
2783 wait_for_completion(&sdrain.done);
2784}
2785
/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
2789static void __ib_drain_rq(struct ib_qp *qp)
2790{
2791 struct ib_cq *cq = qp->recv_cq;
2792 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2793 struct ib_drain_cqe rdrain;
2794 struct ib_recv_wr rwr = {};
2795 int ret;
2796
2797 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2798 if (ret) {
2799 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2800 return;
2801 }
2802
2803 rwr.wr_cqe = &rdrain.cqe;
2804 rdrain.cqe.done = ib_drain_qp_done;
2805 init_completion(&rdrain.done);
2806
2807 ret = ib_post_recv(qp, &rwr, NULL);
2808 if (ret) {
2809 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2810 return;
2811 }
2812
2813 if (cq->poll_ctx == IB_POLL_DIRECT)
2814 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2815 ib_process_cq_direct(cq, -1);
2816 else
2817 wait_for_completion(&rdrain.done);
2818}
2819
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
2839void ib_drain_sq(struct ib_qp *qp)
2840{
2841 if (qp->device->ops.drain_sq)
2842 qp->device->ops.drain_sq(qp);
2843 else
2844 __ib_drain_sq(qp);
2845 trace_cq_drain_complete(qp->send_cq);
2846}
2847EXPORT_SYMBOL(ib_drain_sq);
2848
/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
2868void ib_drain_rq(struct ib_qp *qp)
2869{
2870 if (qp->device->ops.drain_rq)
2871 qp->device->ops.drain_rq(qp);
2872 else
2873 __ib_drain_rq(qp);
2874 trace_cq_drain_complete(qp->recv_cq);
2875}
2876EXPORT_SYMBOL(ib_drain_rq);
2877
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * If the device has provider-specific drain functions, then
 * call those.  Otherwise call the generic drain functions
 * __ib_drain_sq() and __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
2893void ib_drain_qp(struct ib_qp *qp)
2894{
2895 ib_drain_sq(qp);
2896 if (!qp->srq)
2897 ib_drain_rq(qp);
2898}
2899EXPORT_SYMBOL(ib_drain_qp);
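
/*
 * Illustrative sketch: the usual teardown ordering for a connected QP.
 * Draining requires CQs allocated with ib_alloc_cq() and no concurrent
 * posters, per the kernel-doc above; the function name is hypothetical.
 */
static inline void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flush and reap all outstanding WRs */
	ib_destroy_qp(qp);	/* now safe: no completions can arrive */
}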
2900
2901struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
2902 enum rdma_netdev_t type, const char *name,
2903 unsigned char name_assign_type,
2904 void (*setup)(struct net_device *))
2905{
2906 struct rdma_netdev_alloc_params params;
2907 struct net_device *netdev;
2908 int rc;
2909
2910 if (!device->ops.rdma_netdev_get_params)
2911 return ERR_PTR(-EOPNOTSUPP);
2912
2913 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
2915 if (rc)
2916 return ERR_PTR(rc);
2917
2918 netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2919 setup, params.txqs, params.rxqs);
2920 if (!netdev)
2921 return ERR_PTR(-ENOMEM);
2922
2923 return netdev;
2924}
2925EXPORT_SYMBOL(rdma_alloc_netdev);
2926
2927int rdma_init_netdev(struct ib_device *device, u32 port_num,
2928 enum rdma_netdev_t type, const char *name,
2929 unsigned char name_assign_type,
2930 void (*setup)(struct net_device *),
2931 struct net_device *netdev)
2932{
2933 struct rdma_netdev_alloc_params params;
2934 int rc;
2935
2936 if (!device->ops.rdma_netdev_get_params)
2937 return -EOPNOTSUPP;
2938
2939 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
2941 if (rc)
2942 return rc;
2943
2944 return params.initialize_rdma_netdev(device, port_num,
2945 netdev, params.param);
2946}
2947EXPORT_SYMBOL(rdma_init_netdev);
2948
2949void __rdma_block_iter_start(struct ib_block_iter *biter,
2950 struct scatterlist *sglist, unsigned int nents,
2951 unsigned long pgsz)
2952{
2953 memset(biter, 0, sizeof(struct ib_block_iter));
2954 biter->__sg = sglist;
2955 biter->__sg_nents = nents;
2956
	/* Driver provides best block size to use */
2958 biter->__pg_bit = __fls(pgsz);
2959}
2960EXPORT_SYMBOL(__rdma_block_iter_start);
2961
2962bool __rdma_block_iter_next(struct ib_block_iter *biter)
2963{
2964 unsigned int block_offset;
2965
2966 if (!biter->__sg_nents || !biter->__sg)
2967 return false;
2968
2969 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2970 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2971 biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2972
2973 if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2974 biter->__sg_advance = 0;
2975 biter->__sg = sg_next(biter->__sg);
2976 biter->__sg_nents--;
2977 }
2978
2979 return true;
2980}
2981EXPORT_SYMBOL(__rdma_block_iter_next);
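
/*
 * Illustrative sketch: walking a DMA-mapped scatterlist in fixed-size
 * blocks with the rdma_for_each_block() helper from rdma/ib_verbs.h.
 * The emit callback is a hypothetical consumer of each block address.
 */
static inline void example_walk_blocks(struct scatterlist *sg,
				       unsigned int nents,
				       unsigned long pgsz,
				       void (*emit)(u64 dma_addr))
{
	struct ib_block_iter biter;

	rdma_for_each_block(sg, &biter, nents, pgsz)
		emit(rdma_block_iter_dma_address(&biter));
}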
2982
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for the drivers.
 * @descs: array of static descriptors
 * @num_counters: number of elements in array
 * @lifespan: milliseconds between updates
 */
2990struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
2991 const struct rdma_stat_desc *descs, int num_counters,
2992 unsigned long lifespan)
2993{
2994 struct rdma_hw_stats *stats;
2995
2996 stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
2997 if (!stats)
2998 return NULL;
2999
3000 stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
3001 sizeof(*stats->is_disabled), GFP_KERNEL);
3002 if (!stats->is_disabled)
3003 goto err;
3004
3005 stats->descs = descs;
3006 stats->num_counters = num_counters;
3007 stats->lifespan = msecs_to_jiffies(lifespan);
3008 mutex_init(&stats->lock);
3009
3010 return stats;
3011
3012err:
3013 kfree(stats);
3014 return NULL;
3015}
3016EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
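
/*
 * Illustrative sketch: how a driver's hw-stats allocation hook typically
 * uses this helper. The counter names are hypothetical;
 * RDMA_HW_STATS_DEFAULT_LIFESPAN is the stock update interval.
 */
static const struct rdma_stat_desc example_descs[] = {
	{ .name = "example_rx_packets" },
	{ .name = "example_tx_packets" },
};

static inline struct rdma_hw_stats *example_alloc_stats(void)
{
	return rdma_alloc_hw_stats_struct(example_descs,
					  ARRAY_SIZE(example_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}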
3017
/**
 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
 * @stats: statistics to release
 */
3022void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
3023{
3024 if (!stats)
3025 return;
3026
3027 kfree(stats->is_disabled);
3028 kfree(stats);
3029}
3030EXPORT_SYMBOL(rdma_free_hw_stats_struct);
3031