#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

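/*
 * Tear down a user context: free every mmap bookkeeping entry that was
 * handed out to userspace, then release the per-context uctx resources
 * back to the rdev.
 */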
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;

        pr_debug("context %p\n", context);
        rhp = to_c4iw_dev(ucontext->ibucontext.device);

        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}

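/*
 * Set up a new user context.  If the user library passed a response
 * buffer large enough for the full ABI, export the device status page:
 * reserve an mmap key, copy the key and page size back to userspace,
 * and record the key -> physical-address mapping so a later mmap() on
 * that key can find the page.  Older libcxgb4 builds pass a smaller
 * buffer, in which case the status page is simply disabled.
 */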
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct ib_device *ibdev = ucontext->device;
        struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        pr_debug("ibdev %p\n", ibdev);
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return 0;
err_mm:
        kfree(mm);
err:
        return ret;
}

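/*
 * mmap() handler for all c4iw user mappings.  The user library encodes
 * the mmap key (handed out at ucontext/queue allocation time) in the
 * page offset; we look the key up in the per-context list, drop the
 * entry (keys are single-use), and choose page protections based on
 * which physical range the address falls in: BAR0 registers, BAR2
 * doorbell/on-chip queue memory, or ordinary host DMA memory.
 */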
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr;

        pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
                 key, len);

        if (vma->vm_start & (PAGE_SIZE-1))
                return -EINVAL;

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
            (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
                    pci_resource_len(rdev->lldi.pdev, 0)))) {

                /*
                 * Map adapter register space in BAR0: always uncached.
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
                   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
                    pci_resource_len(rdev->lldi.pdev, 2)))) {

                /*
                 * Map user doorbell or on-chip queue memory in BAR2:
                 * write-combined wherever the chip supports it (on-chip
                 * queue memory always, doorbells only on T5 and later).
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                else {
                        if (!is_t4(rdev->lldi.adapter_type))
                                vma->vm_page_prot =
                                        t4_pgprot_wc(vma->vm_page_prot);
                        else
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ, CQ, or status page memory allocated from
                 * contiguous host DMA memory: default protections.
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}

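/*
 * PDs are a fixed-size hardware resource: allocation pulls an id out
 * of the rdev's pdid table and, for user PDs, reports it back through
 * udata; deallocation returns the id.  The stats mutex guards only the
 * current/max bookkeeping.
 */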
static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
}

static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct ib_device *ibdev = pd->device;
        u32 pdid;
        struct c4iw_dev *rhp;

        pr_debug("ibdev %p\n", ibdev);
        rhp = to_c4iw_dev(ibdev);
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return -EINVAL;

        php->pdid = pdid;
        php->rhp = rhp;
        if (udata) {
                struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};

                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        c4iw_deallocate_pd(&php->ibpd, udata);
                        return -EFAULT;
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);
        pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
        return 0;
}

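/*
 * iWARP has no P_Key or GID tables in the InfiniBand sense: the single
 * pkey is reported as 0 and the per-port "GID" is synthesized from the
 * port netdev's MAC address.
 */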
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
        pr_debug("ibdev %p\n", ibdev);
        *pkey = 0;
        return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct c4iw_dev *dev;

        pr_debug("ibdev %p, port %d, index %d, gid %p\n",
                 ibdev, port, index, gid);
        if (!port)
                return -EINVAL;
        dev = to_c4iw_dev(ibdev);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
        return 0;
}

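/*
 * Fill in device attributes from the LLD info block.  Everything here
 * is static for the life of the adapter: limits come from the hardware
 * queue configuration and the virtual-resource ranges the LLD carved
 * out for RDMA.
 */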
static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                             struct ib_udata *uhw)
{
        struct c4iw_dev *dev;

        pr_debug("ibdev %p\n", ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        dev = to_c4iw_dev(ibdev);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
        props->max_srq = dev->rdev.lldi.vr->srq.size;
        props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_srq_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_send_sge = min(T4_MAX_SEND_SGE, T4_MAX_WRITE_SGE);
        props->max_recv_sge = T4_MAX_RECV_SGE;
        props->max_srq_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
        props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
                                    c4iw_max_read_depth);
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->max_cq = dev->rdev.lldi.vr->qp.size;
        props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len =
                t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);

        return 0;
}

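/*
 * Port state is derived rather than tracked: a port with carrier and
 * at least one IPv4 address is ACTIVE, with carrier but no address is
 * INIT, and without carrier is DOWN.  (iWARP connections need an IP
 * address, hence the in_device check.)
 */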
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        pr_debug("ibdev %p\n", ibdev);

        dev = to_c4iw_dev(ibdev);
        netdev = dev->rdev.lldi.ports[port-1];

        props->max_mtu = IB_MTU_4096;
        props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}

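/*
 * sysfs attributes exposed under the ib device: chip revision, the
 * LLD driver name as the HCA type, and PCI vendor/device as a
 * stand-in board id.
 */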
static ssize_t hw_rev_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct c4iw_dev *c4iw_dev =
                        rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

        pr_debug("dev 0x%p\n", dev);
        return sprintf(buf, "%d\n",
                       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct c4iw_dev *c4iw_dev =
                        rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

        pr_debug("dev 0x%p\n", dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct c4iw_dev *c4iw_dev =
                        rdma_device_to_drv_device(dev, struct c4iw_dev, ibdev);

        pr_debug("dev 0x%p\n", dev);
        return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                       c4iw_dev->rdev.lldi.pdev->device);
}
static DEVICE_ATTR_RO(board_id);

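/*
 * Device-wide (not per-port) hardware counters: adapter TCP segment
 * and reset statistics, split by IP version.  The enum indexes both
 * the name table below and the values filled in by c4iw_get_mib().
 */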
enum counters {
        IP4INSEGS,
        IP4OUTSEGS,
        IP4RETRANSSEGS,
        IP4OUTRSTS,
        IP6INSEGS,
        IP6OUTSEGS,
        IP6RETRANSSEGS,
        IP6OUTRSTS,
        NR_COUNTERS
};

static const char * const names[] = {
        [IP4INSEGS] = "ip4InSegs",
        [IP4OUTSEGS] = "ip4OutSegs",
        [IP4RETRANSSEGS] = "ip4RetransSegs",
        [IP4OUTRSTS] = "ip4OutRsts",
        [IP6INSEGS] = "ip6InSegs",
        [IP6OUTSEGS] = "ip6OutSegs",
        [IP6RETRANSSEGS] = "ip6RetransSegs",
        [IP6OUTRSTS] = "ip6OutRsts"
};

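/*
 * Allocate the stats struct only for port_num 0, which the core uses
 * to request device-wide rather than per-port counters.
 */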
static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
                                              u8 port_num)
{
        BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

        if (port_num != 0)
                return NULL;

        return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int c4iw_get_mib(struct ib_device *ibdev,
                        struct rdma_hw_stats *stats,
                        u8 port, int index)
{
        struct tp_tcp_stats v4, v6;
        struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        stats->value[IP4INSEGS] = v4.tcp_in_segs;
        stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
        stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
        stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
        stats->value[IP6INSEGS] = v6.tcp_in_segs;
        stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
        stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
        stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;

        return stats->num_counters;
}

static struct attribute *c4iw_class_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        NULL
};

static const struct attribute_group c4iw_attr_group = {
        .attrs = c4iw_class_attributes,
};

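/*
 * Immutable port data cached by the core at registration time: iWARP
 * core capabilities plus the (trivial) pkey/gid table sizes reported
 * by c4iw_query_port().
 */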
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;

        return 0;
}

static void get_dev_fw_str(struct ib_device *dev, char *str)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev);
        pr_debug("dev 0x%p\n", dev);

        snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
                 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
                 FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
                 FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
                 FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

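/*
 * Restrack hook: dispatch to the per-resource-type dump helper if one
 * exists, otherwise report nothing for that resource type.
 */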
static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
{
        return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
                c4iw_restrack_funcs[res->type]) ?
                c4iw_restrack_funcs[res->type](msg, res) : 0;
}

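/*
 * Verbs entry points.  The core copies these into the ib_device at
 * ib_set_device_ops() time; the INIT_RDMA_OBJ_SIZE() entries let the
 * core allocate the driver-private PD/CQ/SRQ/ucontext objects itself.
 */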
static const struct ib_device_ops c4iw_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_CXGB4,
        .uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION,

        .alloc_hw_stats = c4iw_alloc_stats,
        .alloc_mr = c4iw_alloc_mr,
        .alloc_mw = c4iw_alloc_mw,
        .alloc_pd = c4iw_allocate_pd,
        .alloc_ucontext = c4iw_alloc_ucontext,
        .create_cq = c4iw_create_cq,
        .create_qp = c4iw_create_qp,
        .create_srq = c4iw_create_srq,
        .dealloc_mw = c4iw_dealloc_mw,
        .dealloc_pd = c4iw_deallocate_pd,
        .dealloc_ucontext = c4iw_dealloc_ucontext,
        .dereg_mr = c4iw_dereg_mr,
        .destroy_cq = c4iw_destroy_cq,
        .destroy_qp = c4iw_destroy_qp,
        .destroy_srq = c4iw_destroy_srq,
        .fill_res_entry = fill_res_entry,
        .get_dev_fw_str = get_dev_fw_str,
        .get_dma_mr = c4iw_get_dma_mr,
        .get_hw_stats = c4iw_get_mib,
        .get_port_immutable = c4iw_port_immutable,
        .iw_accept = c4iw_accept_cr,
        .iw_add_ref = c4iw_qp_add_ref,
        .iw_connect = c4iw_connect,
        .iw_create_listen = c4iw_create_listen,
        .iw_destroy_listen = c4iw_destroy_listen,
        .iw_get_qp = c4iw_get_qp,
        .iw_reject = c4iw_reject_cr,
        .iw_rem_ref = c4iw_qp_rem_ref,
        .map_mr_sg = c4iw_map_mr_sg,
        .mmap = c4iw_mmap,
        .modify_qp = c4iw_ib_modify_qp,
        .modify_srq = c4iw_modify_srq,
        .poll_cq = c4iw_poll_cq,
        .post_recv = c4iw_post_receive,
        .post_send = c4iw_post_send,
        .post_srq_recv = c4iw_post_srq_recv,
        .query_device = c4iw_query_device,
        .query_gid = c4iw_query_gid,
        .query_pkey = c4iw_query_pkey,
        .query_port = c4iw_query_port,
        .query_qp = c4iw_ib_query_qp,
        .reg_user_mr = c4iw_reg_user_mr,
        .req_notify_cq = c4iw_arm_cq,
        INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};

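/*
 * Associate each LLD port's netdev with the matching ib_device port
 * (port numbers are 1-based on the ib_device side).
 */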
static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
{
        int ret;
        int i;

        for (i = 0; i < rdev->lldi.nports; i++) {
                ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
                                           i + 1);
                if (ret)
                        return ret;
        }
        return 0;
}

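/*
 * Deferred device registration, run from the uld context's work item:
 * fill in the ib_device identity (node GUID from the port 0 MAC,
 * capability flags, supported uverbs commands), wire up the sysfs
 * group, ops table, and netdevs, then register with the RDMA core.
 * On any failure the whole uld context is torn down.
 */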
void c4iw_register_device(struct work_struct *work)
{
        int ret;
        struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
        struct c4iw_dev *dev = ctx->dev;

        pr_debug("c4iw_dev %p\n", dev);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV) |
            (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
        memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
        dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
        dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev;

        memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
               sizeof(dev->ibdev.iw_ifname));

        rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
        ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
        ret = set_netdevs(&dev->ibdev, &dev->rdev);
        if (ret)
                goto err_dealloc_ctx;
        ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
        if (ret)
                goto err_dealloc_ctx;
        return;

err_dealloc_ctx:
        pr_err("%s - Failed registering iwarp device: %d\n",
               pci_name(ctx->lldi.pdev), ret);
        c4iw_dealloc(ctx);
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
        pr_debug("c4iw_dev %p\n", dev);
        ib_unregister_device(&dev->ibdev);
}