#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

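/* Debug helpers: dump a VF, its vNIC and its QP groups to a buffer/log. */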
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char buf[1000];
	usnic_ib_dump_vf(vf, buf, sizeof(buf));
	usnic_dbg("%s\n", buf);
}

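/*
 * Walk every QP group owned by this device and force those in INIT, RTR or
 * RTS into the ERR state (e.g. on link down, MAC change or PF reset).
 * Caller must hold us_ibdev->usdev_lock.
 */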
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
			    cur_state == IB_QPS_RTR ||
			    cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						  qp_grp->grp_id,
						  usnic_ib_qp_grp_state_to_string(cur_state),
						  usnic_ib_qp_grp_state_to_string(IB_QPS_ERR));
				}
			}
		}
	}
}

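/*
 * Handle a netdev notifier event on the netdev backing this ib_device:
 * propagate carrier, MAC and MTU changes into the forwarding device, move
 * active QP groups to ERR where needed, and dispatch the matching IB
 * port/GID events to consumers.
 */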
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
		    netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
			   !netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
				  netdev_cmd_to_name(event),
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
			    sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
				   dev_name(&us_ibdev->ib_dev.dev),
				   us_ibdev->ufdev->mac,
				   netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
				   dev_name(&us_ibdev->ib_dev.dev),
				   us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
			  netdev_cmd_to_name(event),
			  dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

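/* Netdev notifier: resolve the netdev to a usNIC ib_device and handle the event. */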
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
				    unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct ib_device *ibdev;
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_usdev_event(us_ibdev, event);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};

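/*
 * Handle an IPv4 address change on the underlying netdev.  The usNIC GID
 * encodes the interface MAC and IPv4 address, so address changes are
 * surfaced to consumers as IB_EVENT_GID_CHANGE.
 */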
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
				      unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
			   netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
			   netdev_cmd_to_name(event),
			   &us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
			   netdev_cmd_to_name(event),
			   dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
				   unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_inet_event(us_ibdev, event, ptr);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};

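/* Advertise the immutable attributes of the single usNIC port. */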
static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

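/* Report the underlying netdev's firmware version as the IB device FW string. */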
static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

static const struct ib_device_ops usnic_dev_ops = {
	.alloc_pd = usnic_ib_alloc_pd,
	.alloc_ucontext = usnic_ib_alloc_ucontext,
	.create_cq = usnic_ib_create_cq,
	.create_qp = usnic_ib_create_qp,
	.dealloc_pd = usnic_ib_dealloc_pd,
	.dealloc_ucontext = usnic_ib_dealloc_ucontext,
	.dereg_mr = usnic_ib_dereg_mr,
	.destroy_cq = usnic_ib_destroy_cq,
	.destroy_qp = usnic_ib_destroy_qp,
	.get_dev_fw_str = usnic_get_dev_fw_str,
	.get_link_layer = usnic_ib_port_link_layer,
	.get_port_immutable = usnic_port_immutable,
	.mmap = usnic_ib_mmap,
	.modify_qp = usnic_ib_modify_qp,
	.query_device = usnic_ib_query_device,
	.query_gid = usnic_ib_query_gid,
	.query_pkey = usnic_ib_query_pkey,
	.query_port = usnic_ib_query_port,
	.query_qp = usnic_ib_query_qp,
	.reg_user_mr = usnic_ib_reg_mr,
	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};

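/*
 * Create and register the ib_device for a physical function.  Called when
 * the first VF belonging to this PF is probed.
 */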
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;
	int ret;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
			  netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.owner = THIS_MODULE;
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;
	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);

	us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
	rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);

	ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
	if (ret)
		goto err_fwd_dealloc;

	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d"))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	ind = in_dev_get(netdev);
	if (ind->ifa_list)
		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
				     ind->ifa_list->ifa_address);
	in_dev_put(ind);

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
			    us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
	       sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocating device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

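/* kref release: the last VF of this PF is gone, so tear down its ib_device. */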
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				 &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			found = true;
			break;
		}
	}

	mutex_unlock(&usnic_ib_ibdev_list_lock);
	if (found)
		usnic_ib_device_remove(us_ibdev);
	else
		WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}

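/*
 * Find the ib_device of the PF that owns this VF's vNIC, creating and
 * registering it on first use, and take a reference on it.
 */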
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

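/*
 * PCI probe for a usNIC VF: enable the device, allocate its vNIC and attach
 * the VF to its parent PF's ib_device.
 */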
static int usnic_ib_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
			  pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
			  pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
			  pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
			  pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);

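	/*
	 * Cache this vNIC's per-resource-type counts in the PF; the limits
	 * are the same for every VF of a given PF.
	 */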
	for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
	     res_type < USNIC_VNIC_RES_TYPE_MAX;
	     res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
							      res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
		   dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};

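/* Module init: set up uiom, the PCI driver, netdev/inet notifiers and the transport layer. */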
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:
	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);