#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

/*
 * Assign the device a unique name by expanding a printf-style "%d"
 * pattern (e.g. "mlx4_%d") to the lowest index not already in use.
 */
static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	ib_cache_release_one(dev);
	kfree(dev->port_immutable);
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
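
/*
 * Illustrative sketch (not part of this file): a low-level driver is
 * expected to embed struct ib_device as the *first* member of its
 * private structure, since ib_alloc_device() returns the zeroed
 * allocation as a struct ib_device pointer.  "struct mydrv_dev" is a
 * hypothetical name used only for this example:
 *
 *	struct mydrv_dev {
 *		struct ib_device ib_dev;	// must be first
 *		int              mydrv_state;	// driver-private data
 *	};
 *
 *	struct mydrv_dev *mdev =
 *		(struct mydrv_dev *)ib_alloc_device(sizeof(*mdev));
 *	if (!mdev)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&mdev->ib_dev);	// on error paths
 */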

/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		pr_warn("Couldn't allocate client context for %s/%s\n",
			device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to
	 * make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @port_callback: Callback used by sysfs setup for each port, may be NULL
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		ib_cache_cleanup_one(device);
		goto out;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
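
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * after allocating the device and filling in its methods and name, a
 * driver registers it; a "%d" in the name is expanded by alloc_name():
 *
 *	strlcpy(mdev->ib_dev.name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *	mdev->ib_dev.query_device = mydrv_query_device;
 *	...				// all methods checked by
 *					// ib_device_check_mandatory()
 *	ret = ib_register_device(&mdev->ib_dev, NULL);
 *	if (ret)
 *		goto err_dealloc;
 */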

/**
 * ib_unregister_device - Unregister an IB device
 * @device: Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
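
/*
 * Illustrative sketch of a client (hypothetical names): the add/remove
 * callbacks are invoked for every registered device, including devices
 * that already exist when the client registers:
 *
 *	static void myclient_add(struct ib_device *device);
 *	static void myclient_remove(struct ib_device *device, void *data);
 *
 *	static struct ib_client myclient = {
 *		.name   = "myclient",
 *		.add    = myclient_add,
 *		.remove = myclient_remove,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */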

/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
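
/*
 * Illustrative sketch (hypothetical names): a client typically allocates
 * per-device state in its add callback, attaches it with
 * ib_set_client_data(), and looks it up again elsewhere:
 *
 *	static void myclient_add(struct ib_device *device)
 *	{
 *		struct myclient_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return;
 *		ib_set_client_data(device, &myclient, ctx);
 *	}
 *
 *	...
 *	struct myclient_ctx *ctx = ib_get_client_data(device, &myclient);
 */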

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
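
/*
 * Illustrative sketch (hypothetical handler): consumers register a
 * handler with INIT_IB_EVENT_HANDLER()/ib_register_event_handler().
 * Because ib_dispatch_event() runs handlers under a spinlock, possibly
 * in interrupt context, handlers must not sleep:
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			...		// schedule work; do not sleep here
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */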

/**
 * ib_query_device - Query IB device attributes
 * @device: Device to query
 * @device_attr: Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	memset(device_attr, 0, sizeof(*device_attr));

	return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
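
/*
 * Illustrative sketch: querying a port and checking its state.  The
 * types and constants are standard verbs definitions; only the
 * surrounding code is hypothetical:
 *
 *	struct ib_port_attr port_attr;
 *	int ret = ib_query_port(device, port_num, &port_attr);
 *
 *	if (!ret && port_attr.state == IB_PORT_ACTIVE)
 *		...			// port is usable
 */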

/**
 * ib_query_gid - Get GID table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: GID table index to query
 * @gid: Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
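
/*
 * Illustrative sketch: fetching the default GID (index 0) of a port.
 * A NULL attr is required on non-RoCE ports, as enforced above:
 *
 *	union ib_gid gid;
 *	int ret = ib_query_gid(device, port_num, 0, &gid, NULL);
 */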

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to netdevice and calls callback() on each
 * device for which filter() function returns non zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * filter() function returns non zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member; if one exists, take the limited-member pkey */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
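
/*
 * Illustrative sketch: resolving the table index of the default
 * full-membership P_Key (0xffff).  Full members (high bit set) are
 * preferred over limited members, as implemented above:
 *
 *	u16 pkey_index;
 *	int ret = ib_find_pkey(device, port_num, 0xffff, &pkey_index);
 */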

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err;
	}

	ret = ibnl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ib_cache_setup();

	return 0;

err_sysfs:
	class_unregister(&ib_class);

err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ibnl_cleanup();
	class_unregister(&ib_class);
	/* Make sure that any pending work queued on ib_wq is flushed. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);