#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void *            data;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
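
/*
 * device_mutex protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * doing both reads and writes.
 */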
static DEFINE_MUTEX(device_mutex);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
                               device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}

static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}

static int start_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static int end_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ?
                0 : device->phys_port_cnt;
}
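
/**
 * ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */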
struct ib_device *ib_alloc_device(size_t size)
{
        BUG_ON(size < sizeof (struct ib_device));

        return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);
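
/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */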
void ib_dealloc_device(struct ib_device *device)
{
        if (device->reg_state == IB_DEV_UNINITIALIZED) {
                kfree(device);
                return;
        }

        BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
                       device->name, client->name);
                return -ENOMEM;
        }

        context->client = client;
        context->data   = NULL;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return 0;
}

static int read_port_table_lengths(struct ib_device *device)
{
        struct ib_port_attr *tprops = NULL;
        int num_ports, ret = -ENOMEM;
        u8 port_index;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                goto out;

        num_ports = end_port(device) - start_port(device) + 1;

        device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
                                       GFP_KERNEL);
        device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
                                      GFP_KERNEL);
        if (!device->pkey_tbl_len || !device->gid_tbl_len)
                goto err;

        for (port_index = 0; port_index < num_ports; ++port_index) {
                ret = ib_query_port(device, port_index + start_port(device),
                                    tprops);
                if (ret)
                        goto err;
                device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
                device->gid_tbl_len[port_index]  = tprops->gid_tbl_len;
        }

        ret = 0;
        goto out;

err:
        kfree(device->gid_tbl_len);
        kfree(device->pkey_tbl_len);
out:
        kfree(tprops);
        return ret;
}
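
/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @port_callback: Optional callback invoked for each port during sysfs
 *   registration
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */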
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        INIT_LIST_HEAD(&device->event_handler_list);
        INIT_LIST_HEAD(&device->client_data_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);

        ret = read_port_table_lengths(device);
        if (ret) {
                printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
                       device->name);
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                       device->name);
                kfree(device->gid_tbl_len);
                kfree(device->pkey_tbl_len);
                goto out;
        }

        list_add_tail(&device->core_list, &device_list);

        device->reg_state = IB_DEV_REGISTERED;

        {
                struct ib_client *client;

                list_for_each_entry(client, &client_list, list)
                        if (client->add && !add_client_context(device, client))
                                client->add(device);
        }

out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);
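
/**
 * ib_unregister_device - Unregister an IB device
 * @device: Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */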
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client *client;
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        list_for_each_entry_reverse(client, &client_list, list)
                if (client->remove)
                        client->remove(device);

        list_del(&device->core_list);

        kfree(device->gid_tbl_len);
        kfree(device->pkey_tbl_len);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
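
/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */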
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_add_tail(&client->list, &client_list);
        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);
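
/*
 * Illustrative sketch (not part of this file): a minimal client that
 * just logs device add/remove events.  The my_* names below are
 * hypothetical; only the struct ib_client fields are defined by this
 * API.  A client typically registers from its module init and
 * unregisters from its module exit:
 *
 *      static void my_add_one(struct ib_device *device)
 *      {
 *              printk(KERN_INFO "my_client: device %s added\n", device->name);
 *      }
 *
 *      static void my_remove_one(struct ib_device *device)
 *      {
 *              printk(KERN_INFO "my_client: device %s removed\n", device->name);
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add_one,
 *              .remove = my_remove_one,
 *      };
 *
 *      ib_register_client(&my_client);         // in module init
 *      ib_unregister_client(&my_client);       // in module exit
 */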
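
/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */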
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list) {
                if (client->remove)
                        client->remove(device);

                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                list_del(&context->list);
                                kfree(context);
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
        }
        list_del(&client->list);

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
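
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */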
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);
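
/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */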
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        printk(KERN_WARNING "No client context found for %s/%s\n",
               device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
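
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */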
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
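
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */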
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
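
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */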
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
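
/**
 * ib_query_device - Query IB device attributes
 * @device: Device to query
 * @device_attr: Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */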
int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr)
{
        return device->query_device(device, device_attr);
}
EXPORT_SYMBOL(ib_query_device);
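
/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */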
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
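
/**
 * ib_query_gid - Get GID table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: GID table index to query
 * @gid: Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */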
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid)
{
        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
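
/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */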
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
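
/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */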
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
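
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */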
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
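
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */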
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = start_port(device); port <= end_port(device); ++port) {
                for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
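
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */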
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* if there is a full-member pkey, take it */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        /* otherwise remember the first partial match */
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* no full-member pkey found; take the limited-member one if it exists */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ret = ib_sysfs_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
                goto err;
        }

        ret = ibnl_init();
        if (ret) {
                printk(KERN_WARNING "Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ret = ib_cache_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                goto err_nl;
        }

        return 0;

err_nl:
        ibnl_cleanup();

err_sysfs:
        ib_sysfs_cleanup();

err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ibnl_cleanup();
        ib_sysfs_cleanup();
        /* flush and destroy the shared workqueue so no work is left pending */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);