#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int     table_len;
        u16     table[];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
        bool               enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_state {
        GID_TABLE_ENTRY_INVALID         = 1,
        GID_TABLE_ENTRY_VALID           = 2,
        /*
         * Indicates the entry is pending removal. There may still be
         * active references to it; the entry is freed, and its slot
         * becomes reusable, only after the last reference is dropped.
         */
        GID_TABLE_ENTRY_PENDING_DEL     = 3,
};

struct ib_gid_table_entry {
        struct kref                     kref;
        struct work_struct              del_work;
        struct ib_gid_attr              attr;
        void                            *context;
        enum gid_table_entry_state      state;
};

struct ib_gid_table {
        int                             sz;
        /*
         * In RoCE, adding a GID to the table requires:
         * (a) finding whether this GID already exists,
         * (b) finding a free slot, and
         * (c) writing the new GID.
         *
         * Deletion requires a different pair of operations:
         * (a) finding the GID, and
         * (b) deleting it.
         */
        /*
         * Any writer to data_vec must hold this lock and the write side
         * of rwlock. Readers must hold only rwlock. All writers must be
         * in a sleepable context.
         */
        struct mutex                    lock;
        /* rwlock protects data_vec[ix]->state and the entry pointer. */
        rwlock_t                        rwlock;
        struct ib_gid_table_entry       **data_vec;
        /* bit field; each set bit marks a reserved default GID index */
        u32                             default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        struct ib_event event;

        event.device = ib_dev;
        event.element.port_num = port;
        event.event = IB_EVENT_GID_CHANGE;

        ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/**
 * rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:        GID to check
 *
 * Returns true if the given GID is zero, false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
        return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/**
 * is_gid_index_default - Check if a given index belongs to the
 * reserved default GID indices or not.
 * @table:      GID table pointer
 * @index:      Index to check in GID table
 *
 * Returns true if the index is one of the reserved default GID
 * indices, false otherwise.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
                                 unsigned int index)
{
        return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

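/*
 * Illustrative note (not part of the original file): the two exported
 * helpers above round-trip between enum ib_gid_type and its string form
 * as used by sysfs. A hedged usage sketch:
 *
 *      int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 * type is IB_GID_TYPE_ROCE_UDP_ENCAP here; a trailing newline from a
 * sysfs write is tolerated, and any unknown string yields -EINVAL.
 */
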
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
{
        return device->cache.ports[port - rdma_start_port(device)].gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
        return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
        return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                container_of(kref, struct ib_gid_table_entry, kref);

        queue_work(ib_wq, &entry->del_work);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        struct ib_device *device = entry->attr.device;
        u8 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
                 device->name, port_num, entry->attr.index,
                 entry->attr.gid.raw);

        if (rdma_cap_roce_gid_table(device, port_num) &&
            entry->state != GID_TABLE_ENTRY_INVALID)
                device->del_gid(&entry->attr, &entry->context);

        write_lock_irq(&table->rwlock);

        /*
         * A new entry may have been stored at this index by the time the
         * deferred free runs. Clear the slot only if it still points to
         * the entry being freed, so a newer entry is never overwritten
         * with NULL.
         */
        if (entry == table->data_vec[entry->attr.index])
                table->data_vec[entry->attr.index] = NULL;

        write_unlock_irq(&table->rwlock);

        if (entry->attr.ndev)
                dev_put(entry->attr.ndev);
        kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                container_of(kref, struct ib_gid_table_entry, kref);

        free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it. It releases the reference to the
 * netdevice and frees the table entry.
 */
static void free_gid_work(struct work_struct *work)
{
        struct ib_gid_table_entry *entry =
                container_of(work, struct ib_gid_table_entry, del_work);
        struct ib_device *device = entry->attr.device;
        u8 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        mutex_lock(&table->lock);
        free_gid_entry_locked(entry);
        mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;
        kref_init(&entry->kref);
        memcpy(&entry->attr, attr, sizeof(*attr));
        if (entry->attr.ndev)
                dev_hold(entry->attr.ndev);
        INIT_WORK(&entry->del_work, free_gid_work);
        entry->state = GID_TABLE_ENTRY_INVALID;
        return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
                            struct ib_gid_table_entry *entry)
{
        entry->state = GID_TABLE_ENTRY_VALID;

        pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
                 entry->attr.device->name, entry->attr.port_num,
                 entry->attr.index, entry->attr.gid.raw);

        lockdep_assert_held(&table->lock);
        write_lock_irq(&table->rwlock);
        table->data_vec[entry->attr.index] = entry;
        write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, free_gid_entry);
}

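/*
 * Summary comment (added for clarity, not in the original file): every
 * occupied table slot owns one reference to its entry. put_gid_entry()
 * is for callers that may not hold table->lock; its final put defers the
 * actual free to ib_wq via schedule_free_gid() and free_gid_work(), which
 * acquires the mutex itself. put_gid_entry_locked() is for callers that
 * already hold table->lock and frees synchronously via free_gid_entry().
 */
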
static int add_roce_gid(struct ib_gid_table_entry *entry)
{
        const struct ib_gid_attr *attr = &entry->attr;
        int ret;

        if (!attr->ndev) {
                pr_err("%s NULL netdev device=%s port=%d index=%d\n",
                       __func__, attr->device->name, attr->port_num,
                       attr->index);
                return -EINVAL;
        }
        if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
                ret = attr->device->add_gid(attr, &entry->context);
                if (ret) {
                        pr_err("%s GID add failed device=%s port=%d index=%d\n",
                               __func__, attr->device->name, attr->port_num,
                               attr->index);
                        return ret;
                }
        }
        return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:     IB device whose GID entry is to be deleted
 * @port:       Port number of the IB device
 * @table:      GID table of the IB device for a port
 * @ix:         GID entry index to delete
 *
 * Caller must hold table->lock.
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
                    struct ib_gid_table *table, int ix)
{
        struct ib_gid_table_entry *entry;

        lockdep_assert_held(&table->lock);

        pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
                 ib_dev->name, port, ix,
                 table->data_vec[ix]->attr.gid.raw);

        write_lock_irq(&table->rwlock);
        entry = table->data_vec[ix];
        entry->state = GID_TABLE_ENTRY_PENDING_DEL;
        /*
         * For non-RoCE protocols the slot can be reused immediately;
         * the entry itself lives on until its last reference is dropped.
         */
        if (!rdma_protocol_roce(ib_dev, port))
                table->data_vec[ix] = NULL;
        write_unlock_irq(&table->rwlock);

        put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:      GID table in which the GID is to be added or modified
 * @attr:       Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. Zero GIDs are
 * accepted from non-RoCE ports that report them as valid entries, but
 * they are converted to empty slots rather than stored in the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
                          const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        int ret = 0;

        /*
         * Invalidate any old entry at this index so that it is safe to
         * write the new one.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
                del_gid(attr->device, attr->port_num, table, attr->index);

        /*
         * Some HCAs report multiple GID entries with only one valid GID
         * and leave the unused entries as the zero GID. Convert zero
         * GIDs to empty table entries instead of storing them.
         */
        if (rdma_is_zero_gid(&attr->gid))
                return 0;

        entry = alloc_gid_entry(attr);
        if (!entry)
                return -ENOMEM;

        if (rdma_protocol_roce(attr->device, attr->port_num)) {
                ret = add_roce_gid(entry);
                if (ret)
                        goto done;
        }

        store_gid_entry(table, entry);
        return 0;

done:
        put_gid_entry(entry);
        return ret;
}

/* rwlock should be read locked, or table->lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = table->data_vec[i];
                struct ib_gid_attr *attr;
                int curr_index = i;

                i++;

                /*
                 * find_gid() is also used during GID addition, where it
                 * must return a free slot that is not a duplicate. Look
                 * for a free slot only when the caller asks for one by
                 * passing pempty.
                 */
                if (pempty && empty < 0) {
                        if (is_gid_entry_free(data) &&
                            default_gid ==
                                is_gid_index_default(table, curr_index)) {
                                /*
                                 * Found a free slot of the requested
                                 * kind. A default GID may only take one
                                 * of the reserved default indices, and a
                                 * non-default GID must not consume a
                                 * reserved slot, so the reserved slots
                                 * stay available for default GIDs.
                                 */
                                empty = curr_index;
                        }
                }

                /*
                 * For lookups, skip entries that are invalid or pending
                 * removal; only valid entries can match.
                 */
                if (!is_gid_entry_valid(data))
                        continue;

                if (found >= 0)
                        continue;

                attr = &data->attr;
                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->attr.gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    is_gid_index_default(table, curr_index) != default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

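/*
 * Worked example (illustrative, not in the original file): for a netdev
 * with MAC address 00:11:22:33:44:55, addrconf_ifid_eui48() derives the
 * modified EUI-64 interface ID 02:11:22:ff:fe:33:44:55 (universal/local
 * bit flipped, ff:fe inserted in the middle), so the default GID built
 * above is the link-local fe80::211:22ff:fe33:4455.
 */
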
static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                              union ib_gid *gid, struct ib_gid_attr *attr,
                              unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int empty;
        int ix;

        /*
         * Do not allow adding the zero GID, per IB spec version 1.3
         * section 4.1.1 point (6) and sections 12.7.10 and 12.7.20.
         */
        if (rdma_is_zero_gid(gid))
                return -EINVAL;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }
        attr->device = ib_dev;
        attr->index = empty;
        attr->port_num = port;
        attr->gid = *gid;
        ret = add_modify_gid(table, attr);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_warn("%s: unable to add gid %pI6 error=%d\n",
                        __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct net_device *idev;
        unsigned long mask;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mask = GID_ATTR_FIND_MASK_GID |
               GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_NETDEV;

        return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}

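/*
 * Usage sketch (hypothetical caller, for illustration only): a RoCE GID
 * management path adding a GID derived from an IP address on a netdev:
 *
 *      struct ib_gid_attr attr = {
 *              .ndev           = ndev,
 *              .gid_type       = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *      };
 *
 *      err = ib_cache_gid_add(ib_dev, port, &gid, &attr);
 *
 * A duplicate add is absorbed silently (find_gid() hits the existing
 * entry and 0 is returned), and -ENOSPC is returned when no free
 * non-default slot remains in the port's GID table.
 */
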
static int
_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                  union ib_gid *gid, struct ib_gid_attr *attr,
                  unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int ix;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, NULL);
        if (ix < 0) {
                ret = -EINVAL;
                goto out_unlock;
        }

        del_gid(ib_dev, port, table, ix);
        dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_debug("%s: can't delete gid %pI6 error=%d\n",
                         __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID      |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_DEFAULT  |
                             GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++) {
                if (is_gid_entry_valid(table->data_vec[ix]) &&
                    table->data_vec[ix]->attr.ndev == ndev) {
                        del_gid(ib_dev, port, table, ix);
                        deleted = true;
                }
        }

        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

/**
 * rdma_find_gid_by_port - Find a GID entry on a specific port
 * @ib_dev:     The device to query.
 * @gid:        The GID value to search for.
 * @gid_type:   The GID type to search for.
 * @port:       The port number of the device where the GID value should
 *              be searched.
 * @ndev:       In RoCE, the net device of the device. NULL means ignore.
 *
 * Searches the local software cache of the given port for a valid entry
 * matching the given GID value and type (and netdev, when supplied).
 *
 * Returns the GID attributes with an elevated reference count on
 * success, or ERR_PTR(-ENOENT) when no matching entry exists. The
 * caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
                      const union ib_gid *gid,
                      enum ib_gid_type gid_type,
                      u8 port, struct net_device *ndev)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        const struct ib_gid_attr *attr;
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-ENOENT);

        table = rdma_gid_table(ib_dev, port);

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                get_gid_entry(table->data_vec[local_index]);
                attr = &table->data_vec[local_index]->attr;
                read_unlock_irqrestore(&table->rwlock, flags);
                return attr;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);

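/*
 * Usage sketch (hypothetical caller, for illustration): resolving the
 * SGID attribute on a known port and releasing the reference when done:
 *
 *      const struct ib_gid_attr *sgid_attr;
 *
 *      sgid_attr = rdma_find_gid_by_port(ib_dev, &gid,
 *                                        IB_GID_TYPE_ROCE_UDP_ENCAP,
 *                                        port, ndev);
 *      if (IS_ERR(sgid_attr))
 *              return PTR_ERR(sgid_attr);
 *      ... use sgid_attr->gid, sgid_attr->ndev ...
 *      rdma_put_gid_attr(sgid_attr);
 */
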
/**
 * rdma_find_gid_by_filter - Find a GID entry that satisfies a filter
 * @ib_dev:     The device to query.
 * @gid:        The GID value to search for.
 * @port:       The port number of the device where the GID value could
 *              be searched.
 * @filter:     Executed on each table entry whose GID matches. If it
 *              returns true the entry is returned, otherwise the search
 *              continues. It runs under the table's read lock, in atomic
 *              context, and must not be NULL.
 * @context:    The context passed to the filter.
 *
 * Returns the attributes of the first matching entry accepted by the
 * filter, with an elevated reference count, or ERR_PTR on error. The
 * caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
        struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
        bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
                       void *),
        void *context)
{
        const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
        struct ib_gid_table *table;
        unsigned long flags;
        unsigned int i;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(ib_dev, port);

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_table_entry *entry = table->data_vec[i];

                if (!is_gid_entry_valid(entry))
                        continue;

                if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
                        continue;

                if (filter(gid, &entry->attr, context)) {
                        get_gid_entry(entry);
                        res = &entry->attr;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}

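/*
 * Usage sketch (hypothetical, for illustration): accepting a matching
 * GID only when it belongs to a VLAN upper device:
 *
 *      static bool vlan_filter(const union ib_gid *gid,
 *                              const struct ib_gid_attr *attr,
 *                              void *context)
 *      {
 *              return attr->ndev && is_vlan_dev(attr->ndev);
 *      }
 *
 *      attr = rdma_find_gid_by_filter(ib_dev, &gid, port,
 *                                     vlan_filter, NULL);
 *      if (!IS_ERR(attr))
 *              rdma_put_gid_attr(attr);
 */
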
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);
        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_device *device, u8 port,
                              struct ib_gid_table *table)
{
        bool leak = false;
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; i++) {
                if (is_gid_entry_free(table->data_vec[i]))
                        continue;
                if (kref_read(&table->data_vec[i]->kref) > 1) {
                        pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
                               device->name, i,
                               kref_read(&table->data_vec[i]->kref));
                        leak = true;
                }
        }
        if (leak)
                return;

        kfree(table->data_vec);
        kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        mutex_lock(&table->lock);
        for (i = 0; i < table->sz; ++i) {
                if (is_gid_entry_valid(table->data_vec[i])) {
                        del_gid(ib_dev, port, table, i);
                        deleted = true;
                }
        }
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid = { };
        struct ib_gid_attr gid_attr;
        unsigned int gid_type;
        unsigned long mask;

        mask = GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_DEFAULT |
               GID_ATTR_FIND_MASK_NETDEV;
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        make_default_gid(ndev, &gid);
                        __ib_cache_gid_add(ib_dev, port, &gid,
                                           &gid_attr, mask, true);
                } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
                        _ib_cache_gid_del(ib_dev, port, &gid,
                                          &gid_attr, mask, true);
                }
        }
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                      struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);

        for (i = 0; i < num_default_gids && i < table->sz; i++)
                table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                release_gid_table(ib_dev, port, table);
                ib_dev->cache.ports[port].gid = NULL;
        }
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table *table;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table = alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table)
                        goto rollback_table_setup;

                gid_table_reserve_default(ib_dev, rdma_port, table);
                ib_dev->cache.ports[port].gid = table;
        }
        return 0;

rollback_table_setup:
        gid_table_release_one(ib_dev);
        return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
        }
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}

/**
 * rdma_query_gid - Read a GID entry from the GID software cache
 * @device:     Device to query the GID
 * @port_num:   Port number of the device
 * @index:      Index of the GID table entry to read
 * @gid:        Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It works for IB, RoCE and iWarp link layers
 * and does not take any reference to the GID table entry in the HCA or
 * the software cache.
 *
 * Returns 0 on success or an appropriate error code.
 */
int rdma_query_gid(struct ib_device *device, u8 port_num,
                   int index, union ib_gid *gid)
{
        struct ib_gid_table *table;
        unsigned long flags;
        int res = -EINVAL;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = rdma_gid_table(device, port_num);
        read_lock_irqsave(&table->rwlock, flags);

        if (index < 0 || index >= table->sz ||
            !is_gid_entry_valid(table->data_vec[index]))
                goto done;

        memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
        res = 0;

done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}
EXPORT_SYMBOL(rdma_query_gid);

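/*
 * Usage sketch (hypothetical caller): copying out GID 0 of port 1; no
 * reference is held afterwards, so no put is needed. -EINVAL is returned
 * for an out-of-range index or an invalid entry:
 *
 *      union ib_gid gid;
 *      int err;
 *
 *      err = rdma_query_gid(device, 1, 0, &gid);
 *      if (err)
 *              return err;
 */
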
/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device:     The device to query.
 * @gid:        The GID value to search for.
 * @gid_type:   The GID type to search for.
 * @ndev:       In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches all ports of the device for the specified
 * GID value in the software cache.
 *
 * Returns the GID attributes with an elevated reference count if a
 * valid GID is found, or ERR_PTR(-ENOENT) otherwise. The caller must
 * invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
                                        const union ib_gid *gid,
                                        enum ib_gid_type gid_type,
                                        struct net_device *ndev)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
        u8 p;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        for (p = 0; p < device->phys_port_cnt; p++) {
                struct ib_gid_table *table;
                unsigned long flags;
                int index;

                table = device->cache.ports[p].gid;
                read_lock_irqsave(&table->rwlock, flags);
                index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
                if (index >= 0) {
                        const struct ib_gid_attr *attr;

                        get_gid_entry(table->data_vec[index]);
                        attr = &table->data_vec[index]->attr;
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return attr;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

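/*
 * Usage sketch (hypothetical): unlike rdma_find_gid_by_port(), this scans
 * every port, so it suits callers that know only the GID value:
 *
 *      attr = rdma_find_gid(device, &gid, IB_GID_TYPE_IB, NULL);
 *      if (IS_ERR(attr))
 *              return PTR_ERR(attr);
 *      port = attr->port_num;
 *      rdma_put_gid_attr(attr);
 */
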
int ib_get_cached_pkey(struct ib_device *device,
                       u8 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8 port_num,
                                u64 *sn_pfx)
{
        unsigned long flags;
        int p;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        p = port_num - rdma_start_port(device);
        read_lock_irqsave(&device->cache.lock, flags);
        *sn_pfx = device->cache.ports[p].subnet_prefix;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
                        u8 port_num,
                        u16 pkey,
                        u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

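/*
 * Note (added for clarity): bit 15 of a P_Key is the membership bit
 * (1 = full member, 0 = limited member); the low 15 bits carry the key
 * itself. ib_find_cached_pkey() therefore prefers a full-member match
 * and falls back to a limited-member entry (partial_ix) only when no
 * full-member entry carries the same 15-bit key, while
 * ib_find_exact_cached_pkey() below matches all 16 bits verbatim.
 */
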
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8 port_num,
                              u16 pkey,
                              u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8 port_num,
                      u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
                             u8 port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.ports[port_num
                - rdma_start_port(device)].port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:     The device to query.
 * @port_num:   The port number on the device where the GID value
 *              is to be queried.
 * @index:      Index of the GID table entry whose attributes are to
 *              be queried.
 *
 * rdma_get_gid_attr() acquires a reference count on the GID attributes
 * from the cached GID table. Callers must invoke rdma_put_gid_attr()
 * to release the reference, regardless of link layer.
 *
 * Returns a pointer to a valid GID attribute or ERR_PTR for the
 * appropriate error code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
{
        const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
        struct ib_gid_table *table;
        unsigned long flags;

        if (!rdma_is_port_valid(device, port_num))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(device, port_num);
        if (index < 0 || index >= table->sz)
                return ERR_PTR(-EINVAL);

        read_lock_irqsave(&table->rwlock, flags);
        if (!is_gid_entry_valid(table->data_vec[index]))
                goto done;

        get_gid_entry(table->data_vec[index]);
        attr = &table->data_vec[index]->attr;
done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);

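/*
 * Usage sketch (hypothetical): rdma_get_gid_attr() pairs with
 * rdma_put_gid_attr() below. While the reference is held the attribute,
 * including attr->ndev, remains valid even if the entry is deleted from
 * the table in the meantime:
 *
 *      attr = rdma_get_gid_attr(device, port_num, index);
 *      if (IS_ERR(attr))
 *              return PTR_ERR(attr);
 *      ... inspect attr->gid, attr->gid_type, attr->ndev ...
 *      rdma_put_gid_attr(attr);
 */
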
/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:       Pointer to the GID attribute whose reference
 *              needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was
 * acquired using rdma_get_gid_attr() or any API that returns a
 * pointer to an ib_gid_attr, regardless of link layer (IB or RoCE).
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 * @attr:       Pointer to the GID attribute whose reference
 *              needs to be taken.
 *
 * Increase the reference count on a GID attribute to keep it from being
 * freed. Callers are required to already hold a reference to avoid
 * racing with the entry being freed during this call.
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

static int config_non_roce_gid_cache(struct ib_device *device,
                                     u8 port, int gid_tbl_len)
{
        struct ib_gid_attr gid_attr = {};
        struct ib_gid_table *table;
        int ret = 0;
        int i;

        gid_attr.device = device;
        gid_attr.port_num = port;
        table = rdma_gid_table(device, port);

        mutex_lock(&table->lock);
        for (i = 0; i < gid_tbl_len; ++i) {
                if (!device->query_gid)
                        continue;
                ret = device->query_gid(device, port, i, &gid_attr.gid);
                if (ret) {
                        pr_warn("query_gid failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
                gid_attr.index = i;
                add_modify_gid(table, &gid_attr);
        }
err:
        mutex_unlock(&table->lock);
        return ret;
}

static void ib_cache_update(struct ib_device *device,
                            u8 port,
                            bool enforce_security)
{
        struct ib_port_attr *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        int i;
        int ret;

        if (!rdma_is_port_valid(device, port))
                return;

        tprops = kmalloc(sizeof(*tprops), GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        if (!rdma_protocol_roce(device, port)) {
                ret = config_non_roce_gid_cache(device, port,
                                                tprops->gid_tbl_len);
                if (ret)
                        goto err;
        }

        pkey_cache = kmalloc(struct_size(pkey_cache, table,
                                         tprops->pkey_tbl_len),
                             GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.ports[port -
                rdma_start_port(device)].pkey;

        device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
        device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
        device->cache.ports[port - rdma_start_port(device)].port_state =
                tprops->state;

        device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
                tprops->subnet_prefix;
        write_unlock_irq(&device->cache.lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device,
                        work->port_num,
                        work->enforce_security);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device = event->device;
                        work->port_num = event->element.port_num;
                        if (event->event == IB_EVENT_PKEY_CHANGE ||
                            event->event == IB_EVENT_GID_CHANGE)
                                work->enforce_security = true;
                        else
                                work->enforce_security = false;

                        queue_work(ib_wq, &work->work);
                }
        }
}

int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.ports =
                kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
                        sizeof(*device->cache.ports),
                        GFP_KERNEL);
        if (!device->cache.ports)
                return -ENOMEM;

        err = gid_table_setup_one(device);
        if (err) {
                kfree(device->cache.ports);
                device->cache.ports = NULL;
                return err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device), true);

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        ib_register_event_handler(&device->cache.event_handler);
        return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources, once the cache can no
         * longer be accessed.
         */
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                kfree(device->cache.ports[p].pkey);

        gid_table_release_one(device);
        kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /*
         * The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);

        /*
         * Flush the workqueue a second time for any GID delete work
         * queued by gid_table_cleanup_one() above.
         */
        flush_workqueue(ib_wq);
}