#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int		table_len;
	u16		table[];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_event    event;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_state {
	GID_TABLE_ENTRY_INVALID		= 1,
	GID_TABLE_ENTRY_VALID		= 2,
	/*
	 * Indicates the entry is pending removal; there may still be
	 * active references to it held by callers. Once all references
	 * are dropped, the entry's memory is freed.
	 */
	GID_TABLE_ENTRY_PENDING_DEL	= 3,
};

struct roce_gid_ndev_storage {
	struct rcu_head rcu_head;
	struct net_device *ndev;
};

struct ib_gid_table_entry {
	struct kref			kref;
	struct work_struct		del_work;
	struct ib_gid_attr		attr;
	void				*context;
	/* Store the ndev pointer separately so that its reference can
	 * be released later from a call_rcu callback, by which time the
	 * gid_table_entry and attr may already have been freed.
	 * ndev_storage itself is freed by the rcu callback.
	 */
	struct roce_gid_ndev_storage	*ndev_storage;
	enum gid_table_entry_state	state;
};

struct ib_gid_table {
	int				sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side
	 * of rwlock. Readers must hold only rwlock. All writers must be
	 * in a sleepable context.
	 */
	struct mutex			lock;
	/* rwlock protects data_vec[ix]->state and the entry pointer.
	 */
	rwlock_t			rwlock;
	struct ib_gid_table_entry	**data_vec;
	/* bit field, each bit indicates the index of a default GID */
	u32				default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 string is used for both IB_GID_TYPE_IB and
	 * IB_GID_TYPE_ROCE for user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/** rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:	GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
	return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/** is_gid_index_default - Check if a given index belongs to the
 * reserved default GIDs or not.
 * @table:	GID table pointer
 * @index:	Index to check in GID table
 * Returns true if index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
				 unsigned int index)
{
	return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

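/* Illustrative example (hypothetical caller): the string form printed by
 * ib_cache_gid_type_str() round-trips back to the enum value, and a
 * trailing newline, as written by "echo" through sysfs, is accepted:
 *
 *	int t = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	(t == IB_GID_TYPE_ROCE_UDP_ENCAP; an unknown string yields -EINVAL)
 */
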
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
	return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
	return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
	struct roce_gid_ndev_storage *storage =
		container_of(head, struct roce_gid_ndev_storage, rcu_head);

	WARN_ON(!storage->ndev);
	/* At this point it is safe to release the netdev reference, as
	 * all readers are done dereferencing the netdev via
	 * rdma_read_gid_attr_ndev_rcu() under the RCU read lock.
	 */
	dev_put(storage->ndev);
	kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);

	/*
	 * Only clear the slot if it still points at this entry. A new
	 * entry may have been stored at this index by the time we get
	 * here; in that case don't overwrite the table entry.
	 */
	if (entry == table->data_vec[entry->attr.index])
		table->data_vec[entry->attr.index] = NULL;

	write_unlock_irq(&table->rwlock);

	if (entry->ndev_storage)
		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
	kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
	struct ib_gid_table_entry *entry =
		container_of(kref, struct ib_gid_table_entry, kref);

	free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release the memory of a GID entry
 * @work: Work structure of the GID entry to be freed
 *
 * Runs from the ib_wq context once the last reference to a GID entry
 * is dropped; it detaches the entry from the table and frees its
 * memory while holding the table mutex.
 */
static void free_gid_work(struct work_struct *work)
{
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
	free_gid_entry_locked(entry);
	mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	struct net_device *ndev;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ndev = rcu_dereference_protected(attr->ndev, 1);
	if (ndev) {
		entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
					      GFP_KERNEL);
		if (!entry->ndev_storage) {
			kfree(entry);
			return NULL;
		}
		dev_hold(ndev);
		entry->ndev_storage->ndev = ndev;
	}
	kref_init(&entry->kref);
	memcpy(&entry->attr, attr, sizeof(*attr));
	INIT_WORK(&entry->del_work, free_gid_work);
	entry->state = GID_TABLE_ENTRY_INVALID;
	return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
			    struct ib_gid_table_entry *entry)
{
	entry->state = GID_TABLE_ENTRY_VALID;

	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
		__func__, entry->attr.port_num, entry->attr.index,
		entry->attr.gid.raw);

	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[entry->attr.index] = entry;
	write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	kref_put(&entry->kref, free_gid_entry);
}

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
	const struct ib_gid_attr *attr = &entry->attr;
	int ret;

	if (!attr->ndev) {
		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
			__func__, attr->port_num, attr->index);
		return -EINVAL;
	}
	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->ops.add_gid(attr, &entry->context);
		if (ret) {
			dev_err(&attr->device->dev,
				"%s GID add failed port=%u index=%u\n",
				__func__, attr->port_num, attr->index);
			return ret;
		}
	}
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry is to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
	struct ib_gid_table_entry *entry;

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
	entry = table->data_vec[ix];
	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
	/*
	 * For non RoCE protocols, the GID entry slot is ready to use
	 * again immediately.
	 */
	if (!rdma_protocol_roce(ib_dev, port))
		table->data_vec[ix] = NULL;
	write_unlock_irq(&table->rwlock);

	ndev_storage = entry->ndev_storage;
	if (ndev_storage) {
		entry->ndev_storage = NULL;
		rcu_assign_pointer(entry->attr.ndev, NULL);
		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
	}

	if (rdma_cap_roce_gid_table(ib_dev, port))
		ib_dev->ops.del_gid(&entry->attr, &entry->context);

	put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which GID is to be added or modified
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or appropriate error code. It accepts zero
 * GID addition for non RoCE ports whose HCAs report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ret = 0;

	/*
	 * Invalidate any old entry in the table to make it safe to write
	 * to this index.
	 */
	if (is_gid_entry_valid(table->data_vec[attr->index]))
		del_gid(attr->device, attr->port_num, table, attr->index);

	/*
	 * Some HCAs report multiple GID entries with only one valid GID,
	 * and leave other unused entries as the zero GID. Convert zero
	 * GIDs to empty table entries instead of storing them.
	 */
	if (rdma_is_zero_gid(&attr->gid))
		return 0;

	entry = alloc_gid_entry(attr);
	if (!entry)
		return -ENOMEM;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(entry);
		if (ret)
			goto done;
	}

	store_gid_entry(table, entry);
	return 0;

done:
	put_gid_entry(entry);
	return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = table->data_vec[i];
		struct ib_gid_attr *attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition where it is
		 * expected to return a free entry slot which is not a
		 * duplicate. A free entry slot is requested and returned
		 * only if pempty is set, so look up a free slot only if
		 * requested.
		 */
		if (pempty && empty < 0) {
			if (is_gid_entry_free(data) &&
			    default_gid ==
				is_gid_index_default(table, curr_index)) {
				/*
				 * Found an invalid (free) entry; allocate
				 * it. If a default GID is requested, the
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail. This ensures
				 * that only DEFAULT reserved slots are
				 * used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * Additionally find_gid() is used to find a valid entry
		 * during lookup operation; so ignore the entries which
		 * are marked as pending for removal and the entries which
		 * are marked as invalid.
		 */
		if (!is_gid_entry_valid(data))
			continue;

		if (found >= 0)
			continue;

		attr = &data->attr;
		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    is_gid_index_default(table, curr_index) != default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

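/* Worked example: for a netdev with MAC 02:00:00:00:00:01,
 * addrconf_ifid_eui48() yields the interface id 00:00:00:ff:fe:00:00:01
 * (EUI-64 expansion with the universal/local bit flipped), so the
 * default GID becomes the link-local address fe80::ff:fe00:1.
 */
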
static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding the zero GID, in support of IB spec
	 * version 1.3 section 4.1.1 point (6) and sections 12.7.10
	 * and 12.7.20.
	 */
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	attr->gid = *gid;
	ret = add_modify_gid(table, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_DEFAULT |
			     GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (is_gid_entry_valid(table->data_vec[ix]) &&
		    table->data_vec[ix]->attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for the given search parameters. It searches for the
 * specified GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns sgid attributes if the GID is found with a valid reference or
 * returns ERR_PTR for the error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	const struct ib_gid_attr *attr;
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-ENOENT);

	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		get_gid_entry(table->data_vec[local_index]);
		attr = &table->data_vec[local_index]->attr;
		read_unlock_irqrestore(&table->rwlock, flags);
		return attr;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);

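/* Usage sketch (hypothetical caller): look up a RoCE v2 GID on a known
 * port and drop the reference when done:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid,
 *				     IB_GID_TYPE_ROCE_UDP_ENCAP, port, ndev);
 *	if (!IS_ERR(attr)) {
 *		... use attr->index, attr->gid_type ...
 *		rdma_put_gid_attr(attr);
 *	}
 */
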
/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches for the specified GID value
 * and uses the filter function to decide whether the match is returned.
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
{
	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
	struct ib_gid_table *table;
	unsigned long flags;
	unsigned int i;

	if (!rdma_is_port_valid(ib_dev, port))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_table_entry *entry = table->data_vec[i];

		if (!is_gid_entry_valid(entry))
			continue;

		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
			continue;

		if (filter(gid, &entry->attr, context)) {
			get_gid_entry(entry);
			res = &entry->attr;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_device *device,
			      struct ib_gid_table *table)
{
	bool leak = false;
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; i++) {
		if (is_gid_entry_free(table->data_vec[i]))
			continue;
		if (kref_read(&table->data_vec[i]->kref) > 1) {
			dev_err(&device->dev,
				"GID entry ref leak for index %d ref=%u\n", i,
				kref_read(&table->data_vec[i]->kref));
			leak = true;
		}
	}
	if (leak)
		return;

	mutex_destroy(&table->lock);
	kfree(table->data_vec);
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (is_gid_entry_valid(table->data_vec[i]))
			del_gid(ib_dev, port, table, i);
	}
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	unsigned int gid_type;
	unsigned long mask;

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve the starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
		ib_dev->port_data[p].cache.gid = NULL;
	}
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
		if (!table)
			goto rollback_table_setup;

		gid_table_reserve_default(ib_dev, rdma_port, table);
		ib_dev->port_data[rdma_port].cache.gid = table;
	}
	return 0;

rollback_table_setup:
	gid_table_release_one(ib_dev);
	return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
				       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:		Device to query the GID
 * @port_num:		Port number of the device
 * @index:		Index of the GID table entry to read
 * @gid:		Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It reads for IB, RoCE and iWarp link layers.
 * It doesn't hold any reference to the GID table entry in the HCA or
 * software cache.
 *
 * Returns 0 on success or appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res = -EINVAL;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz ||
	    !is_gid_entry_valid(table->data_vec[index]))
		goto done;

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
EXPORT_SYMBOL(rdma_query_gid);

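/* Usage sketch (hypothetical caller): read the GID at index 0 of a port;
 * no reference to the table entry is held afterwards:
 *
 *	union ib_gid gid;
 *
 *	if (!rdma_query_gid(device, port_num, 0, &gid))
 *		pr_debug("GID[0] %pI6\n", gid.raw);
 */
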
/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr:		Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context
 * corresponding to the SGID attr. Callers are required to already be
 * holding a reference to an existing GID entry.
 *
 * Returns the HW GID context
 *
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
	return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software
 * cache across all ports of the device.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for
 * the error. The caller must invoke rdma_put_gid_attr() to release the
 * reference.
 *
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
					const union ib_gid *gid,
					enum ib_gid_type gid_type,
					struct net_device *ndev)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	rdma_for_each_port(device, p) {
		struct ib_gid_table *table;
		unsigned long flags;
		int index;

		table = device->port_data[p].cache.gid;
		read_lock_irqsave(&table->rwlock, flags);
		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
		if (index >= 0) {
			const struct ib_gid_attr *attr;

			get_gid_entry(table->data_vec[index]);
			attr = &table->data_vec[index]->attr;
			read_unlock_irqrestore(&table->rwlock, flags);
			return attr;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

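/* Usage sketch (hypothetical caller): device-wide lookup; on success the
 * entry is returned with an elevated reference that must be released:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid(device, &gid, IB_GID_TYPE_IB, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->port_num and attr->index ...
 *	rdma_put_gid_attr(attr);
 */
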
int ib_get_cached_pkey(struct ib_device *device,
		       u32 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;

	if (!cache || index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				 u64 *sn_pfx)
{
	unsigned long flags;

	read_lock_irqsave(&device->cache_lock, flags);
	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
	read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	/* The low 15 bits carry the P_Key base; bit 15 distinguishes
	 * full membership (set) from limited membership (clear).
	 * Prefer a full-membership match, but remember a limited
	 * (partial) match as a fallback.
	 */
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

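/* For example (IB spec defined values): the default P_Key 0xFFFF is the
 * full-membership form and 0x7FFF the limited-membership form of the
 * same partition. ib_find_cached_pkey(device, port, 0x7FFF, &index)
 * therefore prefers a table slot holding 0xFFFF over one holding 0x7FFF.
 */
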
int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);

	cache = device->port_data[port_num].cache.pkey;
	if (!cache) {
		ret = -EINVAL;
		goto err;
	}

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

err:
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*lmc = device->port_data[port_num].cache.lmc;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache_lock, flags);
	*port_state = device->port_data[port_num].cache.port_state;
	read_unlock_irqrestore(&device->cache_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:		The device to query.
 * @port_num:		The port number on the device where the GID value
 *			is to be queried.
 * @index:		Index of the GID table entry whose attributes are to
 *			be queried.
 *
 * rdma_get_gid_attr() acquires a reference count of the gid attributes
 * from the cached GID table. The caller must invoke rdma_put_gid_attr()
 * to release the reference to the gid attribute regardless of link layer.
 *
 * Returns a pointer to the gid attribute or ERR_PTR for the error.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);

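/* Usage sketch (hypothetical caller): pin the attributes of a known table
 * slot; rdma_put_gid_attr() must be called regardless of link layer:
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_get_gid_attr(device, port_num, index);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... inspect attr ...
 *	rdma_put_gid_attr(attr);
 */
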
/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a
 * device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 * The entries array must be allocated to hold max_entries number of entries.
 *
 * Returns the number of entries on success or an appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);

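/* Usage sketch (hypothetical caller): snapshot all valid GID entries of a
 * device. The array must be sized by the caller, for example from the sum
 * of gid_tbl_len over all ports:
 *
 *	struct ib_uverbs_gid_entry entries[32];
 *	ssize_t n;
 *
 *	n = rdma_query_gid_table(device, entries, ARRAY_SIZE(entries));
 *	(n is the number of filled entries, or -EINVAL if the array is
 *	 too small.)
 */
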
/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was
 * acquired using rdma_get_gid_attr() or any API which returns a pointer
 * to the ib_gid_attr, regardless of link layer (IB or RoCE).
 *
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr:		Pointer to the GID attribute whose reference
 *			needs to be taken.
 *
 * Increase the reference count of a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to the
 * attribute.
 *
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);

	get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

/**
 * rdma_read_gid_attr_ndev_rcu - Read the netdevice of a GID attribute
 * @attr:		Pointer to the GID attribute
 *
 * Returns the netdevice attached to the GID if the GID table entry the
 * attribute belongs to is still valid. The caller must hold the RCU read
 * lock, as the returned netdev pointer is only guaranteed to stay alive
 * for the RCU grace period. Returns ERR_PTR with an error code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);

static int get_lower_dev_vlan(struct net_device *lower_dev,
			      struct netdev_nested_priv *priv)
{
	u16 *vlan_id = (u16 *)priv->data;

	if (is_vlan_dev(lower_dev))
		*vlan_id = vlan_dev_vlan_id(lower_dev);

	/* We are interested only in the first-level vlan device, so
	 * always return 1 to stop iterating over next-level devices.
	 */
	return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 *			     of a GID entry.
 *
 * @attr:	GID attribute pointer whose L2 fields are to be read
 * @vlan_id:	Pointer to vlan id to fill up if the GID entry has a
 *		vlan id. It is optional.
 * @smac:	Pointer to smac to fill up for a GID entry. It is optional.
 *
 * Returns 0 on success, with vlan id and smac filled in when applicable;
 * returns -ENODEV when the GID entry has no attached netdevice.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
			    u16 *vlan_id, u8 *smac)
{
	struct netdev_nested_priv priv = {
		.data = (void *)vlan_id,
	};
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	if (smac)
		ether_addr_copy(smac, ndev->dev_addr);
	if (vlan_id) {
		*vlan_id = 0xffff;
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
		} else {
			/* If the netdev is an upper device whose lower
			 * device is a vlan device, consider the vlan id
			 * of the lower vlan device for this GID entry.
			 */
			netdev_walk_all_lower_dev_rcu(attr->ndev,
						      get_lower_dev_vlan,
						      &priv);
		}
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);

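/* Usage sketch (hypothetical caller): both out-parameters are optional;
 * a vlan_id of 0xffff after the call means the GID has no vlan:
 *
 *	u16 vlan_id;
 *	u8 smac[ETH_ALEN];
 *
 *	if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac))
 *		... use smac, and vlan_id if it is != 0xffff ...
 */
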
static int config_non_roce_gid_cache(struct ib_device *device,
				     u32 port, struct ib_port_attr *tprops)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < tprops->gid_tbl_len; ++i) {
		if (!device->ops.query_gid)
			continue;
		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
		if (ret) {
			dev_warn(&device->dev,
				 "query_gid failed (%d) for index %d\n", ret,
				 i);
			goto err;
		}
		gid_attr.index = i;
		tprops->subnet_prefix =
			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
		add_modify_gid(table, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
		bool update_pkeys, bool enforce_security)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL;
	struct ib_pkey_cache *old_pkey_cache = NULL;
	int i;
	int ret;

	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	tprops = kmalloc(sizeof(*tprops), GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port) && update_gids) {
		ret = config_non_roce_gid_cache(device, port, tprops);
		if (ret)
			goto err;
	}

	update_pkeys &= !!tprops->pkey_tbl_len;

	if (update_pkeys) {
		pkey_cache = kmalloc(struct_size(pkey_cache, table,
						 tprops->pkey_tbl_len),
				     GFP_KERNEL);
		if (!pkey_cache) {
			ret = -ENOMEM;
			goto err;
		}

		pkey_cache->table_len = tprops->pkey_tbl_len;

		for (i = 0; i < pkey_cache->table_len; ++i) {
			ret = ib_query_pkey(device, port, i,
					    pkey_cache->table + i);
			if (ret) {
				dev_warn(&device->dev,
					 "ib_query_pkey failed (%d) for index %d\n",
					 ret, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache_lock);

	if (update_pkeys) {
		old_pkey_cache = device->port_data[port].cache.pkey;
		device->port_data[port].cache.pkey = pkey_cache;
	}
	device->port_data[port].cache.lmc = tprops->lmc;
	device->port_data[port].cache.port_state = tprops->state;

	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
	write_unlock_irq(&device->cache_lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return 0;

err:
	kfree(pkey_cache);
	kfree(tprops);
	return ret;
}

static void ib_cache_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);
	int ret;

	/* Before distributing the cache update event, first sync
	 * the cache.
	 */
	ret = ib_cache_update(work->event.device, work->event.element.port_num,
			      work->event.event == IB_EVENT_GID_CHANGE,
			      work->event.event == IB_EVENT_PKEY_CHANGE,
			      work->enforce_security);

	/* GID events are already notified for individual GID entries by
	 * dispatch_gid_change_event(). Hence, notify for the rest of the
	 * events only.
	 */
	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
		ib_dispatch_event_clients(&work->event);

	kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_dispatch_event_clients(&work->event);
	kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
	return (event->event == IB_EVENT_PORT_ERR    ||
		event->event == IB_EVENT_PORT_ACTIVE ||
		event->event == IB_EVENT_LID_CHANGE  ||
		event->event == IB_EVENT_PKEY_CHANGE ||
		event->event == IB_EVENT_CLIENT_REREGISTER ||
		event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
	struct ib_update_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	if (is_cache_update_event(event))
		INIT_WORK(&work->work, ib_cache_event_task);
	else
		INIT_WORK(&work->work, ib_generic_event_task);

	work->event = *event;
	if (event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_GID_CHANGE)
		work->enforce_security = true;

	queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);

int ib_cache_setup_one(struct ib_device *device)
{
	u32 p;
	int err;

	err = gid_table_setup_one(device);
	if (err)
		return err;

	rdma_for_each_port (device, p) {
		err = ib_cache_update(device, p, true, true, true);
		if (err)
			return err;
	}

	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	u32 p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources, when the cache can no
	 * longer be accessed.
	 */
	rdma_for_each_port (device, p)
		kfree(device->port_data[p].cache.pkey);

	gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function waits for all in-progress workqueue
	 * elements and cleans up the GID cache. This function should be
	 * called after the device was removed from the devices list and
	 * all clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);

	/*
	 * Flush the wq a second time for any pending GID delete work.
	 */
	flush_workqueue(ib_wq);
}