1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include <net/sock.h>
38#include "core.h"
39#include "netlink.h"
40#include "name_table.h"
41#include "name_distr.h"
42#include "subscr.h"
43#include "bcast.h"
44#include "addr.h"
45#include <net/genetlink.h>
46
47#define TIPC_NAMETBL_SIZE 1024
48
/* Netlink attribute policy for nested TIPC_NLA_NAME_TABLE attributes */
static const struct nla_policy
tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
	[TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
};
54
55
56
57
58
59
60
61
62
63
64
65
66
67
/**
 * struct name_info - publication lists for one published name range
 * @node_list: publications made by this node's own ports
 *             (entries added under in_own_node(), see insert_publ)
 * @cluster_list: publications made by nodes in own cluster
 *                (entries added under in_own_cluster())
 * @zone_list: all publications for this range, regardless of origin
 * @node_list_size: number of entries in @node_list
 * @cluster_list_size: number of entries in @cluster_list
 * @zone_list_size: number of entries in @zone_list
 */
struct name_info {
	struct list_head node_list;
	struct list_head cluster_list;
	struct list_head zone_list;
	u32 node_list_size;
	u32 cluster_list_size;
	u32 zone_list_size;
};
76
77
78
79
80
81
82
/**
 * struct sub_seq - one contiguous range of published name instances
 * @lower: lower bound of the instance range
 * @upper: upper bound of the instance range (inclusive; see
 *         nameseq_find_subseq(), which matches lower <= instance <= upper)
 * @info: publication lists covering this range
 */
struct sub_seq {
	u32 lower;
	u32 upper;
	struct name_info *info;
};
88
89
90
91
92
93
94
95
96
97
98
99
100
/**
 * struct name_seq - all sub-sequences for a given name type
 * @type: TIPC name type this sequence covers
 * @sseqs: array of sub-sequences, kept sorted by ascending lower bound
 *         (binary-searched by nameseq_find_subseq()/nameseq_locate_subseq())
 * @alloc: number of entries allocated in @sseqs
 * @first_free: index one past the last used entry in @sseqs
 * @ns_list: links this sequence into its name table hash chain (RCU)
 * @subscriptions: subscriptions monitoring this name type
 * @lock: protects @sseqs, @first_free and @subscriptions
 * @rcu: RCU callback head used when freeing the sequence (kfree_rcu)
 */
struct name_seq {
	u32 type;
	struct sub_seq *sseqs;
	u32 alloc;
	u32 first_free;
	struct hlist_node ns_list;
	struct list_head subscriptions;
	spinlock_t lock;
	struct rcu_head rcu;
};
111
112static int hash(int x)
113{
114 return x & (TIPC_NAMETBL_SIZE - 1);
115}
116
117
118
119
120static struct publication *publ_create(u32 type, u32 lower, u32 upper,
121 u32 scope, u32 node, u32 port_ref,
122 u32 key)
123{
124 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
125 if (publ == NULL) {
126 pr_warn("Publication creation failure, no memory\n");
127 return NULL;
128 }
129
130 publ->type = type;
131 publ->lower = lower;
132 publ->upper = upper;
133 publ->scope = scope;
134 publ->node = node;
135 publ->ref = port_ref;
136 publ->key = key;
137 INIT_LIST_HEAD(&publ->pport_list);
138 return publ;
139}
140
141
142
143
144static struct sub_seq *tipc_subseq_alloc(u32 cnt)
145{
146 return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
147}
148
149
150
151
152
153
/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates the sequence with a single (zeroed) sub-sequence entry and
 * links it onto @seq_head, making it visible to RCU readers.
 *
 * Return: the new sequence, or NULL on allocation failure.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
	struct sub_seq *sseq = tipc_subseq_alloc(1);

	if (!nseq || !sseq) {
		pr_warn("Name sequence creation failed, no memory\n");
		/* kfree(NULL) is a no-op, so a partial failure is safe here */
		kfree(nseq);
		kfree(sseq);
		return NULL;
	}

	spin_lock_init(&nseq->lock);
	nseq->type = type;
	nseq->sseqs = sseq;
	nseq->alloc = 1;
	INIT_HLIST_NODE(&nseq->ns_list);
	INIT_LIST_HEAD(&nseq->subscriptions);
	/* Publish to the hash chain last, once fully initialized */
	hlist_add_head_rcu(&nseq->ns_list, seq_head);
	return nseq;
}
175
176
177
178
179
180
181static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
182 u32 instance)
183{
184 struct sub_seq *sseqs = nseq->sseqs;
185 int low = 0;
186 int high = nseq->first_free - 1;
187 int mid;
188
189 while (low <= high) {
190 mid = (low + high) / 2;
191 if (instance < sseqs[mid].lower)
192 high = mid - 1;
193 else if (instance > sseqs[mid].upper)
194 low = mid + 1;
195 else
196 return &sseqs[mid];
197 }
198 return NULL;
199}
200
201
202
203
204
205
206
207
208
209
210static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
211{
212 struct sub_seq *sseqs = nseq->sseqs;
213 int low = 0;
214 int high = nseq->first_free - 1;
215 int mid;
216
217 while (low <= high) {
218 mid = (low + high) / 2;
219 if (instance < sseqs[mid].lower)
220 high = mid - 1;
221 else if (instance > sseqs[mid].upper)
222 low = mid + 1;
223 else
224 return mid;
225 }
226 return low;
227}
228
229
230
231
/**
 * tipc_nameseq_insert_publ - insert a publication into a name sequence
 *
 * If a sub-sequence already covers @lower its range must match
 * <@lower,@upper> exactly and the publication must not be a duplicate;
 * otherwise a new sub-sequence is inserted (growing the array if full).
 * All overlapping subscriptions are then notified of the publication.
 *
 * Return: the new publication, or NULL on range mismatch/overlap,
 * duplicate publication, or memory exhaustion.
 *
 * Caller must hold nseq->lock (see tipc_nametbl_insert_publ()).
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port, u32 key)
{
	struct tipc_subscription *s;
	struct tipc_subscription *st;
	struct publication *publ;
	struct sub_seq *sseq;
	struct name_info *info;
	int created_subseq = 0;

	sseq = nameseq_find_subseq(nseq, lower);
	if (sseq) {

		/* Lower end overlaps existing entry => need an exact match */
		if ((sseq->lower != lower) || (sseq->upper != upper)) {
			return NULL;
		}

		info = sseq->info;

		/* Check if an identical publication already exists */
		list_for_each_entry(publ, &info->zone_list, zone_list) {
			if ((publ->ref == port) && (publ->key == key) &&
			    (!publ->node || (publ->node == node)))
				return NULL;
		}
	} else {
		u32 inspos;
		struct sub_seq *freesseq;

		/* Find where lower end should be inserted */
		inspos = nameseq_locate_subseq(nseq, lower);

		/* Fail if upper end overlaps into an existing entry */
		if ((inspos < nseq->first_free) &&
		    (upper >= nseq->sseqs[inspos].lower)) {
			return NULL;
		}

		/* Ensure there is space for a new sub-sequence */
		if (nseq->first_free == nseq->alloc) {
			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

			if (!sseqs) {
				pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
					type, lower, upper);
				return NULL;
			}
			memcpy(sseqs, nseq->sseqs,
			       nseq->alloc * sizeof(struct sub_seq));
			kfree(nseq->sseqs);
			nseq->sseqs = sseqs;
			nseq->alloc *= 2;
		}

		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info) {
			pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
				type, lower, upper);
			return NULL;
		}

		INIT_LIST_HEAD(&info->node_list);
		INIT_LIST_HEAD(&info->cluster_list);
		INIT_LIST_HEAD(&info->zone_list);

		/* Insert new sub-sequence, shifting later entries rightward */
		sseq = &nseq->sseqs[inspos];
		freesseq = &nseq->sseqs[nseq->first_free];
		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
		memset(sseq, 0, sizeof(*sseq));
		nseq->first_free++;
		sseq->lower = lower;
		sseq->upper = upper;
		sseq->info = info;
		created_subseq = 1;
	}

	/* Insert a publication */
	publ = publ_create(type, lower, upper, scope, node, port, key);
	if (!publ)
		return NULL;

	list_add(&publ->zone_list, &info->zone_list);
	info->zone_list_size++;

	if (in_own_cluster(net, node)) {
		list_add(&publ->cluster_list, &info->cluster_list);
		info->cluster_list_size++;
	}

	if (in_own_node(net, node)) {
		list_add(&publ->node_list, &info->node_list);
		info->node_list_size++;
	}

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_PUBLISHED, publ->ref,
					    publ->node, created_subseq);
	}
	return publ;
}
339
340
341
342
343
344
345
346
347
348
349
350
/**
 * tipc_nameseq_remove_publ - remove a publication from a name sequence
 *
 * Finds and unlinks the publication matching (@inst, @node, @ref, @key).
 * If its sub-sequence ends up with no publications, the sub-sequence is
 * removed and the array is compacted. Overlapping subscriptions are
 * notified of the withdrawal.
 *
 * Return: the unlinked publication (ownership passes to the caller, who
 * is responsible for freeing it), or NULL if no match was found.
 *
 * Caller must hold nseq->lock.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 inst, u32 node,
						    u32 ref, u32 key)
{
	struct publication *publ;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct name_info *info;
	struct sub_seq *free;
	struct tipc_subscription *s, *st;
	int removed_subseq = 0;

	if (!sseq)
		return NULL;

	info = sseq->info;

	/* Locate publication, if it exists */
	list_for_each_entry(publ, &info->zone_list, zone_list) {
		if ((publ->key == key) && (publ->ref == ref) &&
		    (!publ->node || (publ->node == node)))
			goto found;
	}
	return NULL;

found:
	/* Remove publication from zone scope list */
	list_del(&publ->zone_list);
	info->zone_list_size--;

	/* Remove publication from cluster scope list, if present */
	if (in_own_cluster(net, node)) {
		list_del(&publ->cluster_list);
		info->cluster_list_size--;
	}

	/* Remove publication from node scope list, if present */
	if (in_own_node(net, node)) {
		list_del(&publ->node_list);
		info->node_list_size--;
	}

	/* Contract subseq list if no more publications for that subseq */
	if (list_empty(&info->zone_list)) {
		kfree(info);
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
		removed_subseq = 1;
	}

	/* Notify any waiting subscriptions */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_WITHDRAWN, publ->ref,
					    publ->node, removed_subseq);
	}

	return publ;
}
410
411
412
413
414
415
416static void tipc_nameseq_subscribe(struct name_seq *nseq,
417 struct tipc_subscription *s)
418{
419 struct sub_seq *sseq = nseq->sseqs;
420
421 list_add(&s->nameseq_list, &nseq->subscriptions);
422
423 if (!sseq)
424 return;
425
426 while (sseq != &nseq->sseqs[nseq->first_free]) {
427 if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
428 struct publication *crs;
429 struct name_info *info = sseq->info;
430 int must_report = 1;
431
432 list_for_each_entry(crs, &info->zone_list, zone_list) {
433 tipc_subscrp_report_overlap(s, sseq->lower,
434 sseq->upper,
435 TIPC_PUBLISHED,
436 crs->ref, crs->node,
437 must_report);
438 must_report = 0;
439 }
440 }
441 sseq++;
442 }
443}
444
445static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
446{
447 struct tipc_net *tn = net_generic(net, tipc_net_id);
448 struct hlist_head *seq_head;
449 struct name_seq *ns;
450
451 seq_head = &tn->nametbl->seq_hlist[hash(type)];
452 hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
453 if (ns->type == type)
454 return ns;
455 }
456
457 return NULL;
458};
459
/**
 * tipc_nametbl_insert_publ - add a publication to the name table
 *
 * Validates the scope and range, creates the name sequence for @type if
 * it does not yet exist, then inserts the publication under the
 * sequence lock.
 *
 * Return: the new publication, or NULL on invalid parameters, overlap,
 * duplicate publication, or allocation failure.
 */
struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper, u32 scope,
					     u32 node, u32 port, u32 key)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);
	int index = hash(type);

	/* Reject out-of-range scope or inverted instance range */
	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
	    (lower > upper)) {
		pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}

	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
					scope, node, port, key);
	spin_unlock_bh(&seq->lock);
	return publ;
}
487
488struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
489 u32 lower, u32 node, u32 ref,
490 u32 key)
491{
492 struct publication *publ;
493 struct name_seq *seq = nametbl_find_seq(net, type);
494
495 if (!seq)
496 return NULL;
497
498 spin_lock_bh(&seq->lock);
499 publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
500 if (!seq->first_free && list_empty(&seq->subscriptions)) {
501 hlist_del_init_rcu(&seq->ns_list);
502 kfree(seq->sseqs);
503 spin_unlock_bh(&seq->lock);
504 kfree_rcu(seq, rcu);
505 return publ;
506 }
507 spin_unlock_bh(&seq->lock);
508 return publ;
509}
510
511
512
513
514
515
516
517
518
519
520
521
522
523
/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node (search domain not
 *   in own scope), leaves 'destnode' unchanged and returns 0
 * - if translation is attempted and succeeds, sets 'destnode' to the
 *   publishing node and returns the port reference (non-zero)
 * - if translation is attempted and fails, sets 'destnode' to 0 and
 *   returns 0
 *
 * The matched publication is rotated to the tail of its list so that
 * repeated lookups round-robin over equivalent publications.
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
			   u32 *destnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sub_seq *sseq;
	struct name_info *info;
	struct publication *publ;
	struct name_seq *seq;
	u32 ref = 0;
	u32 node = 0;

	if (!tipc_in_scope(*destnode, tn->own_addr))
		return 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto not_found;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto no_match;
	info = sseq->info;

	/* Closest-First Algorithm: prefer node-local, then cluster-local,
	 * then any publication in the zone
	 */
	if (likely(!*destnode)) {
		if (!list_empty(&info->node_list)) {
			publ = list_first_entry(&info->node_list,
						struct publication,
						node_list);
			list_move_tail(&publ->node_list,
				       &info->node_list);
		} else if (!list_empty(&info->cluster_list)) {
			publ = list_first_entry(&info->cluster_list,
						struct publication,
						cluster_list);
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
		} else {
			publ = list_first_entry(&info->zone_list,
						struct publication,
						zone_list);
			list_move_tail(&publ->zone_list,
				       &info->zone_list);
		}
	}

	/* Round-Robin Algorithm: pick from the list matching the
	 * requested destination domain
	 */
	else if (*destnode == tn->own_addr) {
		if (list_empty(&info->node_list))
			goto no_match;
		publ = list_first_entry(&info->node_list, struct publication,
					node_list);
		list_move_tail(&publ->node_list, &info->node_list);
	} else if (in_own_cluster_exact(net, *destnode)) {
		if (list_empty(&info->cluster_list))
			goto no_match;
		publ = list_first_entry(&info->cluster_list, struct publication,
					cluster_list);
		list_move_tail(&publ->cluster_list, &info->cluster_list);
	} else {
		/* zone_list is non-empty whenever the sub-sequence exists
		 * (empty sub-sequences are removed in remove_publ)
		 */
		publ = list_first_entry(&info->zone_list, struct publication,
					zone_list);
		list_move_tail(&publ->zone_list, &info->zone_list);
	}

	ref = publ->ref;
	node = publ->node;
no_match:
	spin_unlock_bh(&seq->lock);
not_found:
	rcu_read_unlock();
	*destnode = node;
	return ref;
}
599
600
601
602
603
604
605
606
607
608
609
610
611
/**
 * tipc_nametbl_mc_translate - find multicast destinations
 *
 * Creates a list of all local ports that overlap the given multicast
 * address, and also determines if any off-node ports overlap.
 *
 * Note: publications of a scope narrower (numerically greater) than
 * @limit are excluded from the returned port list.
 *
 * Return: non-zero if any off-node ports overlap.
 */
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
			      u32 limit, struct tipc_plist *dports)
{
	struct name_seq *seq;
	struct sub_seq *sseq;
	struct sub_seq *sseq_stop;
	struct name_info *info;
	int res = 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	/* Start at the first sub-sequence that may overlap [lower, upper] */
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
	for (; sseq != sseq_stop; sseq++) {
		struct publication *publ;

		if (sseq->lower > upper)
			break;

		info = sseq->info;
		list_for_each_entry(publ, &info->node_list, node_list) {
			if (publ->scope <= limit)
				tipc_plist_push(dports, publ->ref);
		}

		/* Off-node publications exist if the cluster list is
		 * larger than the node list
		 */
		if (info->cluster_list_size != info->node_list_size)
			res = 1;
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
	return res;
}
649
650
651
652
/**
 * tipc_nametbl_publish - add name publication to network name tables
 *
 * Inserts a local publication (node = own address), then distributes
 * it to the rest of the cluster outside the nametbl lock.
 *
 * Return: the new publication, or NULL if the local publication limit
 * is reached or insertion fails.
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port_ref,
					 u32 key)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
		pr_warn("Publication failed, local publication limit reached (%u)\n",
			TIPC_MAX_PUBLICATIONS);
		spin_unlock_bh(&tn->nametbl_lock);
		return NULL;
	}

	publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
					tn->own_addr, port_ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count++;
		buf = tipc_named_publish(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Distribute to cluster after dropping nametbl_lock */
	if (buf)
		named_cluster_distribute(net, buf);
	return publ;
}
683
684
685
686
/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 *
 * Removes the local publication, frees it via RCU, and distributes the
 * withdrawal to the cluster outside the nametbl lock.
 *
 * Return: 1 if a withdrawal message was distributed to the cluster,
 * otherwise 0.
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
			  u32 key)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
					ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count--;
		skb = tipc_named_withdraw(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
		list_del_init(&publ->pport_list);
		kfree_rcu(publ, rcu);
	} else {
		pr_err("Unable to remove local publication\n"
		       "(type=%u, lower=%u, ref=%u, key=%u)\n",
		       type, lower, ref, key);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		named_cluster_distribute(net, skb);
		return 1;
	}
	return 0;
}
717
718
719
720
/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 *
 * Creates the name sequence for the subscribed type if it does not yet
 * exist, then attaches the subscription under the sequence lock.
 */
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	u32 type = s->seq.type;
	int index = hash(type);
	struct name_seq *seq;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (seq) {
		spin_lock_bh(&seq->lock);
		tipc_nameseq_subscribe(seq, s);
		spin_unlock_bh(&seq->lock);
	} else {
		pr_warn("Failed to create subscription for {%u,%u,%u}\n",
			s->seq.type, s->seq.lower, s->seq.upper);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}
742
743
744
745
/**
 * tipc_nametbl_unsubscribe - remove a subscription object from the name table
 *
 * Detaches the subscription; if the sequence is left with no
 * sub-sequences and no subscriptions, the sequence itself is unlinked
 * and freed (deferred via RCU).
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	struct name_seq *seq;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, s->seq.type);
	if (seq != NULL) {
		spin_lock_bh(&seq->lock);
		list_del_init(&s->nameseq_list);
		if (!seq->first_free && list_empty(&seq->subscriptions)) {
			hlist_del_init_rcu(&seq->ns_list);
			kfree(seq->sseqs);
			spin_unlock_bh(&seq->lock);
			kfree_rcu(seq, rcu);
		} else {
			spin_unlock_bh(&seq->lock);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
}
767
768int tipc_nametbl_init(struct net *net)
769{
770 struct tipc_net *tn = net_generic(net, tipc_net_id);
771 struct name_table *tipc_nametbl;
772 int i;
773
774 tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
775 if (!tipc_nametbl)
776 return -ENOMEM;
777
778 for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
779 INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);
780
781 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
782 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
783 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
784 tn->nametbl = tipc_nametbl;
785 spin_lock_init(&tn->nametbl_lock);
786 return 0;
787}
788
789
790
791
792
793
/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tn->nametbl_lock must be held when calling this function.
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
	struct publication *publ, *safe;
	struct sub_seq *sseq;
	struct name_info *info;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs;
	/* NOTE(review): only the first sub_seq's info/zone_list is walked
	 * here. remove_publ compacts the sseq array as entries empty, but
	 * the list head captured in 'info' is not refreshed — confirm that
	 * a sequence holding multiple sub_seqs is fully purged and that
	 * iteration is safe after 'info' is freed by remove_publ.
	 */
	info = sseq->info;
	list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
		tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
					 publ->ref, publ->key);
		kfree_rcu(publ, rcu);
	}
	hlist_del_init_rcu(&seq->ns_list);
	kfree(seq->sseqs);
	spin_unlock_bh(&seq->lock);

	kfree_rcu(seq, rcu);
}
814
/**
 * tipc_nametbl_stop - purge and release this namespace's name table
 *
 * Called at namespace teardown; removes any lingering publications
 * from every hash chain, then frees the table itself.
 */
void tipc_nametbl_stop(struct net *net)
{
	u32 i;
	struct name_seq *seq;
	struct hlist_head *seq_head;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl = tn->nametbl;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
			continue;
		seq_head = &tipc_nametbl->seq_hlist[i];
		hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
			tipc_purge_publications(net, seq);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);

	/* Wait for outstanding RCU readers before freeing the table */
	synchronize_net();
	kfree(tipc_nametbl);

}
841
/* Emit one netlink message per publication in @sseq's zone list,
 * resuming after *last_publ if non-zero (dump continuation).
 *
 * On -EMSGSIZE, *last_publ holds the key to resume from; on full
 * completion *last_publ is reset to 0.
 *
 * Caller must hold seq->lock (see tipc_nl_seq_list()).
 */
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct name_seq *seq,
					struct sub_seq *sseq, u32 *last_publ)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *publ;
	struct publication *p;

	if (*last_publ) {
		/* Resume from the publication we stopped at last time */
		list_for_each_entry(p, &sseq->info->zone_list, zone_list)
			if (p->key == *last_publ)
				break;
		/* NOTE(review): if no entry matched, 'p' is the container of
		 * the list head here, so p->key reads outside any
		 * publication object — confirm whether a withdrawn
		 * last_publ can reach this point between dump calls.
		 */
		if (p->key != *last_publ)
			return -EPIPE;
	} else {
		p = list_first_entry(&sseq->info->zone_list, struct publication,
				     zone_list);
	}

	list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
		*last_publ = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!publ)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, publ);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_publ = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, publ);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
911
912static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
913 u32 *last_lower, u32 *last_publ)
914{
915 struct sub_seq *sseq;
916 struct sub_seq *sseq_start;
917 int err;
918
919 if (*last_lower) {
920 sseq_start = nameseq_find_subseq(seq, *last_lower);
921 if (!sseq_start)
922 return -EPIPE;
923 } else {
924 sseq_start = seq->sseqs;
925 }
926
927 for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
928 err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
929 if (err) {
930 *last_lower = sseq->lower;
931 return err;
932 }
933 }
934 *last_lower = 0;
935
936 return 0;
937}
938
/* Dump all name sequences in the table to netlink, resuming from
 * (*last_type, *last_lower, *last_publ) if *last_type is non-zero.
 * On error the resume cursor is updated; on completion it is reset.
 *
 * Caller must hold rcu_read_lock() (see tipc_nl_name_table_dump()).
 */
static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
			    u32 *last_type, u32 *last_lower, u32 *last_publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *seq = NULL;
	int err;
	int i;

	/* Resume in the hash bucket of the last dumped type */
	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		seq_head = &tn->nametbl->seq_hlist[i];

		if (*last_type) {
			seq = nametbl_find_seq(net, *last_type);
			if (!seq)
				return -EPIPE;
		} else {
			/* Start from the first entry in this bucket */
			hlist_for_each_entry_rcu(seq, seq_head, ns_list)
				break;
			if (!seq)
				continue;
		}

		hlist_for_each_entry_from_rcu(seq, ns_list) {
			spin_lock_bh(&seq->lock);
			err = __tipc_nl_subseq_list(msg, seq, last_lower,
						    last_publ);

			if (err) {
				*last_type = seq->type;
				spin_unlock_bh(&seq->lock);
				return err;
			}
			spin_unlock_bh(&seq->lock);
		}
		*last_type = 0;
	}
	return 0;
}
983
/* Netlink dump handler for TIPC_NL_NAME_TABLE_GET.
 * cb->args[0..2] carry the resume cursor (type, lower, publication key)
 * across invocations; cb->args[3] flags completion.
 */
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int done = cb->args[3];
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_publ = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* -EMSGSIZE just means "buffer full, continue next call".
		 * Any other error (e.g. -EPIPE when the resume cursor
		 * vanished) aborts the dump; setting prev_seq here
		 * presumably makes the netlink consistency check flag the
		 * final NLMSG_DONE with NLM_F_DUMP_INTR — confirm against
		 * the netlink dump infrastructure.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_publ;
	cb->args[3] = done;

	return skb->len;
}
1023
1024void tipc_plist_push(struct tipc_plist *pl, u32 port)
1025{
1026 struct tipc_plist *nl;
1027
1028 if (likely(!pl->port)) {
1029 pl->port = port;
1030 return;
1031 }
1032 if (pl->port == port)
1033 return;
1034 list_for_each_entry(nl, &pl->list, list) {
1035 if (nl->port == port)
1036 return;
1037 }
1038 nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
1039 if (nl) {
1040 nl->port = port;
1041 list_add(&nl->list, &pl->list);
1042 }
1043}
1044
1045u32 tipc_plist_pop(struct tipc_plist *pl)
1046{
1047 struct tipc_plist *nl;
1048 u32 port = 0;
1049
1050 if (likely(list_empty(&pl->list))) {
1051 port = pl->port;
1052 pl->port = 0;
1053 return port;
1054 }
1055 nl = list_first_entry(&pl->list, typeof(*nl), list);
1056 port = nl->port;
1057 list_del(&nl->list);
1058 kfree(nl);
1059 return port;
1060}
1061