/*
 * net/tipc/name_distr.c: TIPC name distribution code
 */
#include "core.h"
#include "link.h"
#include "name_distr.h"

int sysctl_tipc_named_timeout __read_mostly = 2000;

struct distr_queue_item {
	struct distr_item i;
	u32 dtype;
	u32 node;
	unsigned long expires;
	struct list_head next;
};

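/**
 * publ_to_item - add publication info to a publication message
 * @i: location of item in the message
 * @p: publication info
 */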
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->port = htonl(p->port);
	i->key = htonl(p->key);
}

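/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */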
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

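/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @publ: the new publication
 *
 * Node scope publications are only linked into the local name table;
 * cluster scope publications are also packed into a PUBLICATION message
 * for distribution to the rest of the cluster.
 */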
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	if (publ->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&publ->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

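/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @publ: the withdrawn publication
 */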
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&publ->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

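/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for this message
 */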
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	/* Tag the final buffer of the bulk with the sequence number */
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

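/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node capabilities
 */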
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

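/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @publ: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */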
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;

	spin_lock_bh(&tn->nametbl_lock);
	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
				     publ->node, publ->key);
	if (p)
		tipc_node_unsubscribe(net, &p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);

	if (p != publ) {
		pr_err("Unable to remove publication from failed node\n"
		       " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
		       publ->type, publ->lower, publ->node, publ->port,
		       publ->key);
	}

	if (p)
		kfree_rcu(p, rcu);
}

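/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
 * @net: the associated network namespace
 * @addr: failed node's address
 */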
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct distr_queue_item *e, *tmp;

	spin_lock_bh(&tn->nametbl_lock);
	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
		if (e->node != addr)
			continue;
		list_del(&e->next);
		kfree(e);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

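/**
 * tipc_publ_notify - purge all publications issued by a failed node
 * @net: the associated network namespace
 * @nsub_list: list of publications issued by the failed node
 * @addr: failed node's address
 * @capabilities: failed node's capabilities
 */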
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	tipc_dist_queue_purge(net, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

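/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *			 subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the update was applied, otherwise false.
 */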
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	u32 lower = ntohl(i->lower);
	u32 upper = ntohl(i->upper);
	u32 type = ntohl(i->type);
	u32 port = ntohl(i->port);
	u32 key = ntohl(i->key);

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, type, lower, upper,
					     TIPC_CLUSTER_SCOPE, node,
					     port, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, type, lower,
					     upper, node, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
				    type, lower, node);
	} else {
		pr_warn("Unrecognized name table message received\n");
	}
	return false;
}

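/**
 * tipc_named_dequeue - dequeue the next deliverable name table update
 * @namedq: queue of received name table update messages
 * @rcv_nxt: next expected sequence number
 * @open: true once the end of the initial bulk exchange has been seen
 *
 * Bulk and legacy messages are delivered as soon as they are found;
 * all other updates are delivered strictly in sequence number order,
 * and stale duplicates are dropped.
 */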
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		/* The last bulk message opens the gate for in-sequence
		 * delivery of subsequent incremental updates
		 */
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		/* Bulk and legacy messages bypass sequence number ordering */
		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* Deliver the next in-sequence update */
		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* Drop stale or duplicate updates */
		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

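/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue of name table update messages
 * @rcv_nxt: next expected sequence number
 * @open: true once the end of the initial bulk exchange has been seen
 */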
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

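/**
 * tipc_named_reinit - update local publications when own node id is changed
 * @net: the associated network namespace
 *
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */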
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
		publ->node = self;
	list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
		publ->node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}