/*
 * net/tipc/link.c: TIPC link code
 */
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};
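
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snd_nxt: next sequence number to use for outbound messages
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred receive queue containing out-of-sequence packets
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @stats: collects statistics regarding link activity
 */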
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;
	u16 window;
	u16 min_win;
	u16 ssthresh;
	u16 max_win;
	u16 cong_acks;
	u16 checkpoint;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	u16 last_gap;
	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);

/*  Simple link routines
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

struct net *tipc_link_net(struct tipc_link *l)
{
	return l->net;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}
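
/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @self: local unicast link id
 * @peer: uint32 identity of peer
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */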
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
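
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */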
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}
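
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */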
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
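
/* link_profile_stats - update statistical profiling of traffic
 */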
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
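
/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */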
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}
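
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */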
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		probe |= !skb_queue_empty(&l->deferdq);
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
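
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */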
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
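
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */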
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}
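
/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */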
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
					      struct tipc_link *l)
{
	if (link_is_bc_sndlink(l))
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	else
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->last_gap = 0;
	kfree(l->last_ga);
	l->last_ga = NULL;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}
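
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */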
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	struct tipc_msg *hdr;
	bool new_bundle;
	int rc = 0;
	int imp;

	if (pkt_cnt <= 0)
		return 0;

	hdr = buf_msg(skb_peek(list));
	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	imp = msg_importance(hdr);
	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			tipc_link_set_skb_retransmit_time(skb, l);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next messages */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}

static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
		return;
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
		return;
	}
	/* Don't increase window if no pressure on the transmit queue */
	if (txq_len + bklog_len < cwin)
		return;

	/* Don't increase window if there are holes the transmit queue */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		tipc_link_set_skb_retransmit_time(skb, l);

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}
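
/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver
 * @rc: returned code
 *
 * Return: true if the repeated retransmit failures happens, otherwise
 * false
 */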
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	return true;
}
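
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */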
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		fallthrough;
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
#ifdef CONFIG_TIPC_CRYPTO
	case MSG_CRYPTO:
		tipc_crypto_msg_rcv(l->net, skb);
		return true;
#endif
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}
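
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */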
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}
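
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *			 inner message along with the ones in the old link's
 *			 deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */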
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
			     struct sk_buff_head *inputq)
{
	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
	struct sk_buff_head *fdefq = &l->failover_deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	int ipos = 0;
	int rc = 0;
	u16 seqno;

	if (msg_type(hdr) == SYNCH_MSG) {
		kfree_skb(skb);
		return 0;
	}

	/* Not a fragment? */
	if (likely(!msg_nof_fragms(hdr))) {
		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
					    skb_queue_len(fdefq));
			return 0;
		}
		kfree_skb(skb);
	} else {
		/* Set fragment type for buf_append */
		if (msg_fragm_no(hdr) == 1)
			msg_set_type(hdr, FIRST_FRAGMENT);
		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
			msg_set_type(hdr, FRAGMENT);
		else
			msg_set_type(hdr, LAST_FRAGMENT);

		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but non-complete reassembly? */
			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
				return 0;
			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		iskb = skb;
	}

	do {
		seqno = buf_seqno(iskb);
		if (unlikely(less(seqno, l->drop_point))) {
			kfree_skb(iskb);
			continue;
		}
		if (unlikely(seqno != l->drop_point)) {
			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
			continue;
		}

		l->drop_point++;
		if (!tipc_data_input(l, iskb, inputq))
			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
		if (unlikely(rc))
			break;
	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

	return rc;
}
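
/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */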
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
			  struct tipc_msg *hdr, bool uc)
{
	struct tipc_gap_ack_blks *p;
	u16 sz = 0;

	/* Does peer support the Gap ACK blocks feature? */
	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
		sz = ntohs(p->len);
		/* Sanity check */
		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
			/* Good, check if the desired type exists */
			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
				goto ok;
		/* Backward compatible: peer might not support bc, but uc? */
		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
			if (p->ugack_cnt) {
				p->bgack_cnt = 0;
				goto ok;
			}
		}
	}
	/* Other cases: ignore! */
	p = NULL;

ok:
	*ga = p;
	return sz;
}

static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index)
{
	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 expect, seqno = 0;
	u8 n = 0;

	if (!skb)
		return 0;

	expect = buf_seqno(skb);
	skb_queue_walk(&l->deferdq, skb) {
		seqno = buf_seqno(skb);
		if (unlikely(more(seqno, expect))) {
			gacks[n].ack = htons(expect - 1);
			gacks[n].gap = htons(seqno - expect);
			if (++n >= MAX_GAP_ACK_BLKS / 2) {
				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
						    l->name, n,
						    skb_queue_len(&l->deferdq));
				return n;
			}
		} else if (unlikely(less(seqno, expect))) {
			pr_warn("Unexpected skb in deferdq!\n");
			continue;
		}
		expect = seqno + 1;
	}

	/* last block */
	gacks[n].ack = htons(seqno);
	gacks[n].gap = 0;
	n++;
	return n;
}
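
/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast & broadcast
 * receiver links of a certain peer, the buffer after built has the network
 * data format as found at the struct tipc_gap_ack_blks definition.
 *
 * returns the actual allocated memory size
 */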
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_gap_ack_blks *ga;
	u16 len;

	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

	/* Start with broadcast link first */
	tipc_bcast_lock(bcl->net);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
	tipc_bcast_unlock(bcl->net);

	/* Now for unicast link, but an explicit NACK only */
	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

	/* Total len */
	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
	ga->len = htons(len);
	return len;
}
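
/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *			       acked packets, also doing retransmissions if
 *			       gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failures
 *      happens (- unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */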
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc)
{
	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
	struct tipc_gap_ack *gacks = NULL;
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr;
	u32 qlen = skb_queue_len(&l->transmq);
	u16 nacked = acked, ngap = gap, gack_cnt = 0;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno, n = 0;
	u16 end = r->acked, start = end, offset = r->last_gap;
	u16 si = (last_ga) ? last_ga->start_index : 0;
	bool is_uc = !link_is_bc_sndlink(l);
	bool bc_has_acked = false;

	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

	/* Determine Gap ACK blocks if any for the particular link */
	if (ga && is_uc) {
		/* Get the Gap ACKs, uc part */
		gack_cnt = ga->ugack_cnt;
		gacks = &ga->gacks[ga->bgack_cnt];
	} else if (ga) {
		/* Copy the Gap ACKs, bc part, for later renewal if needed */
		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
				  GFP_ATOMIC);
		if (likely(this_ga)) {
			this_ga->start_index = 0;
			/* Start with the bc Gap ACKs */
			gack_cnt = this_ga->bgack_cnt;
			gacks = &this_ga->gacks[0];
		} else {
			/* Hmm, we can get in trouble..., simply ignore it */
			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
		}
	}

	/* Advance the link transmq */
	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		seqno = buf_seqno(skb);

next_gap_ack:
		if (less_eq(seqno, nacked)) {
			if (is_uc)
				goto release;
			/* Skip packets peer has already acked */
			if (!more(seqno, r->acked))
				continue;
			/* Get the next of last Gap ACK blocks */
			while (more(seqno, end)) {
				if (!last_ga || si >= last_ga->bgack_cnt)
					break;
				start = end + offset + 1;
				end = ntohs(last_ga->gacks[si].ack);
				offset = ntohs(last_ga->gacks[si].gap);
				si++;
				WARN_ONCE(more(start, end) ||
					  (!offset &&
					   si < last_ga->bgack_cnt) ||
					  si > MAX_GAP_ACK_BLKS,
					  "Corrupted Gap ACK: %d %d %d %d %d\n",
					  start, end, offset, si,
					  last_ga->bgack_cnt);
			}
			/* Check against the last Gap ACK block */
			if (in_range(seqno, start, end))
				continue;
			/* Update/release the packet peer is acking */
			bc_has_acked = true;
			if (--TIPC_SKB_CB(skb)->ackers)
				continue;
release:
			/* release skb */
			__skb_unlink(skb, &l->transmq);
			kfree_skb(skb);
		} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check if repeated retrans failures? */
			if (unlikely(seqno == acked + 1 &&
				     link_retransmit_failure(l, r, rc))) {
				/* Ignore this bc Gap ACKs if any */
				kfree(this_ga);
				this_ga = NULL;
				break;
			}
			/* retransmit skb if unrestricted */
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			tipc_link_set_skb_retransmit_time(skb, l);
			_skb = pskb_copy(skb, GFP_ATOMIC);
			if (!_skb)
				continue;
			hdr = buf_msg(_skb);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb->priority = TC_PRIO_CONTROL;
			__skb_queue_tail(xmitq, _skb);
			l->stats.retransmitted++;
			if (!is_uc)
				r->stats.retransmitted++;
			*retransmitted = true;
			/* Increase actual retrans counter & mark first time */
			if (!TIPC_SKB_CB(skb)->retr_cnt++)
				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
		} else {
			/* retry with Gap ACK blocks if any */
			if (n >= gack_cnt)
				break;
			nacked = ntohs(gacks[n].ack);
			ngap = ntohs(gacks[n].gap);
			n++;
			goto next_gap_ack;
		}
	}

	/* Renew last Gap ACK blocks for bc if needed */
	if (bc_has_acked) {
		if (this_ga) {
			kfree(last_ga);
			r->last_ga = this_ga;
			r->last_gap = gap;
		} else if (last_ga) {
			if (less(acked, start)) {
				si--;
				offset = start - acked - 1;
			} else if (less(acked, end)) {
				acked = end;
			}
			if (si < last_ga->bgack_cnt) {
				last_ga->start_index = si;
				r->last_gap = offset;
			} else {
				kfree(last_ga);
				r->last_ga = NULL;
				r->last_gap = 0;
			}
		} else {
			r->last_gap = 0;
		}
		r->acked = acked;
	} else {
		kfree(this_ga);
	}

	return qlen - skb_queue_len(&l->transmq);
}
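
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */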
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}
	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
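
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */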
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}
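
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */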
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	struct sk_buff_head *dfq = &l->deferdq;
	u32 defq_len = skb_queue_len(dfq);
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
					  rcvgap, 0, 0, xmitq);
	}
	return 0;
}
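
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */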
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	u16 seqno, rcv_nxt, win_lim;
	int released = 0;
	int rc = 0;

	/* Verify and update link state */
	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		return tipc_link_proto_rcv(l, skb, xmitq);

	/* Don't send probe at next timeout expiration */
	l->silent_intv_cnt = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			kfree_skb(skb);
			break;
		}

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			break;
		}
		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
						      NULL, NULL, NULL, NULL);

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
				l->stats.duplicates++;
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;

		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
		else if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

	/* Forward queues and wake up waiting users */
	if (released) {
		tipc_link_update_cwin(l, released, 0);
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_mon_state *mstate = &l->mon_state;
	struct sk_buff_head *dfq = &l->deferdq;
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool node_up = link_is_up(bcl);
	u16 glen = 0, bc_rcvgap = 0;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if ((probe || probe_reply) && !skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		bc_rcvgap = link_bc_rcv_gap(bcl);
		msg_set_bc_gap(hdr, bc_rcvgap);
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
			glen = tipc_build_gap_ack_blks(l, hdr);
		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
		skb_trim(skb, INT_H_SIZE + glen + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		if (mtyp == ACTIVATE_MSG) {
			msg_set_dest_session_valid(hdr, 1);
			msg_set_dest_session(hdr, l->peer_session);
		}
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	if (bc_rcvgap)
		bcl->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
	trace_tipc_proto_build(skb, false, l->name);
}

void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 onode = tipc_own_addr(l->net);
	struct tipc_msg *hdr, *ihdr;
	struct sk_buff_head tnlq;
	struct sk_buff *skb;
	u32 dnode = l->addr;

	__skb_queue_head_init(&tnlq);
	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
			      INT_H_SIZE, BASIC_H_SIZE,
			      dnode, onode, 0, 0, 0);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}

	hdr = buf_msg(skb);
	msg_set_msgcnt(hdr, 1);
	msg_set_bearer_id(hdr, l->peer_bearer_id);

	ihdr = (struct tipc_msg *)msg_data(hdr);
	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
		      BASIC_H_SIZE, dnode);
	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, xmitq);
}
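
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */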
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq, frags;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;
	bool pktcnt_need_update = false;
	u16 syncpt;
	int rc;

	if (!tnl)
		return;

	__skb_queue_head_init(&tnlq);

	/* Link Synching:
	 * From now on, send only one single ("dummy") SYNCH message
	 * to peer. The SYNCH message does not contain any data, just
	 * a header conveying the synch point to the peer.
	 */
	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
					 INT_H_SIZE, 0, l->addr,
					 tipc_own_addr(l->net),
					 0, 0, 0);
		if (!tnlskb) {
			pr_warn("%sunable to create dummy SYNCH_MSG\n",
				link_co_err);
			return;
		}

		hdr = buf_msg(tnlskb);
		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
		msg_set_syncpt(hdr, syncpt);
		msg_set_bearer_id(hdr, l->peer_bearer_id);
		__skb_queue_tail(&tnlq, tnlskb);
		tipc_link_xmit(tnl, &tnlq, xmitq);
		return;
	}

	__skb_queue_head_init(&tmpxq);
	__skb_queue_head_init(&frags);
	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	if (mtyp == SYNCH_MSG)
		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
	else
		pktcnt = skb_queue_len(&l->transmq);
	pktcnt += skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);

		/* Tunnel link MTU is not large enough? This could be
		 * due to:
		 * 1) Link MTU has just changed or set differently;
		 * 2) Or FAILOVER on the top of a SYNCH message
		 *
		 * The 2nd case should not happen if peer supports
		 * TIPC_TUNNEL_ENHANCED
		 */
		if (pktlen > tnl->mtu - INT_H_SIZE) {
			if (mtyp == FAILOVER_MSG &&
			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
						       &frags);
				if (rc) {
					pr_warn("%sunable to frag msg: rc %d\n",
						link_co_err, rc);
					return;
				}
				pktcnt += skb_queue_len(&frags) - 1;
				pktcnt_need_update = true;
				skb_queue_splice_tail_init(&frags, &tnlq);
				continue;
			}
			/* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
			 * => Just warn it and return!
			 */
			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
					    link_co_err, msg_user(hdr),
					    msg_type(hdr), msg_size(hdr));
			return;
		}

		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	if (pktcnt_need_update)
		skb_queue_walk(&tnlq, skb) {
			hdr = buf_msg(skb);
			msg_set_msgcnt(hdr, pktcnt);
		}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;

		/* Failover the link's deferdq */
		if (unlikely(!skb_queue_empty(fdefq))) {
			pr_warn("Link failover deferdq not empty: %d!\n",
				skb_queue_len(fdefq));
			__skb_queue_purge(fdefq);
		}
		skb_queue_splice_init(&l->deferdq, fdefq);
	}
}
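
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see the tipc_node_link_failover() for details
 *
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited
 */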
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
				struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;

	tipc_link_create_dummy_tnl_msg(tnl, xmitq);

	/* This failover link endpoint was never established before,
	 * so it has not received anything from peer.
	 * Otherwise, it must be a normal failover situation or the
	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
	 * would have to start over from scratch instead.
	 */
	WARN_ON(l && tipc_link_is_up(l));
	tnl->drop_point = 1;
	tnl->failover_reasm_skb = NULL;

	/* Initiate the failover link's deferdq */
	if (unlikely(!skb_queue_empty(fdefq))) {
		pr_warn("Link failover deferdq not empty: %d!\n",
			skb_queue_len(fdefq));
		__skb_queue_purge(fdefq);
	}
}
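
/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */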
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		/* Extra sanity check */
		if (!link_is_up(l) && msg_ack(hdr))
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}
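
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */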
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_gap_ack_blks *ga = NULL;
	bool reply = msg_probe(hdr), retransmitted = false;
	u16 dlen = msg_data_sz(hdr), glen = 0;
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 ack = msg_ack(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcvgap = 0;
	int mtyp = msg_type(hdr);
	int rc = 0, released;
	char *if_name;
	void *data;

	trace_tipc_proto_rcv(skb, false, l->name);
	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
		goto exit;
	}

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* If peer is going down we want full re-establish cycle */
		if (msg_peer_stopping(hdr)) {
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
			break;
		}

		/* If this endpoint was re-created while peer was ESTABLISHING
		 * it doesn't know current session number. Force re-synch.
		 */
		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
		    l->session != msg_dest_session(hdr)) {
			if (less(l->session, msg_dest_session(hdr)))
				l->session = msg_dest_session(hdr) + 1;
			break;
		}

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (mtyp == RESET_MSG || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Receive Gap ACK blocks from peer if any */
		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);

		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if ((reply || msg_is_keepalive(hdr)) &&
		    more(peers_snd_nxt, rcv_nxt) &&
		    !tipc_link_is_synching(l) &&
		    skb_queue_empty(&l->deferdq))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);

		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
						     &retransmitted, &rc);
		if (gap)
			l->stats.recv_nacks++;
		if (released || retransmitted)
			tipc_link_update_cwin(l, released, retransmitted);
		if (released)
			tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
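
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */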
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
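
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */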
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}
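
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */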
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}
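
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */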
2412int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2413 struct sk_buff_head *xmitq)
2414{
2415 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2416 int rc = 0;
2417
2418 if (!link_is_up(l))
2419 return rc;
2420
2421 if (!msg_peer_node_is_up(hdr))
2422 return rc;
2423
2424
2425 if (msg_ack(hdr))
2426 l->bc_peer_is_up = true;
2427
2428 if (!l->bc_peer_is_up)
2429 return rc;
2430
2431
2432 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2433 return rc;
2434
2435 l->snd_nxt = peers_snd_nxt;
2436 if (link_bc_rcv_gap(l))
2437 rc |= TIPC_LINK_SND_STATE;
2438
2439
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

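	/* Otherwise, be backwards compatible */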
	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

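	/* Don't NACK if one was recently sent or peeked */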
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

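	/* Conditionally delay NACK sending until next synch rcv */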
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

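	/* Send NACK now but suppress next one */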
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}

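/* tipc_link_bc_ack_rcv - advance the broadcast send link according to the
 * ack/gap report received on broadcast receive link @r
 */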
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}

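/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * Kept for backward compatibility with peers that do not report
 * gaps via regular STATE messages (TIPC_BCAST_STATE_NACK)
 */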
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

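	/* Msg for other node => suppress own NACK at next sync if applicable */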
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

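/* tipc_link_set_queue_limits(): set the link window bounds and the
 * per-importance backlog queue limits, scaled from the minimum window
 */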
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}

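/**
 * tipc_link_reset_stats - reset all statistics counters for a link
 * @l: link to be reset
 */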
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

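/* Parse and validate nested (link) properties valid for media, bearer and link
 */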
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}

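/* Fill a nested TIPC_NLA_LINK_STATS attribute from the given link statistics */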
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
		 s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
		 (s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

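/* Caller should hold appropriate locks to protect the link */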
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

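/* As __tipc_nl_add_stats(), but with the mapping used for the broadcast link */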
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
		 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

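/* tipc_nl_add_bc_link - append broadcast link data to a netlink message */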
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

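	/* The broadcast link is always up */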
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

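/* tipc_link_set_tolerance(): set link tolerance and, when the link is up,
 * announce the new value to the peer in a STATE message
 */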
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}

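/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask to decide if any link queue to be dumped?
 *           - TIPC_DUMP_NONE: don't dump link queues
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferd queue
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: returned buffer of dump data in format
 */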
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}