/* net/tipc/link.c: TIPC link code */

37#include "core.h"
38#include "subscr.h"
39#include "link.h"
40#include "bcast.h"
41#include "socket.h"
42#include "name_distr.h"
43#include "discover.h"
44#include "netlink.h"
45#include "monitor.h"
46#include "trace.h"
47#include "crypto.h"
48
49#include <linux/pkt_sched.h>
50
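
/* struct tipc_stats - per-link statistics counters; cleared by
 * tipc_link_reset_stats()
 */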
struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;
	u32 accu_queue_sz;
	u32 queue_sz_counts;
	u32 msg_length_counts;
	u32 msg_lengths_total;
	u32 msg_length_profile[7];
};
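
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @mtu: current maximum packet size for this link
 * @transmq: queue of sent, unacked packets
 * @backlogq: queue of not-yet-sent packets, with per-importance limits
 * @snd_nxt: next sequence number to use for outbound messages
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred receive queue for out-of-sequence packets
 * @wakeupq: list of wakeup msgs waiting for link congestion to abate
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @ackers: # of peers that need to receive broadcast state acks
 * @stats: counters for link activity, see struct tipc_stats
 */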
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;
	u16 window;
	u16 min_win;
	u16 ssthresh;
	u16 max_win;
	u16 cong_acks;
	u16 checkpoint;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	u16 last_gap;
	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/* Link FSM states
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe << 4,
	LINK_RESET		= 0x1 << 8,
	LINK_RESETTING		= 0x2 << 12,
	LINK_PEER_RESET		= 0xd << 16,
	LINK_FAILINGOVER	= 0xf << 20,
	LINK_SYNCHING		= 0xc << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

struct net *tipc_link_net(struct tipc_link *l)
{
	return l->net;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}
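
/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @self: local unicast link id
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */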
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only,
	 * peer must not be known if this should be a broadcast link
	 */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
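
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */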
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}
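
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */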
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
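
/**
 * link_profile_stats - update statistical profiling of traffic
 * @l: link to update
 */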
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
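
/* tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */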
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}
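
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */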
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		probe |= !skb_queue_empty(&l->deferdq);
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
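
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */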
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
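
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */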
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}
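
/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */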
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
					      struct tipc_link *l)
{
	if (link_is_bc_sndlink(l))
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	else
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->last_gap = 0;
	kfree(l->last_ga);
	l->last_ga = NULL;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}
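
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */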
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	struct tipc_msg *hdr;
	bool new_bundle;
	int rc = 0;
	int imp;

	if (pkt_cnt <= 0)
		return 0;

	hdr = buf_msg(skb_peek(list));
	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	imp = msg_importance(hdr);
	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			tipc_link_set_skb_retransmit_time(skb, l);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next messages */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}

static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
		return;
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
		return;
	}
	/* Don't adjust window if no traffic at all */
	if (txq_len + bklog_len < cwin)
		return;

	/* Check if we can grow the window */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		tipc_link_set_skb_retransmit_time(skb, l);

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}
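
/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if the repeated retransmit failures happens, otherwise
 * false
 */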
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	return true;
}
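
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */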
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		fallthrough;
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
#ifdef CONFIG_TIPC_CRYPTO
	case MSG_CRYPTO:
		tipc_crypto_msg_rcv(l->net, skb);
		return true;
#endif
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}
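
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */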
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}
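
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *			 inner message along with the ones in the old link's
 *			 deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */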
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
			     struct sk_buff_head *inputq)
{
	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
	struct sk_buff_head *fdefq = &l->failover_deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	int ipos = 0;
	int rc = 0;
	u16 seqno;

	if (msg_type(hdr) == SYNCH_MSG) {
		kfree_skb(skb);
		return 0;
	}

	/* Not a fragment? */
	if (likely(!msg_nof_fragms(hdr))) {
		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
					    skb_queue_len(fdefq));
			return 0;
		}
		kfree_skb(skb);
	} else {
		/* Set fragment type for buf_append */
		if (msg_fragm_no(hdr) == 1)
			msg_set_type(hdr, FIRST_FRAGMENT);
		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
			msg_set_type(hdr, FRAGMENT);
		else
			msg_set_type(hdr, LAST_FRAGMENT);

		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but non-complete reassembly? */
			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
				return 0;
			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		iskb = skb;
	}

	do {
		seqno = buf_seqno(iskb);
		if (unlikely(less(seqno, l->drop_point))) {
			kfree_skb(iskb);
			continue;
		}
		if (unlikely(seqno != l->drop_point)) {
			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
			continue;
		}

		l->drop_point++;
		if (!tipc_data_input(l, iskb, inputq))
			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
		if (unlikely(rc))
			break;
	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

	return rc;
}
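
/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */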
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
			  struct tipc_msg *hdr, bool uc)
{
	struct tipc_gap_ack_blks *p;
	u16 sz = 0;

	/* Does peer support the Gap ACK blocks feature? */
	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
		sz = ntohs(p->len);
		/* Sanity check */
		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
			/* Good, check if the desired type exists */
			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
				goto ok;
		/* Backward compatible: peer might not support bc, but uc? */
		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
			if (p->ugack_cnt) {
				p->bgack_cnt = 0;
				goto ok;
			}
		}
	}
	/* Other cases: ignore! */
	p = NULL;

ok:
	*ga = p;
	return sz;
}

static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index)
{
	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 expect, seqno = 0;
	u8 n = 0;

	if (!skb)
		return 0;

	expect = buf_seqno(skb);
	skb_queue_walk(&l->deferdq, skb) {
		seqno = buf_seqno(skb);
		if (unlikely(more(seqno, expect))) {
			gacks[n].ack = htons(expect - 1);
			gacks[n].gap = htons(seqno - expect);
			if (++n >= MAX_GAP_ACK_BLKS / 2) {
				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
						    l->name, n,
						    skb_queue_len(&l->deferdq));
				return n;
			}
		} else if (unlikely(less(seqno, expect))) {
			pr_warn("Unexpected skb in deferdq!\n");
			continue;
		}
		expect = seqno + 1;
	}

	/* The last block */
	gacks[n].ack = htons(seqno);
	gacks[n].gap = 0;
	n++;
	return n;
}
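
/**
 * tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast and broadcast
 * receiver links of a certain peer; the buffer after built has the network
 * data format as found in the 'struct tipc_gap_ack_blks' definition.
 *
 * Return: the actual allocated memory size
 */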
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_gap_ack_blks *ga;
	u16 len;

	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

	/* Start with broadcast link first */
	tipc_bcast_lock(bcl->net);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
	tipc_bcast_unlock(bcl->net);

	/* Now for unicast link, but an explicit NACK only */
	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

	/* Total len */
	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
	ga->len = htons(len);
	return len;
}
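
/**
 * tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *			       acked packets, also doing retransmissions if
 *			       gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failure
 *      happens (- unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */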
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc)
{
	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
	struct tipc_gap_ack *gacks = NULL;
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr;
	u32 qlen = skb_queue_len(&l->transmq);
	u16 nacked = acked, ngap = gap, gack_cnt = 0;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno, n = 0;
	u16 end = r->acked, start = end, offset = r->last_gap;
	u16 si = (last_ga) ? last_ga->start_index : 0;
	bool is_uc = !link_is_bc_sndlink(l);
	bool bc_has_acked = false;

	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

	/* Determine Gap ACK blocks if any for the particular link */
	if (ga && is_uc) {
		/* Get the Gap ACKs, uc part */
		gack_cnt = ga->ugack_cnt;
		gacks = &ga->gacks[ga->bgack_cnt];
	} else if (ga) {
		/* Copy the Gap ACKs, bc part, for later renewal if needed */
		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
				  GFP_ATOMIC);
		if (likely(this_ga)) {
			this_ga->start_index = 0;
			/* Start with the bc Gap ACKs */
			gack_cnt = this_ga->bgack_cnt;
			gacks = &this_ga->gacks[0];
		} else {
			/* Allocation failed, simply ignore the Gap ACKs */
			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
		}
	}

	/* Advance the link transmq */
	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		seqno = buf_seqno(skb);

next_gap_ack:
		if (less_eq(seqno, nacked)) {
			if (is_uc)
				goto release;
			/* Skip packets peer has already acked */
			if (!more(seqno, r->acked))
				continue;
			/* Get the next of last Gap ACK blocks */
			while (more(seqno, end)) {
				if (!last_ga || si >= last_ga->bgack_cnt)
					break;
				start = end + offset + 1;
				end = ntohs(last_ga->gacks[si].ack);
				offset = ntohs(last_ga->gacks[si].gap);
				si++;
				WARN_ONCE(more(start, end) ||
					  (!offset &&
					   si < last_ga->bgack_cnt) ||
					  si > MAX_GAP_ACK_BLKS,
					  "Corrupted Gap ACK: %d %d %d %d %d\n",
					  start, end, offset, si,
					  last_ga->bgack_cnt);
			}
			/* Check against the last Gap ACK block */
			if (in_range(seqno, start, end))
				continue;
			/* Update the BC receivers & no. of acked packets */
			bc_has_acked = true;
			if (--TIPC_SKB_CB(skb)->ackers)
				continue;
release:
			/* release skb */
			__skb_unlink(skb, &l->transmq);
			kfree_skb(skb);
		} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check if repeated retrans failures? */
			if (unlikely(seqno == acked + 1 &&
				     link_retransmit_failure(l, r, rc))) {
				/* Ignore this bc Gap ACKs if any */
				kfree(this_ga);
				this_ga = NULL;
				break;
			}
			/* retransmit skb if unrestricted */
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			tipc_link_set_skb_retransmit_time(skb, l);
			_skb = pskb_copy(skb, GFP_ATOMIC);
			if (!_skb)
				continue;
			hdr = buf_msg(_skb);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb->priority = TC_PRIO_CONTROL;
			__skb_queue_tail(xmitq, _skb);
			l->stats.retransmitted++;
			if (!is_uc)
				r->stats.retransmitted++;
			*retransmitted = true;
			/* Increase actual retrans counter & mark first time */
			if (!TIPC_SKB_CB(skb)->retr_cnt++)
				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
		} else {
			/* retry with Gap ACK blocks if any */
			if (n >= gack_cnt)
				break;
			nacked = ntohs(gacks[n].ack);
			ngap = ntohs(gacks[n].gap);
			n++;
			goto next_gap_ack;
		}
	}

	/* Renew last Gap ACK blocks for bc if needed */
	if (bc_has_acked) {
		if (this_ga) {
			kfree(last_ga);
			r->last_ga = this_ga;
			r->last_gap = gap;
		} else if (last_ga) {
			if (less(acked, start)) {
				si--;
				offset = start - acked - 1;
			} else if (less(acked, end)) {
				acked = end;
			}
			if (si < last_ga->bgack_cnt) {
				last_ga->start_index = si;
				r->last_gap = offset;
			} else {
				kfree(last_ga);
				r->last_ga = NULL;
				r->last_gap = 0;
			}
		} else {
			r->last_gap = 0;
		}
		r->acked = acked;
	} else {
		kfree(this_ga);
	}

	return qlen - skb_queue_len(&l->transmq);
}
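
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */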
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}
	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	struct sk_buff_head *dfq = &l->deferdq;
	u32 defq_len = skb_queue_len(dfq);
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
					  rcvgap, 0, 0, xmitq);
	}
	return 0;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to use for packets to be sent by this function
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	u16 seqno, rcv_nxt, win_lim;
	int released = 0;
	int rc = 0;

	/* Verify and update link state */
	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		return tipc_link_proto_rcv(l, skb, xmitq);

	/* Don't send probe at next timeout expiration */
	l->silent_intv_cnt = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			kfree_skb(skb);
			break;
		}

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			break;
		}
		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
						      NULL, NULL, NULL, NULL);

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
				l->stats.duplicates++;
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;

		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
		else if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

	/* Forward queues and wake up waiting users */
	if (released) {
		tipc_link_update_cwin(l, released, 0);
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_mon_state *mstate = &l->mon_state;
	struct sk_buff_head *dfq = &l->deferdq;
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool node_up = link_is_up(bcl);
	u16 glen = 0, bc_rcvgap = 0;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if ((probe || probe_reply) && !skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		bc_rcvgap = link_bc_rcv_gap(bcl);
		msg_set_bc_gap(hdr, bc_rcvgap);
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
			glen = tipc_build_gap_ack_blks(l, hdr);
		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
		skb_trim(skb, INT_H_SIZE + glen + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		if (mtyp == ACTIVATE_MSG) {
			msg_set_dest_session_valid(hdr, 1);
			msg_set_dest_session(hdr, l->peer_session);
		}
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	if (bc_rcvgap)
		bcl->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
	trace_tipc_proto_build(skb, false, l->name);
}

void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 onode = tipc_own_addr(l->net);
	struct tipc_msg *hdr, *ihdr;
	struct sk_buff_head tnlq;
	struct sk_buff *skb;
	u32 dnode = l->addr;

	__skb_queue_head_init(&tnlq);
	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
			      INT_H_SIZE, BASIC_H_SIZE,
			      dnode, onode, 0, 0, 0);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}

	hdr = buf_msg(skb);
	msg_set_msgcnt(hdr, 1);
	msg_set_bearer_id(hdr, l->peer_bearer_id);

	ihdr = (struct tipc_msg *)msg_data(hdr);
	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
		      BASIC_H_SIZE, dnode);
	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, xmitq);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of current send queue(s)
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq, frags;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;
	bool pktcnt_need_update = false;
	u16 syncpt;
	int rc;

	if (!tnl)
		return;

	__skb_queue_head_init(&tnlq);

	/* Link Synching:
	 * From now on, send only one single ("dummy") SYNCH message
	 * to peer. The SYNCH message does not contain any data, just
	 * a header conveying the synch point to the peer.
	 */
	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
					 INT_H_SIZE, 0, l->addr,
					 tipc_own_addr(l->net),
					 0, 0, 0);
		if (!tnlskb) {
			pr_warn("%sunable to create dummy SYNCH_MSG\n",
				link_co_err);
			return;
		}

		hdr = buf_msg(tnlskb);
		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
		msg_set_syncpt(hdr, syncpt);
		msg_set_bearer_id(hdr, l->peer_bearer_id);
		__skb_queue_tail(&tnlq, tnlskb);
		tipc_link_xmit(tnl, &tnlq, xmitq);
		return;
	}

	__skb_queue_head_init(&tmpxq);
	__skb_queue_head_init(&frags);
	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	if (mtyp == SYNCH_MSG)
		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
	else
		pktcnt = skb_queue_len(&l->transmq);
	pktcnt += skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);

		/* Tunnel link MTU is not large enough? This could be
		 * due to:
		 * 1) Link MTU has just changed or set differently;
		 * 2) Or FAILOVER on the top of a SYNCH message
		 *
		 * The 2nd case should not happen if peer supports
		 * TIPC_TUNNEL_ENHANCED
		 */
		if (pktlen > tnl->mtu - INT_H_SIZE) {
			if (mtyp == FAILOVER_MSG &&
			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
						       &frags);
				if (rc) {
					pr_warn("%sunable to frag msg: rc %d\n",
						link_co_err, rc);
					return;
				}
				pktcnt += skb_queue_len(&frags) - 1;
				pktcnt_need_update = true;
				skb_queue_splice_tail_init(&frags, &tnlq);
				continue;
			}
			/* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
			 * => Just warn it and return!
			 */
			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
					    link_co_err, msg_user(hdr),
					    msg_type(hdr), msg_size(hdr));
			return;
		}

		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	if (pktcnt_need_update)
		skb_queue_walk(&tnlq, skb) {
			hdr = buf_msg(skb);
			msg_set_msgcnt(hdr, pktcnt);
		}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;

		/* Failover the link's deferdq */
		if (unlikely(!skb_queue_empty(fdefq))) {
			pr_warn("Link failover deferdq not empty: %d!\n",
				skb_queue_len(fdefq));
			__skb_queue_purge(fdefq);
		}
		skb_queue_splice_init(&l->deferdq, fdefq);
	}
}
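
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see the tipc_node_link_failover() for details
 *
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue to put messages to be sent by link
 */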
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
				struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;

	tipc_link_create_dummy_tnl_msg(tnl, xmitq);

	/* This failover link endpoint was never established before,
	 * so it has not received anything from peer.
	 * Otherwise, it must be a normal failover situation or the
	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
	 * would have to start over from scratch instead.
	 */
	WARN_ON(l && tipc_link_is_up(l));
	tnl->drop_point = 1;
	tnl->failover_reasm_skb = NULL;

	/* Lets not consider current failover deferdq as well.. */
	if (unlikely(!skb_queue_empty(fdefq))) {
		pr_warn("Link failover deferdq not empty: %d!\n",
			skb_queue_len(fdefq));
		__skb_queue_purge(fdefq);
	}
}

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		/* Extra sanity check */
		if (!link_is_up(l) && msg_ack(hdr))
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_gap_ack_blks *ga = NULL;
	bool reply = msg_probe(hdr), retransmitted = false;
	u16 dlen = msg_data_sz(hdr), glen = 0;
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 ack = msg_ack(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcvgap = 0;
	int mtyp = msg_type(hdr);
	int rc = 0, released;
	char *if_name;
	void *data;

	trace_tipc_proto_rcv(skb, false, l->name);
	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
		goto exit;
	}

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* If peer is going down we want full re-establish cycle */
		if (msg_peer_stopping(hdr)) {
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
			break;
		}

		/* If this endpoint was re-created while peer retained its old
		 * session number, avoid reusing that session
		 */
		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
		    l->session != msg_dest_session(hdr)) {
			if (less(l->session, msg_dest_session(hdr)))
				l->session = msg_dest_session(hdr) + 1;
			break;
		}

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (mtyp == RESET_MSG || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locked */
		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Receive Gap ACK blocks from peer if any */
		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);

		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if ((reply || msg_is_keepalive(hdr)) &&
		    more(peers_snd_nxt, rcv_nxt) &&
		    !tipc_link_is_synching(l) &&
		    skb_queue_empty(&l->deferdq))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);

		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
						     &retransmitted, &rc);
		if (gap)
			l->stats.recv_nacks++;
		if (released || retransmitted)
			tipc_link_update_cwin(l, released, retransmitted);
		if (released)
			tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast state message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
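
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */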
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}
2467
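/* tipc_link_bc_ack_rcv - process an ack/gap report received on broadcast
 * receive link @r: release acked packets from the broadcast send link's
 * transmit queue and schedule retransmissions for any reported gap
 */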
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}

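/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */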
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

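/* tipc_link_set_queue_limits - set the minimum/maximum window and scale
 * the per-importance backlog queue limits accordingly; the limit for
 * system-importance traffic is sized to hold a full name table bulk
 * distribution
 */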
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}

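/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */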
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

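/* link_print - print link state and send/receive queue status for debugging */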
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

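/* Parse and validate nested (link) properties valid for media, bearer and link
 */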
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}

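/* __tipc_nl_add_stats - fill unicast link statistics counters into a
 * nested TIPC_NLA_LINK_STATS netlink attribute
 */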
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
		 s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
		 (s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

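/* Caller should hold appropriate locks to protect the link */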
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

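/* __tipc_nl_add_bc_link_stat - fill broadcast link statistics counters
 * into a nested TIPC_NLA_LINK_STATS netlink attribute
 */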
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
		 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

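/* tipc_nl_add_bc_link - add a broadcast link descriptor, including its
 * properties and statistics, to an outgoing netlink message
 */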
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}

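/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump
 *           (TIPC_DUMP_TRANSMQ, TIPC_DUMP_BACKLOGQ, TIPC_DUMP_DEFERDQ,
 *            TIPC_DUMP_INPUTQ, TIPC_DUMP_WAKEUP)
 * @buf: buffer to be filled with the dump data
 *
 * Return: the number of characters written to @buf
 */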
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}