// SPDX-License-Identifier: GPL-2.0
/* B.A.T.M.A.N. advanced throughput meter (tp_meter) implementation
 *
 * Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Edo Monticelli, Antonio Quartulli
 */
#include "tp_meter.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/param.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "send.h"

/**
 * BATADV_TP_DEF_TEST_LENGTH - Default test length in milliseconds, used when
 *  the user does not specify one
 */
#define BATADV_TP_DEF_TEST_LENGTH 10000

/**
 * BATADV_TP_AWND - Advertised window by the receiver (in bytes)
 */
#define BATADV_TP_AWND 0x20000000

/**
 * BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not
 *  get anything for such amount of milliseconds, the connection is killed
 */
#define BATADV_TP_RECV_TIMEOUT 1000

/**
 * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond
 *  such amount of milliseconds, the receiver is considered unreachable and the
 *  connection is killed
 */
#define BATADV_TP_MAX_RTO 30000

/**
 * BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high
 *  in order to immediately trigger a wrap around (test purposes)
 */
#define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000)

/**
 * BATADV_TP_PLEN - length of the payload (data after the batadv_unicast
 *  header) to simulate
 */
#define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \
                        sizeof(struct batadv_unicast_packet))

/* pool of pseudo-random bytes used to fill the payload of the test packets */
static u8 batadv_tp_prerandom[4096] __read_mostly;

/**
 * batadv_tp_session_cookie() - generate session cookie based on session ids
 * @session: TP session identifier
 * @icmp_uid: icmp pseudo uid of the tp session
 *
 * Return: 32 bit tp_meter session cookie
 */
static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
{
        u32 cookie;

        cookie = icmp_uid << 16;
        cookie |= session[0] << 8;
        cookie |= session[1];

        return cookie;
}
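
/* The cookie packs the icmp uid and the two session id bytes into a single
 * u32. As a rough illustration (values chosen arbitrarily): icmp_uid = 0x5a
 * and session = {0x12, 0x34} yield the cookie 0x005a1234. Only the lower 24
 * bits are ever set, so the value always fits the u32 cookie attribute that
 * is later reported back to userspace.
 */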

/**
 * batadv_tp_cwnd() - compute the new cwnd size
 * @base: base cwnd size value
 * @increment: the value to add to base to get the new size
 * @min: minimum cwnd value (usually MSS)
 *
 * Return the new cwnd size and ensure it does not exceed the Advertised
 * Receiver Window size. It is wrapped around safely.
 * For details refer to Section 3.1 of RFC 5681
 *
 * Return: new congestion window size in bytes
 */
static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
{
        u32 new_size = base + increment;

        /* check for u32 overflow */
        if (new_size < base)
                new_size = (u32)ULONG_MAX;

        new_size = min_t(u32, new_size, BATADV_TP_AWND);

        return max_t(u32, new_size, min);
}

/**
 * batadv_tp_update_cwnd() - update the Congestion Window
 * @tp_vars: the private data of the current TP meter session
 * @mss: maximum segment size of transmission
 *
 * 1) if the session is in Slow Start, the CWND has to be increased by 1
 * MSS every unique received ACK
 * 2) if the session is in Congestion Avoidance, the CWND has to be
 * increased by MSS * MSS / CWND for every unique received ACK
 */
static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
{
        spin_lock_bh(&tp_vars->cwnd_lock);

        /* slow start: cwnd grows by one full MSS for every new ACK */
        if (tp_vars->cwnd <= tp_vars->ss_threshold) {
                tp_vars->dec_cwnd = 0;
                tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
                spin_unlock_bh(&tp_vars->cwnd_lock);
                return;
        }

        /* congestion avoidance: increment the accumulator by at least 1
         * (see Section 3.1 of RFC 5681)
         */
        tp_vars->dec_cwnd += max_t(u32, 1U << 3,
                                   ((mss * mss) << 6) / (tp_vars->cwnd << 3));
        if (tp_vars->dec_cwnd < (mss << 3)) {
                spin_unlock_bh(&tp_vars->cwnd_lock);
                return;
        }

        tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
        tp_vars->dec_cwnd = 0;

        spin_unlock_bh(&tp_vars->cwnd_lock);
}
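
/* During congestion avoidance the fractional growth is accumulated in
 * dec_cwnd using fixed-point arithmetic scaled by 8 (the << 3 shifts): every
 * unique ACK adds roughly 8 * mss * mss / cwnd and, once the accumulator
 * reaches 8 * mss, cwnd grows by one full MSS and the accumulator is cleared.
 * As a rough example, with cwnd == 4 * mss it takes about four unique ACKs to
 * grow cwnd by one MSS, approximating the "one MSS per RTT" behaviour
 * described in RFC 5681.
 */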

/**
 * batadv_tp_update_rto() - calculate new retransmission timeout
 * @tp_vars: the private data of the current TP meter session
 * @new_rtt: new roundtrip time in msec
 */
static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
                                 u32 new_rtt)
{
        long m = new_rtt;

        /* RTT update
         * Details in Section 2.2 and 2.3 of RFC 6298
         *
         * Inspired by tcp_rtt_estimator() in net/ipv4/tcp_input.c
         */
        if (tp_vars->srtt != 0) {
                m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */
                tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */
                if (m < 0)
                        m = -m;

                m -= (tp_vars->rttvar >> 2);
                tp_vars->rttvar += m; /* rttvar ~= 3/4 rttvar + 1/4 new */
        } else {
                /* first measurement: seed the estimators */
                tp_vars->srtt = m << 3;   /* take the measured time as srtt */
                tp_vars->rttvar = m << 1; /* new_rtt / 2 */
        }

        /* rto = srtt + 4 * rttvar.
         * rttvar is already scaled by 4, therefore it does not need to be
         * multiplied here
         */
        tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
}
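
/* The smoothed values are kept in scaled fixed point, following the classic
 * Van Jacobson/RFC 6298 estimator: srtt stores 8 * SRTT and rttvar stores
 * 4 * RTTVAR, so RTO = SRTT + 4 * RTTVAR falls out of the plain sum above.
 * Rough worked example (assuming a first sample of 40ms followed by one of
 * 80ms): after the first sample srtt = 320 (SRTT = 40ms), rttvar = 80
 * (RTTVAR = 20ms) and RTO = 120ms; the 80ms sample then moves SRTT to 45ms
 * and the RTO to 145ms.
 */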

/**
 * batadv_tp_batctl_notify() - send session result to the client
 * @reason: reason for tp meter session stop
 * @dst: destination of tp_meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @start_time: start of transmission in jiffies
 * @total_sent: bytes acked to the receiver
 * @cookie: cookie of tp_meter session
 */
static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
                                    const u8 *dst, struct batadv_priv *bat_priv,
                                    unsigned long start_time, u64 total_sent,
                                    u32 cookie)
{
        u32 test_time;
        u8 result;
        u32 total_bytes;

        if (!batadv_tp_is_error(reason)) {
                result = BATADV_TP_REASON_COMPLETE;
                test_time = jiffies_to_msecs(jiffies - start_time);
                total_bytes = total_sent;
        } else {
                result = reason;
                test_time = 0;
                total_bytes = 0;
        }

        batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time,
                                      total_bytes, cookie);
}

/**
 * batadv_tp_batctl_error_notify() - send error result to the client
 * @reason: reason for tp meter session stop
 * @dst: destination of tp_meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @cookie: cookie of tp_meter session
 */
static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
                                          const u8 *dst,
                                          struct batadv_priv *bat_priv,
                                          u32 cookie)
{
        batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie);
}

/**
 * batadv_tp_list_find() - find a tp_vars object in the global list
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the other endpoint MAC address to look for
 *
 * Look for a tp_vars object matching dst as end_point and return it after
 * having incremented the refcounter.
 *
 * Return: matching tp_vars or NULL when no tp_vars with @dst was found
 */
static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
                                                  const u8 *dst)
{
        struct batadv_tp_vars *pos, *tp_vars = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
                if (!batadv_compare_eth(pos->other_end, dst))
                        continue;

                /* a tp_vars object with refcount 0 is being torn down
                 * concurrently and must not be returned: skip it
                 */
                if (unlikely(!kref_get_unless_zero(&pos->refcount)))
                        continue;

                tp_vars = pos;
                break;
        }
        rcu_read_unlock();

        return tp_vars;
}

/**
 * batadv_tp_list_find_session() - find tp_vars session object in the global
 *  list
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the other endpoint MAC address to look for
 * @session: session identifier
 *
 * Look for a tp_vars object matching dst as end_point, session as tp meter
 * session and return it after having incremented the refcounter.
 *
 * Return: matching tp_vars or NULL when no matching session was found
 */
static struct batadv_tp_vars *
batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
                            const u8 *session)
{
        struct batadv_tp_vars *pos, *tp_vars = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
                if (!batadv_compare_eth(pos->other_end, dst))
                        continue;

                if (memcmp(pos->session, session, sizeof(pos->session)) != 0)
                        continue;

                /* a tp_vars object with refcount 0 is being torn down
                 * concurrently and must not be returned: skip it
                 */
                if (unlikely(!kref_get_unless_zero(&pos->refcount)))
                        continue;

                tp_vars = pos;
                break;
        }
        rcu_read_unlock();

        return tp_vars;
}

/**
 * batadv_tp_vars_release() - release tp_vars from lists and queue for free
 *  after rcu grace period
 * @ref: kref pointer of the tp_vars
 */
static void batadv_tp_vars_release(struct kref *ref)
{
        struct batadv_tp_vars *tp_vars;
        struct batadv_tp_unacked *un, *safe;

        tp_vars = container_of(ref, struct batadv_tp_vars, refcount);

        /* lock should not be needed because this object is now out of any
         * context!
         */
        spin_lock_bh(&tp_vars->unacked_lock);
        list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
                list_del(&un->list);
                kfree(un);
        }
        spin_unlock_bh(&tp_vars->unacked_lock);

        kfree_rcu(tp_vars, rcu);
}

/**
 * batadv_tp_vars_put() - decrement the tp_vars refcounter and possibly
 *  release it
 * @tp_vars: the private data of the current TP meter session to be free'd
 */
static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
{
        kref_put(&tp_vars->refcount, batadv_tp_vars_release);
}

/**
 * batadv_tp_sender_cleanup() - cleanup sender data and drop its references
 * @bat_priv: the bat priv with all the soft interface information
 * @tp_vars: the private data of the current TP meter session to cleanup
 */
static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
                                     struct batadv_tp_vars *tp_vars)
{
        cancel_delayed_work(&tp_vars->finish_work);

        spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
        hlist_del_rcu(&tp_vars->list);
        spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);

        /* drop list reference */
        batadv_tp_vars_put(tp_vars);

        atomic_dec(&tp_vars->bat_priv->tp_num);

        /* kill the timer and remove its reference */
        del_timer_sync(&tp_vars->timer);
        /* the timer handler might have rearmed itself, therefore we kill it
         * again. Note that this waits for the handler to finish on the other
         * CPU (if any)
         */
        del_timer(&tp_vars->timer);
        batadv_tp_vars_put(tp_vars);
}

/**
 * batadv_tp_sender_end() - print info about ended session and inform client
 * @bat_priv: the bat priv with all the soft interface information
 * @tp_vars: the private data of the current TP meter session
 */
static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
                                 struct batadv_tp_vars *tp_vars)
{
        u32 session_cookie;

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Test towards %pM finished..shutting down (reason=%d)\n",
                   tp_vars->other_end, tp_vars->reason);

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n",
                   tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto);

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Final values: cwnd=%u ss_threshold=%u\n",
                   tp_vars->cwnd, tp_vars->ss_threshold);

        session_cookie = batadv_tp_session_cookie(tp_vars->session,
                                                  tp_vars->icmp_uid);

        batadv_tp_batctl_notify(tp_vars->reason,
                                tp_vars->other_end,
                                bat_priv,
                                tp_vars->start_time,
                                atomic64_read(&tp_vars->tot_sent),
                                session_cookie);
}

/**
 * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
 * @tp_vars: the private data of the current TP meter session
 * @reason: reason for tp meter session stop
 */
static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
                                      enum batadv_tp_meter_reason reason)
{
        if (!atomic_dec_and_test(&tp_vars->sending))
                return;

        tp_vars->reason = reason;
}

/**
 * batadv_tp_sender_finish() - stop sender session after test_length was
 *  reached
 * @work: delayed work reference of the related tp_vars
 */
static void batadv_tp_sender_finish(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_tp_vars *tp_vars;

        delayed_work = to_delayed_work(work);
        tp_vars = container_of(delayed_work, struct batadv_tp_vars,
                               finish_work);

        batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE);
}

/**
 * batadv_tp_reset_sender_timer() - reschedule the sender timer
 * @tp_vars: the private TP meter data for this session
 *
 * Reschedule the timer using tp_vars->rto as delay
 */
static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
{
        /* most of the time this function is invoked while normal packet
         * reception is still ongoing: do not rearm the timer if the session
         * is already shutting down
         */
        if (unlikely(atomic_read(&tp_vars->sending) == 0))
                /* the timer reference will be dropped in
                 * batadv_tp_sender_cleanup()
                 */
                return;

        mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto));
}

/**
 * batadv_tp_sender_timeout() - timer that fires in case of packet loss
 * @t: address of the timer_list inside tp_vars
 *
 * If fired it means that there was packet loss.
 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
 * reset the cwnd to 3 * MSS
 */
static void batadv_tp_sender_timeout(struct timer_list *t)
{
        struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_priv *bat_priv = tp_vars->bat_priv;

        if (atomic_read(&tp_vars->sending) == 0)
                return;

        /* the RTO grew beyond the maximum: give up and declare the
         * destination unreachable
         */
        if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) {
                batadv_tp_sender_shutdown(tp_vars,
                                          BATADV_TP_REASON_DST_UNREACHABLE);
                return;
        }

        /* RTO exponential backoff
         * Details in Section 5.5 of RFC 6298
         */
        tp_vars->rto <<= 1;

        spin_lock_bh(&tp_vars->cwnd_lock);

        /* halve the slow start threshold, but never below two payloads */
        tp_vars->ss_threshold = tp_vars->cwnd >> 1;
        if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2)
                tp_vars->ss_threshold = BATADV_TP_PLEN * 2;

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n",
                   tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
                   atomic_read(&tp_vars->last_acked));

        tp_vars->cwnd = BATADV_TP_PLEN * 3;

        spin_unlock_bh(&tp_vars->cwnd_lock);

        /* resend the non-ACKed packets.. */
        tp_vars->last_sent = atomic_read(&tp_vars->last_acked);
        wake_up(&tp_vars->more_bytes);

        batadv_tp_reset_sender_timer(tp_vars);
}
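
/* Rough summary of the loss reaction above, mirroring common TCP practice:
 * the RTO doubles on every timeout until it reaches BATADV_TP_MAX_RTO, at
 * which point the destination is declared unreachable; ss_threshold drops to
 * half of the current window (but never below two payloads), cwnd restarts
 * from three payloads in slow start, and sending resumes from the last
 * cumulatively acked sequence number, so all outstanding data is resent.
 */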

/**
 * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
 * @tp_vars: the private TP meter data for this session
 * @buf: buffer to fill with bytes
 * @nbytes: amount of pseudorandom bytes
 */
static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
                                     u8 *buf, size_t nbytes)
{
        u32 local_offset;
        size_t bytes_inbuf;
        size_t to_copy;
        size_t pos = 0;

        spin_lock_bh(&tp_vars->prerandom_lock);
        local_offset = tp_vars->prerandom_offset;
        tp_vars->prerandom_offset += nbytes;
        tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom);
        spin_unlock_bh(&tp_vars->prerandom_lock);

        while (nbytes) {
                local_offset %= sizeof(batadv_tp_prerandom);
                bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset;
                to_copy = min(nbytes, bytes_inbuf);

                memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy);
                pos += to_copy;
                nbytes -= to_copy;
                local_offset = 0;
        }
}

/**
 * batadv_tp_send_msg() - send a single message
 * @tp_vars: the private TP meter data for this session
 * @src: source mac address
 * @orig_node: the originator of the destination
 * @seqno: sequence number of this packet
 * @len: length of the entire packet
 * @session: session identifier
 * @uid: local ICMP "socket" index
 * @timestamp: timestamp in jiffies which is replied in ack
 *
 * Create and send a single TP Meter message.
 *
 * Return: 0 on success, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't
 * be allocated, BATADV_TP_REASON_CANT_SEND if it couldn't be sent
 */
static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
                              struct batadv_orig_node *orig_node,
                              u32 seqno, size_t len, const u8 *session,
                              int uid, u32 timestamp)
{
        struct batadv_icmp_tp_packet *icmp;
        struct sk_buff *skb;
        int r;
        u8 *data;
        size_t data_len;

        skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
        if (unlikely(!skb))
                return BATADV_TP_REASON_MEMORY_ERROR;

        skb_reserve(skb, ETH_HLEN);
        icmp = skb_put(skb, sizeof(*icmp));

        /* fill the icmp header */
        ether_addr_copy(icmp->dst, orig_node->orig);
        ether_addr_copy(icmp->orig, src);
        icmp->version = BATADV_COMPAT_VERSION;
        icmp->packet_type = BATADV_ICMP;
        icmp->ttl = BATADV_TTL;
        icmp->msg_type = BATADV_TP;
        icmp->uid = uid;

        icmp->subtype = BATADV_TP_MSG;
        memcpy(icmp->session, session, sizeof(icmp->session));
        icmp->seqno = htonl(seqno);
        icmp->timestamp = htonl(timestamp);

        data_len = len - sizeof(*icmp);
        data = skb_put(skb, data_len);
        batadv_tp_fill_prerandom(tp_vars, data, data_len);

        r = batadv_send_skb_to_orig(skb, orig_node, NULL);
        if (r == NET_XMIT_SUCCESS)
                return 0;

        return BATADV_TP_REASON_CANT_SEND;
}
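
/* Each TP_MSG carries a batadv_icmp_tp_packet header followed by filler bytes
 * taken from the pre-generated random pool; @len is the overall size the
 * packet should have on air (the callers add sizeof(struct
 * batadv_unicast_packet) to the payload length to emulate normal unicast
 * traffic). Drawing the filler from batadv_tp_prerandom instead of calling
 * get_random_bytes() for every packet keeps the per-packet cost low.
 */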

/**
 * batadv_tp_recv_ack() - ACK receiving function
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the buffer containing the received packet
 *
 * Process a received TP ACK packet
 */
static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
                               const struct sk_buff *skb)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_orig_node *orig_node = NULL;
        const struct batadv_icmp_tp_packet *icmp;
        struct batadv_tp_vars *tp_vars;
        size_t packet_len, mss;
        u32 rtt, recv_ack, cwnd;
        unsigned char *dev_addr;

        packet_len = BATADV_TP_PLEN;
        mss = BATADV_TP_PLEN;
        packet_len += sizeof(struct batadv_unicast_packet);

        icmp = (struct batadv_icmp_tp_packet *)skb->data;

        /* find the related tp_vars */
        tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
                                              icmp->session);
        if (unlikely(!tp_vars))
                return;

        if (unlikely(atomic_read(&tp_vars->sending) == 0))
                goto out;

        /* old ACK? silently drop it */
        if (batadv_seq_before(ntohl(icmp->seqno),
                              (u32)atomic_read(&tp_vars->last_acked)))
                goto out;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (unlikely(!primary_if))
                goto out;

        orig_node = batadv_orig_hash_find(bat_priv, icmp->orig);
        if (unlikely(!orig_node))
                goto out;

        /* update the RTO with the newly sampled RTT, if any */
        rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp);
        if (icmp->timestamp && rtt)
                batadv_tp_update_rto(tp_vars, rtt);

        /* ACK for new data: reset the retransmission timer */
        batadv_tp_reset_sender_timer(tp_vars);

        recv_ack = ntohl(icmp->seqno);

        /* check if this ACK is a duplicate */
        if (atomic_read(&tp_vars->last_acked) == recv_ack) {
                atomic_inc(&tp_vars->dup_acks);
                if (atomic_read(&tp_vars->dup_acks) != 3)
                        goto out;

                if (recv_ack >= tp_vars->recover)
                        goto out;

                /* third duplicate ACK: perform Fast Retransmit */
                batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
                                   orig_node, recv_ack, packet_len,
                                   icmp->session, icmp->uid,
                                   jiffies_to_msecs(jiffies));

                spin_lock_bh(&tp_vars->cwnd_lock);

                /* enter Fast Recovery */
                tp_vars->fast_recovery = true;
                /* set recover to the last outstanding seqno so that Fast
                 * Recovery is left only once it has been cumulatively acked
                 * (RFC 6582, Section 3.2, step 1)
                 */
                tp_vars->recover = tp_vars->last_sent;
                tp_vars->ss_threshold = tp_vars->cwnd >> 1;
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n",
                           tp_vars->cwnd, tp_vars->ss_threshold,
                           tp_vars->last_sent, recv_ack);
                tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss,
                                               mss);
                tp_vars->dec_cwnd = 0;
                tp_vars->last_sent = recv_ack;

                spin_unlock_bh(&tp_vars->cwnd_lock);
        } else {
                /* count the acked data */
                atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked),
                             &tp_vars->tot_sent);
                /* reset the duplicate ACKs counter */
                atomic_set(&tp_vars->dup_acks, 0);

                if (tp_vars->fast_recovery) {
                        /* partial ACK */
                        if (batadv_seq_before(recv_ack, tp_vars->recover)) {
                                /* this is another hole in the window. React
                                 * immediately as specified by NewReno (see
                                 * Section 3.2 of RFC 6582 for details)
                                 */
                                dev_addr = primary_if->net_dev->dev_addr;
                                batadv_tp_send_msg(tp_vars, dev_addr,
                                                   orig_node, recv_ack,
                                                   packet_len, icmp->session,
                                                   icmp->uid,
                                                   jiffies_to_msecs(jiffies));
                                tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd,
                                                               mss, mss);
                        } else {
                                tp_vars->fast_recovery = false;
                                /* leave Fast Recovery: deflate cwnd back to
                                 * the ss_threshold recorded when it was
                                 * entered (RFC 6582, Section 3.2, step 3)
                                 */
                                cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0,
                                                      mss);
                                tp_vars->cwnd = cwnd;
                        }
                        goto move_twnd;
                }

                if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)
                        batadv_tp_update_cwnd(tp_vars, mss);
move_twnd:
                /* move the Transmit Window */
                atomic_set(&tp_vars->last_acked, recv_ack);
        }

        wake_up(&tp_vars->more_bytes);
out:
        if (likely(primary_if))
                batadv_hardif_put(primary_if);
        if (likely(orig_node))
                batadv_orig_node_put(orig_node);
        if (likely(tp_vars))
                batadv_tp_vars_put(tp_vars);
}
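
/* The duplicate-ACK handling above follows the TCP NewReno idea (RFC 6582):
 * the third duplicate ACK triggers a fast retransmit of the missing packet
 * and enters fast recovery with ss_threshold = cwnd / 2 and
 * cwnd = ss_threshold + 3 segments; while in fast recovery, a partial ACK
 * (one below the recorded "recover" point) retransmits the next hole and
 * grows cwnd by one segment, whereas an ACK at or beyond "recover" leaves
 * fast recovery and deflates cwnd back to ss_threshold.
 */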

/**
 * batadv_tp_avail() - check if the given amount of bytes still fits within
 *  the sender window
 * @tp_vars: the private data of the current TP meter session
 * @payload_len: size of the payload of a single message
 *
 * Return: true when the given amount of bytes is available within the cwnd,
 * false otherwise
 */
static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
                            size_t payload_len)
{
        u32 win_left, win_limit;

        win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd;
        win_left = win_limit - tp_vars->last_sent;

        return win_left >= payload_len;
}
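
/* All sequence arithmetic here relies on unsigned u32 wrap-around: the test
 * deliberately starts at BATADV_TP_FIRST_SEQ (2000 below the u32 maximum) so
 * that the wrap happens almost immediately and bugs in this arithmetic
 * surface early. Rough example: with last_acked = 0xfffffff0, cwnd = 0x40 and
 * last_sent = 0xfffffff8, win_limit wraps to 0x30 and win_left still comes
 * out as 0x38, the true remaining space in the window.
 */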

/**
 * batadv_tp_wait_available() - wait until enough space is available within
 *  the sender window
 * @tp_vars: the private data of the current TP meter session
 * @plen: size of the payload of a single message
 *
 * Return: the return value of wait_event_interruptible_timeout(): 0 if the
 * timeout expired, a positive value if space became available before the
 * timeout, a negative value if the wait was interrupted
 */
static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
{
        int ret;

        ret = wait_event_interruptible_timeout(tp_vars->more_bytes,
                                               batadv_tp_avail(tp_vars, plen),
                                               HZ / 10);

        return ret;
}

/**
 * batadv_tp_send() - main sending thread of a tp meter session
 * @arg: address of the related tp_vars
 *
 * Return: nothing, this function never returns
 */
static int batadv_tp_send(void *arg)
{
        struct batadv_tp_vars *tp_vars = arg;
        struct batadv_priv *bat_priv = tp_vars->bat_priv;
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_orig_node *orig_node = NULL;
        size_t payload_len, packet_len;
        int err = 0;

        if (unlikely(tp_vars->role != BATADV_TP_SENDER)) {
                err = BATADV_TP_REASON_DST_UNREACHABLE;
                tp_vars->reason = err;
                goto out;
        }

        orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end);
        if (unlikely(!orig_node)) {
                err = BATADV_TP_REASON_DST_UNREACHABLE;
                tp_vars->reason = err;
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (unlikely(!primary_if)) {
                err = BATADV_TP_REASON_DST_UNREACHABLE;
                tp_vars->reason = err;
                goto out;
        }

        /* assume that all the hard_interfaces have a correctly
         * configured MTU, so use the soft-interface MTU as MSS.
         * This might not be true and in that case the fragmentation
         * should be used.
         * Now, try to send the packet as it is
         */
        payload_len = BATADV_TP_PLEN;
        BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN);

        batadv_tp_reset_sender_timer(tp_vars);

        /* queue the worker in charge of terminating the test */
        queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work,
                           msecs_to_jiffies(tp_vars->test_length));

        while (atomic_read(&tp_vars->sending) != 0) {
                if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) {
                        batadv_tp_wait_available(tp_vars, payload_len);
                        continue;
                }

                /* to emulate normal unicast traffic, add to the payload len
                 * the size of the unicast header
                 */
                packet_len = payload_len + sizeof(struct batadv_unicast_packet);

                err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
                                         orig_node, tp_vars->last_sent,
                                         packet_len,
                                         tp_vars->session, tp_vars->icmp_uid,
                                         jiffies_to_msecs(jiffies));

                /* something went wrong during the preparation/transmission */
                if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
                        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                                   "Meter: %s() cannot send packets (%d)\n",
                                   __func__, err);
                        /* ensure nobody else tries to stop the thread now */
                        if (atomic_dec_and_test(&tp_vars->sending))
                                tp_vars->reason = err;
                        break;
                }

                /* right-shift the TWND */
                if (!err)
                        tp_vars->last_sent += payload_len;

                cond_resched();
        }

out:
        if (likely(primary_if))
                batadv_hardif_put(primary_if);
        if (likely(orig_node))
                batadv_orig_node_put(orig_node);

        batadv_tp_sender_end(bat_priv, tp_vars);
        batadv_tp_sender_cleanup(bat_priv, tp_vars);

        batadv_tp_vars_put(tp_vars);

        do_exit(0);
}

/**
 * batadv_tp_start_kthread() - start new thread which manages the tp meter
 *  sender
 * @tp_vars: the private data of the current TP meter session
 */
static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
{
        struct task_struct *kthread;
        struct batadv_priv *bat_priv = tp_vars->bat_priv;
        u32 session_cookie;

        kref_get(&tp_vars->refcount);
        kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
        if (IS_ERR(kthread)) {
                session_cookie = batadv_tp_session_cookie(tp_vars->session,
                                                          tp_vars->icmp_uid);
                pr_err("batadv: cannot create tp meter kthread\n");
                batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
                                              tp_vars->other_end,
                                              bat_priv, session_cookie);

                /* drop the reference acquired for the kthread */
                batadv_tp_vars_put(tp_vars);

                /* cleanup of failed tp meter variables */
                batadv_tp_sender_cleanup(bat_priv, tp_vars);
                return;
        }

        wake_up_process(kthread);
}

/**
 * batadv_tp_start() - start a new tp meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the receiver MAC address
 * @test_length: test length in milliseconds
 * @cookie: session cookie
 */
void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
                     u32 test_length, u32 *cookie)
{
        struct batadv_tp_vars *tp_vars;
        u8 session_id[2];
        u8 icmp_uid;
        u32 session_cookie;

        get_random_bytes(session_id, sizeof(session_id));
        get_random_bytes(&icmp_uid, 1);
        session_cookie = batadv_tp_session_cookie(session_id, icmp_uid);
        *cookie = session_cookie;

        /* look for an already ongoing test towards this node */
        spin_lock_bh(&bat_priv->tp_list_lock);
        tp_vars = batadv_tp_list_find(bat_priv, dst);
        if (tp_vars) {
                spin_unlock_bh(&bat_priv->tp_list_lock);
                batadv_tp_vars_put(tp_vars);
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: test to or from the same node already ongoing, aborting\n");
                batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING,
                                              dst, bat_priv, session_cookie);
                return;
        }

        if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
                spin_unlock_bh(&bat_priv->tp_list_lock);
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: too many ongoing sessions, aborting (SEND)\n");
                batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst,
                                              bat_priv, session_cookie);
                return;
        }

        tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
        if (!tp_vars) {
                spin_unlock_bh(&bat_priv->tp_list_lock);
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: %s cannot allocate list elements\n",
                           __func__);
                batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
                                              dst, bat_priv, session_cookie);
                return;
        }

        /* initialize tp_vars */
        ether_addr_copy(tp_vars->other_end, dst);
        kref_init(&tp_vars->refcount);
        tp_vars->role = BATADV_TP_SENDER;
        atomic_set(&tp_vars->sending, 1);
        memcpy(tp_vars->session, session_id, sizeof(session_id));
        tp_vars->icmp_uid = icmp_uid;

        tp_vars->last_sent = BATADV_TP_FIRST_SEQ;
        atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ);
        tp_vars->fast_recovery = false;
        tp_vars->recover = BATADV_TP_FIRST_SEQ;

        /* initial congestion window of three full payloads, similar to the
         * initial window used by TCP (see RFC 5681)
         */
        tp_vars->cwnd = BATADV_TP_PLEN * 3;

        /* disable the slow start threshold at the beginning by setting it to
         * the advertised window: the session starts in slow start
         */
        tp_vars->ss_threshold = BATADV_TP_AWND;

        /* initial retransmission timeout in ms; it is then adapted to the
         * measured RTT as described in RFC 6298
         */
        tp_vars->rto = 1000;
        tp_vars->srtt = 0;
        tp_vars->rttvar = 0;

        atomic64_set(&tp_vars->tot_sent, 0);

        kref_get(&tp_vars->refcount);
        timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);

        tp_vars->bat_priv = bat_priv;
        tp_vars->start_time = jiffies;

        init_waitqueue_head(&tp_vars->more_bytes);

        spin_lock_init(&tp_vars->unacked_lock);
        INIT_LIST_HEAD(&tp_vars->unacked_list);

        spin_lock_init(&tp_vars->cwnd_lock);

        tp_vars->prerandom_offset = 0;
        spin_lock_init(&tp_vars->prerandom_lock);

        kref_get(&tp_vars->refcount);
        hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
        spin_unlock_bh(&bat_priv->tp_list_lock);

        tp_vars->test_length = test_length;
        if (!tp_vars->test_length)
                tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH;

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Meter: starting throughput meter towards %pM (length=%ums)\n",
                   dst, test_length);

        /* init work item for finished tp tests */
        INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish);

        /* start the tp kthread: this way the caller can return immediately
         * instead of blocking for the whole test duration
         */
        batadv_tp_start_kthread(tp_vars);

        /* drop the reference obtained with kref_init(): the caller does not
         * keep a reference to tp_vars
         */
        batadv_tp_vars_put(tp_vars);
}

/**
 * batadv_tp_stop() - stop currently running tp meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the receiver MAC address
 * @return_value: reason for tp meter session stop
 */
void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
                    u8 return_value)
{
        struct batadv_orig_node *orig_node;
        struct batadv_tp_vars *tp_vars;

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Meter: stopping test towards %pM\n", dst);

        orig_node = batadv_orig_hash_find(bat_priv, dst);
        if (!orig_node)
                return;

        tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig);
        if (!tp_vars) {
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: trying to interrupt an already over connection\n");
                goto out;
        }

        batadv_tp_sender_shutdown(tp_vars, return_value);
        batadv_tp_vars_put(tp_vars);
out:
        batadv_orig_node_put(orig_node);
}

/**
 * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
 * @tp_vars: the private data of the current TP meter session
 *
 * Start the receiver shutdown timer or reset it if already started
 */
static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
{
        mod_timer(&tp_vars->timer,
                  jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT));
}

/**
 * batadv_tp_receiver_shutdown() - stop a tp meter receiver when the timeout
 *  is reached without any recent activity
 * @t: address of the timer_list inside tp_vars
 */
static void batadv_tp_receiver_shutdown(struct timer_list *t)
{
        struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_tp_unacked *un, *safe;
        struct batadv_priv *bat_priv;

        bat_priv = tp_vars->bat_priv;

        /* if there is recent activity rearm the timer */
        if (!batadv_has_timed_out(tp_vars->last_recv_time,
                                  BATADV_TP_RECV_TIMEOUT)) {
                /* reset the receiver shutdown timer */
                batadv_tp_reset_receiver_timer(tp_vars);
                return;
        }

        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                   "Shutting down for inactivity (more than %dms) from %pM\n",
                   BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);

        spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
        hlist_del_rcu(&tp_vars->list);
        spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);

        /* drop list reference */
        batadv_tp_vars_put(tp_vars);

        atomic_dec(&bat_priv->tp_num);

        spin_lock_bh(&tp_vars->unacked_lock);
        list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
                list_del(&un->list);
                kfree(un);
        }
        spin_unlock_bh(&tp_vars->unacked_lock);

        /* drop reference of timer */
        batadv_tp_vars_put(tp_vars);
}

/**
 * batadv_tp_send_ack() - send an ACK packet
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the mac address of the destination originator
 * @seq: the sequence number to ACK
 * @timestamp: the timestamp to echo back in the ACK
 * @session: session identifier
 * @socket_index: local ICMP socket identifier
 *
 * Return: 0 on success, a positive integer representing the reason of the
 * failure otherwise
 */
static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
                              u32 seq, __be32 timestamp, const u8 *session,
                              int socket_index)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_orig_node *orig_node;
        struct batadv_icmp_tp_packet *icmp;
        struct sk_buff *skb;
        int r, ret;

        orig_node = batadv_orig_hash_find(bat_priv, dst);
        if (unlikely(!orig_node)) {
                ret = BATADV_TP_REASON_DST_UNREACHABLE;
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (unlikely(!primary_if)) {
                ret = BATADV_TP_REASON_DST_UNREACHABLE;
                goto out;
        }

        skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
        if (unlikely(!skb)) {
                ret = BATADV_TP_REASON_MEMORY_ERROR;
                goto out;
        }

        skb_reserve(skb, ETH_HLEN);
        icmp = skb_put(skb, sizeof(*icmp));
        icmp->packet_type = BATADV_ICMP;
        icmp->version = BATADV_COMPAT_VERSION;
        icmp->ttl = BATADV_TTL;
        icmp->msg_type = BATADV_TP;
        ether_addr_copy(icmp->dst, orig_node->orig);
        ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr);
        icmp->uid = socket_index;

        icmp->subtype = BATADV_TP_ACK;
        memcpy(icmp->session, session, sizeof(icmp->session));
        icmp->seqno = htonl(seq);
        icmp->timestamp = timestamp;

        /* send the ack */
        r = batadv_send_skb_to_orig(skb, orig_node, NULL);
        if (unlikely(r < 0) || r == NET_XMIT_DROP) {
                ret = BATADV_TP_REASON_DST_UNREACHABLE;
                goto out;
        }
        ret = 0;

out:
        if (likely(orig_node))
                batadv_orig_node_put(orig_node);
        if (likely(primary_if))
                batadv_hardif_put(primary_if);

        return ret;
}

/**
 * batadv_tp_handle_out_of_order() - store an out of order packet
 * @tp_vars: the private data of the current TP meter session
 * @skb: the buffer containing the received packet
 *
 * Store the out of order packet in the unacked list for late processing.
 * These packets are kept in this list so that they can be ACKed at once as
 * soon as all the previous packets have been received.
 *
 * Return: true if the packet was properly stored, false otherwise
 */
static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
                                          const struct sk_buff *skb)
{
        const struct batadv_icmp_tp_packet *icmp;
        struct batadv_tp_unacked *un, *new;
        u32 payload_len;
        bool added = false;

        new = kmalloc(sizeof(*new), GFP_ATOMIC);
        if (unlikely(!new))
                return false;

        icmp = (struct batadv_icmp_tp_packet *)skb->data;

        new->seqno = ntohl(icmp->seqno);
        payload_len = skb->len - sizeof(struct batadv_unicast_packet);
        new->len = payload_len;

        spin_lock_bh(&tp_vars->unacked_lock);
        /* if the list is empty immediately attach this new object */
        if (list_empty(&tp_vars->unacked_list)) {
                list_add(&new->list, &tp_vars->unacked_list);
                goto out;
        }

        /* otherwise loop over the list and either drop the packet because it
         * is a duplicate or store it at the right position.
         *
         * The iteration is done in the reverse way because it is likely that
         * the last received packet (the one being processed now) has a bigger
         * seqno than all the others already stored
         */
        list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) {
                /* check for duplicates */
                if (new->seqno == un->seqno) {
                        if (new->len > un->len)
                                un->len = new->len;
                        kfree(new);
                        added = true;
                        break;
                }

                /* look for the right position */
                if (batadv_seq_before(new->seqno, un->seqno))
                        continue;

                /* the first entry with a smaller seqno has been found: link
                 * the new element next to it and stop walking the list
                 */
                list_add_tail(&new->list, &un->list);
                added = true;
                break;
        }

        /* list was not empty but the new element has the smallest seqno:
         * attach it at the head of the list
         */
        if (!added)
                list_add(&new->list, &tp_vars->unacked_list);

out:
        spin_unlock_bh(&tp_vars->unacked_lock);

        return true;
}
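
/* The unacked list is only ever touched from packet reception and from the
 * receiver shutdown timer, both running in softirq context, which is
 * presumably why the allocation above uses GFP_ATOMIC and the list is
 * protected with a plain spin_lock_bh(). Duplicates are collapsed by seqno,
 * keeping the largest payload length seen for that seqno.
 */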

/**
 * batadv_tp_ack_unordered() - update the last received seqno based on the
 *  unordered packet list
 * @tp_vars: the private data of the current TP meter session
 */
static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
{
        struct batadv_tp_unacked *un, *safe;
        u32 to_ack;

        /* go through the unacked packet list and possibly ACK them as
         * well
         */
        spin_lock_bh(&tp_vars->unacked_lock);
        list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
                /* the list is ordered, therefore it is possible to stop as
                 * soon as there is a gap between the last acked seqno and the
                 * seqno of the packet under inspection
                 */
                if (batadv_seq_before(tp_vars->last_recv, un->seqno))
                        break;

                to_ack = un->seqno + un->len - tp_vars->last_recv;

                if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len))
                        tp_vars->last_recv += to_ack;

                list_del(&un->list);
                kfree(un);
        }
        spin_unlock_bh(&tp_vars->unacked_lock);
}

/**
 * batadv_tp_init_recv() - return matching or create new receiver tp_vars
 * @bat_priv: the bat priv with all the soft interface information
 * @icmp: received icmp tp msg
 *
 * Return: corresponding tp_vars or NULL on errors
 */
static struct batadv_tp_vars *
batadv_tp_init_recv(struct batadv_priv *bat_priv,
                    const struct batadv_icmp_tp_packet *icmp)
{
        struct batadv_tp_vars *tp_vars;

        spin_lock_bh(&bat_priv->tp_list_lock);
        tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
                                              icmp->session);
        if (tp_vars)
                goto out_unlock;

        if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: too many ongoing sessions, aborting (RECV)\n");
                goto out_unlock;
        }

        tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
        if (!tp_vars)
                goto out_unlock;

        ether_addr_copy(tp_vars->other_end, icmp->orig);
        tp_vars->role = BATADV_TP_RECEIVER;
        memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session));
        tp_vars->last_recv = BATADV_TP_FIRST_SEQ;
        tp_vars->bat_priv = bat_priv;
        kref_init(&tp_vars->refcount);

        spin_lock_init(&tp_vars->unacked_lock);
        INIT_LIST_HEAD(&tp_vars->unacked_list);

        kref_get(&tp_vars->refcount);
        hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);

        kref_get(&tp_vars->refcount);
        timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);

        batadv_tp_reset_receiver_timer(tp_vars);

out_unlock:
        spin_unlock_bh(&bat_priv->tp_list_lock);

        return tp_vars;
}

/**
 * batadv_tp_recv_msg() - process a single data message
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the buffer containing the received packet
 *
 * Process a received TP MSG packet
 */
static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
                               const struct sk_buff *skb)
{
        const struct batadv_icmp_tp_packet *icmp;
        struct batadv_tp_vars *tp_vars;
        size_t packet_size;
        u32 seqno;

        icmp = (struct batadv_icmp_tp_packet *)skb->data;

        seqno = ntohl(icmp->seqno);
        /* check if this is the first seqno: in that case a new receiver
         * session has to be created
         */
        if (seqno == BATADV_TP_FIRST_SEQ) {
                tp_vars = batadv_tp_init_recv(bat_priv, icmp);
                if (!tp_vars) {
                        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                                   "Meter: seqno != BATADV_TP_FIRST_SEQ cannot initiate connection\n");
                        goto out;
                }
        } else {
                tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
                                                      icmp->session);
                if (!tp_vars) {
                        batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                                   "Unexpected packet from %pM!\n",
                                   icmp->orig);
                        goto out;
                }
        }

        if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) {
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Meter: dropping packet: not expected (role=%u)\n",
                           tp_vars->role);
                goto out;
        }

        tp_vars->last_recv_time = jiffies;

        /* if the packet is a duplicate, it may be the case that an ACK has
         * been lost. Resend the ACK
         */
        if (batadv_seq_before(seqno, tp_vars->last_recv))
                goto send_ack;

        /* if the packet is out of order enqueue it */
        if (ntohl(icmp->seqno) != tp_vars->last_recv) {
                /* exit immediately (and do not send any ACK) if the packet
                 * has not been enqueued correctly
                 */
                if (!batadv_tp_handle_out_of_order(tp_vars, skb))
                        goto out;

                /* send a duplicate ACK */
                goto send_ack;
        }

        /* the packet is in sequence: account for it and move the window */
        packet_size = skb->len - sizeof(struct batadv_unicast_packet);
        tp_vars->last_recv += packet_size;

        /* check if this in-order message closed a gap in the unacked list */
        batadv_tp_ack_unordered(tp_vars);

send_ack:
        /* send the ACK. If the received packet was out of order, the ACK that
         * is going to be sent is a duplicate (the sender will count them and
         * possibly enter Fast Retransmit as soon as it has reached 3)
         */
        batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv,
                           icmp->timestamp, icmp->session, icmp->uid);
out:
        if (likely(tp_vars))
                batadv_tp_vars_put(tp_vars);
}

/**
 * batadv_tp_meter_recv() - main TP Meter receiving function
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the buffer containing the received packet
 */
void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
{
        struct batadv_icmp_tp_packet *icmp;

        icmp = (struct batadv_icmp_tp_packet *)skb->data;

        switch (icmp->subtype) {
        case BATADV_TP_MSG:
                batadv_tp_recv_msg(bat_priv, skb);
                break;
        case BATADV_TP_ACK:
                batadv_tp_recv_ack(bat_priv, skb);
                break;
        default:
                batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
                           "Received unknown TP Metric packet type %u\n",
                           icmp->subtype);
        }
        consume_skb(skb);
}

/**
 * batadv_tp_meter_init() - initialize global tp_meter structures
 */
void __init batadv_tp_meter_init(void)
{
        get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
}