/*
 * TFRC loss interval database, used by the DCCP/CCID-3 receiver to track
 * recent loss intervals and to compute the average loss interval I_mean
 * (cf. RFC 4342, 10 and RFC 3448 / rfc3448bis, 5.4). The types and helpers
 * used below (struct tfrc_loss_hist, LIH_SIZE, NINTERVAL, the rx-history
 * accessors) are declared in the headers included via "tfrc.h".
 */
#include <net/sock.h>
#include "tfrc.h"

static struct kmem_cache *tfrc_lh_slab __read_mostly;

/* Loss Interval weights from [RFC 3448, 5.4], scaled by 10 */
static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };
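
/*
 * Loss-history ring: the history holds up to LIH_SIZE loss intervals, ordered
 * from the most recent (the currently open interval I_0) to the oldest.
 * lh->counter counts the intervals recorded so far; LIH_INDEX() maps it onto
 * the fixed-size ring so that a new interval always reuses the oldest slot.
 */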

/* implements LIFO semantics on the array */
static inline u8 LIH_INDEX(const u8 ctr)
{
	return LIH_SIZE - 1 - (ctr % LIH_SIZE);
}

/* the `counter' index always points at the next entry to be populated */
static inline struct tfrc_loss_interval *tfrc_lh_peek(struct tfrc_loss_hist *lh)
{
	return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;
}

/* given i with 0 <= i <= k, return I_i as per the rfc3448bis notation */
static inline u32 tfrc_lh_get_interval(struct tfrc_loss_hist *lh, const u8 i)
{
	BUG_ON(i >= lh->counter);
	return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;
}

/*
 * On-demand allocation and de-allocation of entries
 */
static struct tfrc_loss_interval *tfrc_lh_demand_next(struct tfrc_loss_hist *lh)
{
	if (lh->ring[LIH_INDEX(lh->counter)] == NULL)
		lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,
								    GFP_ATOMIC);
	return lh->ring[LIH_INDEX(lh->counter)];
}

void tfrc_lh_cleanup(struct tfrc_loss_hist *lh)
{
	if (!tfrc_lh_is_initialised(lh))
		return;

	for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++)
		if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {
			kmem_cache_free(tfrc_lh_slab,
					lh->ring[LIH_INDEX(lh->counter)]);
			lh->ring[LIH_INDEX(lh->counter)] = NULL;
		}
}
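
/*
 * Weighted average loss interval, following RFC 3448 / rfc3448bis, 5.4:
 * i_tot0 is the weighted sum of the k most recent intervals including the
 * currently open one (I_0), i_tot1 the weighted sum of the k most recent
 * terminated intervals (excluding I_0). I_mean takes the larger of the two,
 * so the open interval is only counted when doing so increases the average.
 */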
static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
{
	u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
	int i, k = tfrc_lh_length(lh) - 1;

	if (k <= 0)
		return;

	for (i = 0; i <= k; i++) {
		i_i = tfrc_lh_get_interval(lh, i);

		if (i < k) {
			i_tot0 += i_i * tfrc_lh_weights[i];
			w_tot  += tfrc_lh_weights[i];
		}
		if (i > 0)
			i_tot1 += i_i * tfrc_lh_weights[i-1];
	}

	lh->i_mean = max(i_tot0, i_tot1) / w_tot;
}

/**
 * tfrc_lh_update_i_mean  -  Update the `open' loss interval I_0
 * For recomputing p: returns `true' if p > p_prev  <=>  1/p < 1/p_prev
 */
u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
{
	struct tfrc_loss_interval *cur = tfrc_lh_peek(lh);
	u32 old_i_mean = lh->i_mean;
	s64 len;

	if (cur == NULL)			/* not initialised */
		return 0;

	len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1;

	if (len - (s64)cur->li_length <= 0)	/* duplicate or reordered */
		return 0;

	if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4)
		/*
		 * Implements RFC 4342, 10.2:
		 * If a packet S (skb) exists whose seqno comes `after' the one
		 * starting the current loss interval (cur) and if the modulo-16
		 * distance from C(cur) to C(S) is greater than 4, consider all
		 * subsequent packets as belonging to a new loss interval. This
		 * test is necessary since CCVal may wrap between intervals.
		 */
		cur->li_is_closed = 1;

	if (tfrc_lh_length(lh) == 1)	/* only the first interval exists so far */
		return 0;

	cur->li_length = len;
	tfrc_lh_calc_i_mean(lh);

	return lh->i_mean < old_i_mean;
}
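
/*
 * A new loss interval begins (cf. RFC 4342, 10) when the newly detected loss
 * lies after the sequence number that opened the current interval and either
 * the current interval has already been closed above, or the lost packet's
 * window counter (CCVal) is more than 4 places beyond that of the interval
 * start, i.e. it was sent more than one RTT later.
 */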
static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
				     struct tfrc_rx_hist_entry *new_loss)
{
	return dccp_delta_seqno(cur->li_seqno, new_loss->tfrchrx_seqno) > 0 &&
	       (cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4);
}

/**
 * tfrc_lh_interval_add  -  Insert new record into the Loss Interval database
 * @lh:		   Loss Interval database
 * @rh:		   Receive history (used to determine length of loss event)
 * @calc_first_li: Caller-dependent routine to compute the first interval length
 * @sk:		   Socket passed through to @calc_first_li
 *
 * Updates I_mean and returns 1 if a new interval has been added, 0 otherwise.
 */
int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
			 u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
	struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new;

	if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh)))
		return 0;

	new = tfrc_lh_demand_next(lh);
	if (unlikely(new == NULL)) {
		DCCP_CRIT("Cannot allocate/add loss record.");
		return 0;
	}

	new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno;
	new->li_ccval = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval;
	new->li_is_closed = 0;

	if (++lh->counter == 1)
		lh->i_mean = new->li_length = (*calc_first_li)(sk);
	else {
		cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno);
		new->li_length = dccp_delta_seqno(new->li_seqno,
				  tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1;
		if (lh->counter > (2*LIH_SIZE))
			lh->counter -= LIH_SIZE;

		tfrc_lh_calc_i_mean(lh);
	}
	return 1;
}

int __init tfrc_li_init(void)
{
	tfrc_lh_slab = kmem_cache_create("tfrc_li_hist",
					 sizeof(struct tfrc_loss_interval), 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	return tfrc_lh_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_li_exit(void)
{
	if (tfrc_lh_slab != NULL) {
		kmem_cache_destroy(tfrc_lh_slab);
		tfrc_lh_slab = NULL;
	}
}