/* Binary Increase Congestion control for TCP (BIC)
 *
 * Based on the algorithm described in:
 *   Lin Xu, Khaled Harfoush, and Injong Rhee,
 *   "Binary Increase Congestion Control (BIC) for Fast Long-Distance
 *   Networks", IEEE INFOCOM 2004.
 *
 * Unless BIC is engaged and the congestion window is large,
 * this behaves the same as the original Reno.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE	1024	/* Scale factor for beta calculation:
					 * max_cwnd = snd_cwnd * beta
					 */

#define BICTCP_B		4	/* In binary search,
					 * go to point (max + min) / N
					 */

static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819;		/* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static int smooth_part = 20;

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative decrease");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");
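
/*
 * With the defaults above, once the window is above low_window a loss
 * event shrinks ssthresh to beta/BICTCP_BETA_SCALE = 819/1024, i.e.
 * roughly 80% of the current congestion window; see
 * bictcp_recalc_ssthresh() below.
 */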

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after this many ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when last_cwnd was updated */
	u32	epoch_start;	/* beginning of an epoch */
#define ACK_RATIO_SHIFT	4
	u32	delayed_ack;	/* estimate of the packets/ACKs ratio << 4 */
};

static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->epoch_start = 0;
	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;	/* start with a ratio of 2 packets per ACK */
}

static void bictcp_init(struct sock *sk)
{
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);

	if (initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}


/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) /* record the beginning of an epoch */
		ca->epoch_start = tcp_jiffies32;

	/* start off normal */
	if (cwnd <= low_window) {
		ca->cnt = cwnd;
		return;
	}

	/* binary increase */
	if (cwnd < ca->last_max_cwnd) {
		__u32 dist = (ca->last_max_cwnd - cwnd)
			/ BICTCP_B;

		if (dist > max_increment)
			/* linear increase */
			ca->cnt = cwnd / max_increment;
		else if (dist <= 1U)
			/* binary search increase with smoothing */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else
			/* binary search increase */
			ca->cnt = cwnd / dist;
	} else {
		/* slow start and linear increase */
		if (cwnd < ca->last_max_cwnd + BICTCP_B)
			/* slow start */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else if (cwnd < ca->last_max_cwnd + max_increment * (BICTCP_B - 1))
			/* slow start */
			ca->cnt = (cwnd * (BICTCP_B - 1))
				/ (cwnd - ca->last_max_cwnd);
		else
			/* linear increase */
			ca->cnt = cwnd / max_increment;
	}

	/* if in initial slow start or link utilization is very low */
	if (ca->last_max_cwnd == 0) {
		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
			ca->cnt = 20;
	}

	/* scale cnt by the estimated delayed ACK ratio */
	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
	if (ca->cnt == 0)	/* cannot be zero */
		ca->cnt = 1;
}
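
/*
 * ca->cnt is consumed by tcp_cong_avoid_ai() below: snd_cwnd grows by
 * roughly one segment for every cnt ACKs. For example, cnt == cwnd gives
 * roughly Reno-like growth of one segment per round trip (the delayed-ACK
 * scaling above compensates for ACKs that cover more than one segment),
 * while cnt == cwnd / max_increment caps growth at about max_increment
 * segments per round trip.
 */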

static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else {
		bictcp_update(ca, tp->snd_cwnd);
		tcp_cong_avoid_ai(tp, ca->cnt, 1);
	}
}

/*
 *	behave like Reno until low_window is reached,
 *	then back off the congestion window more gently
 */
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	if (tp->snd_cwnd <= low_window)
		return max(tp->snd_cwnd >> 1U, 2U);
	else
		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
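
/*
 * With fast convergence, the remembered maximum is set below the window
 * at which the loss occurred: (BICTCP_BETA_SCALE + beta) /
 * (2 * BICTCP_BETA_SCALE) = (1024 + 819) / 2048, i.e. about 90% of
 * snd_cwnd. This lets a flow release bandwidth early so that newer flows
 * converge to their fair share faster.
 */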

static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss)
		bictcp_reset(inet_csk_ca(sk));
}

/* Track delayed acknowledgment ratio using sliding window
 * ratio = (15*ratio + sample) / 16
 */
static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open) {
		struct bictcp *ca = inet_csk_ca(sk);

		ca->delayed_ack += sample->pkts_acked -
			(ca->delayed_ack >> ACK_RATIO_SHIFT);
	}
}

static struct tcp_congestion_ops bictcp __read_mostly = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
};
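
/*
 * Once the module is loaded, "bic" can be selected system-wide with
 * sysctl net.ipv4.tcp_congestion_control=bic, or per socket via
 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bic", 3).
 */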

static int __init bictcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&bictcp);
}

static void __exit bictcp_unregister(void)
{
	tcp_unregister_congestion_control(&bictcp);
}

module_init(bictcp_register);
module_exit(bictcp_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");