/*
 * TCP Veno congestion control
 *
 * This is based on the congestion detection/avoidance scheme described in
 *    C. P. Fu, S. C. Liew.
 *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
 *    IEEE Journal on Selected Areas in Communications,
 *    Feb. 2003.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;
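/* beta is the Veno backlog threshold.  veno->diff below is kept in the same
 * V_PARAM_SHIFT fixed-point scale, so "diff < beta" means the flow is
 * estimated to have fewer than 3 packets queued inside the network.
 */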

/* Veno variables */
struct veno {
	u8 doing_veno_now;	/* if true, do Veno for this rtt */
	u16 cntrtt;		/* number of rtts measured within last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* calculate the diff rate */
};
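
/* This state is kept in the connection's congestion-control private area
 * returned by inet_csk_ca(); tcp_veno_register() checks at build time that
 * sizeof(struct veno) fits within ICSK_CA_PRIV_SIZE.
 */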

/* There are several situations when we must "re-start" Veno:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 *
 */
static inline void veno_enable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn on Veno */
	veno->doing_veno_now = 1;

	veno->minrtt = 0x7fffffff;
}

static inline void veno_disable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn off Veno */
	veno->doing_veno_now = 0;
}

static void tcp_veno_init(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	veno->basertt = 0x7fffffff;
	veno->inc = 1;
	veno_enable(sk);
}

/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk,
				const struct ack_sample *sample)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt;

	if (sample->rtt_us < 0)
		return;

	/* Never allow zero rtt or basertt */
	vrtt = sample->rtt_us + 1;

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}

static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		veno_enable(sk);
	else
		veno_disable(sk);
}

/*
 * If the connection is idle and we are restarting,
 * then we don't want to do any Veno calculations
 * until we get fresh rtt samples.  So when we
 * restart, we reset our Veno state to a clean
 * state. After we get acks for this flight of
 * packets, _then_ we can make Veno calculations
 * again.
 */
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
		tcp_veno_init(sk);
}

static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (!veno->doing_veno_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	/* limited by applications */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* We do the Veno calculations only if we got enough rtt samples */
	if (veno->cntrtt <= 2) {
		/* We don't have enough rtt samples to do the Veno
		 * calculation, so we'll behave like Reno.
		 */
		tcp_reno_cong_avoid(sk, ack, acked);
	} else {
		u64 target_cwnd;
		u32 rtt;

		/* We have enough rtt samples, so, using the Veno
		 * algorithm, we determine the state of the network.
		 */

		rtt = veno->minrtt;

		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
		target_cwnd <<= V_PARAM_SHIFT;
		do_div(target_cwnd, rtt);

		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
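		/* The diff just computed is cwnd * (1 - basertt/rtt) in
		 * V_PARAM_SHIFT fixed point: an estimate of how many of
		 * this flow's packets are queued inside the network.
		 */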

		if (tcp_in_slow_start(tp)) {
			/* Slow start. */
			tcp_slow_start(tp, acked);
		} else {
			/* Congestion avoidance. */
			if (veno->diff < beta) {
				/* In the "non-congestive state",
				 * increase cwnd every rtt.
				 */
				tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
			} else {
				/* In the "congestive state", increase cwnd
				 * every other rtt, alternating via veno->inc.
				 */
				if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
					if (veno->inc &&
					    tp->snd_cwnd < tp->snd_cwnd_clamp) {
						tp->snd_cwnd++;
						veno->inc = 0;
					} else
						veno->inc = 1;
					tp->snd_cwnd_cnt = 0;
				} else
					tp->snd_cwnd_cnt++;
			}
		}
		if (tp->snd_cwnd < 2)
			tp->snd_cwnd = 2;
		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
			tp->snd_cwnd = tp->snd_cwnd_clamp;
	}

	/* Wipe the slate clean for the next rtt. */
	veno->minrtt = 0x7fffffff;
}

/* Veno multiplicative decrease phase */
static u32 tcp_veno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (veno->diff < beta)
		/* in "non-congestive state", cut cwnd by 1/5 */
		return max(tp->snd_cwnd * 4 / 5, 2U);
	else
		/* in "congestive state", cut cwnd by 1/2 */
		return max(tp->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops tcp_veno __read_mostly = {
	.init		= tcp_veno_init,
	.ssthresh	= tcp_veno_ssthresh,
	.cong_avoid	= tcp_veno_cong_avoid,
	.pkts_acked	= tcp_veno_pkts_acked,
	.set_state	= tcp_veno_state,
	.cwnd_event	= tcp_veno_cwnd_event,

	.owner		= THIS_MODULE,
	.name		= "veno",
};

static int __init tcp_veno_register(void)
{
	BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}

static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}

module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");