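/* Proportional Integral controller Enhanced (PIE) - an Active Queue
 * Management scheduler that keeps queueing latency near a configured
 * target by periodically recomputing a random early-drop probability
 * from the estimated queue delay.
 *
 * Authors: Vijay Subramanian, Mythili Prabhu (see MODULE_AUTHOR below).
 *
 * Illustrative configuration from user space (exact option syntax depends
 * on the installed iproute2 version):
 *
 *	tc qdisc add dev eth0 root pie limit 1000 target 20ms tupdate 30ms
 */
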
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD	10000		/* bytes of backlog needed before the drain rate is measured */
#define DQCOUNT_INVALID	-1		/* no drain-rate measurement cycle in progress */
#define MAX_PROB	0xffffffff	/* drop probability scaled to the full u32 range */
#define PIE_SCALE	8		/* fixed-point shift used in the drain-rate arithmetic */

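/* user-configurable parameters (set via the TCA_PIE_* netlink attributes) */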
struct pie_params {
	psched_time_t target;	/* user-specified target delay, in pschedtime */
	u32 tupdate;		/* probability update interval, in jiffies */
	u32 limit;		/* max number of packets that can be enqueued */
	u32 alpha;		/* PI gains, scaled; see calculate_probability() */
	u32 beta;
	bool ecn;		/* mark ECN-capable packets instead of dropping */
	bool bytemode;		/* scale drop probability by packet size */
};

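/* run-time state of the controller */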
struct pie_vars {
	u32 prob;			/* drop probability, scaled to 0..MAX_PROB */
	psched_time_t burst_time;	/* remaining burst allowance */
	psched_time_t qdelay;		/* current queue delay estimate */
	psched_time_t qdelay_old;	/* delay estimate from the previous update */
	u64 dq_count;			/* bytes dequeued in the current measurement, or DQCOUNT_INVALID */
	psched_time_t dq_tstamp;	/* start of the current measurement cycle */
	u32 avg_dq_rate;		/* bytes per pschedtime tick, scaled by PIE_SCALE */
	u32 qlen_old;			/* backlog in bytes at the previous update */
};

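/* statistics gathering */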
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to PIE action */
	u32 overlimit;		/* packets dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size seen */
	u32 ecn_mark;		/* packets marked with ECN */
};

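/* private data for the Qdisc */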
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC);	/* 30 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC);	/* 20 ms */
	params->ecn = false;
	params->bytemode = false;
}

static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->avg_dq_rate = 0;
	/* default of 100 ms burst allowance, in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
}

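/* Decide whether an incoming packet should be dropped (or ECN-marked by the
 * caller) before it is enqueued.  The decision is random, weighted by the
 * current drop probability, but is skipped while burst allowance remains,
 * while both the delay and the probability are low, or while the backlog is
 * smaller than two MTUs.
 */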
static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 rnd;
	u32 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If the current delay is less than half of the target and the drop
	 * probability is already low, disable early drop.
	 */
	if ((q->vars.qdelay < q->params.target / 2)
	    && (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED.
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use the packet size to compute the
	 * probability; smaller packets get a lower drop probability.
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (local_prob / mtu) * packet_size;
	else
		local_prob = q->vars.prob;

	rnd = prandom_u32();
	if (rnd < local_prob)
		return true;

	return false;
}

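/* Enqueue entry point: drop when over limit, otherwise consult drop_early()
 * and either drop, ECN-mark, or queue the packet at the tail.
 */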
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If the packet is ECN capable, mark it rather than drop it,
		 * provided the drop probability is below 10%.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	return qdisc_drop(skb, sch);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

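/* Parse TCA_PIE_* attributes and update the parameters under the qdisc
 * tree lock; excess packets are dropped if the new limit is lower.
 */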
static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* target is specified in microseconds; convert to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is specified in microseconds; stored in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if the new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	sch_tree_unlock(sch);
	return 0;
}

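/* Called on every dequeue to estimate the average drain rate.  Once at
 * least QUEUE_THRESHOLD bytes are backlogged, the bytes dequeued over a
 * measurement interval are divided by the elapsed pschedtime and folded
 * into avg_dq_rate with an exponential moving average; the burst allowance
 * is also decremented here.
 */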
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If the current queue is at least QUEUE_THRESHOLD bytes and no
	 * measurement cycle is running, there is enough data to estimate
	 * the drain rate: record the current time and start counting.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* While a measurement cycle is running, accumulate the dequeued
	 * bytes. Once QUEUE_THRESHOLD bytes have been drained, compute the
	 * rate for this interval (dq_count is in bytes, the time difference
	 * in pschedtime, so the rate is in bytes per pschedtime tick) and
	 * fold it into the average.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, hold
			 * on to the last drain rate calculated; otherwise
			 * reset dq_count to 0 so the next interval starts
			 * with the following dequeue.
			 */
			if (qlen < QUEUE_THRESHOLD)
				q->vars.dq_count = DQCOUNT_INVALID;
			else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}

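/* Periodically recompute the drop probability.  Queue delay is estimated
 * as backlog / avg_dq_rate and the probability is adjusted by the classic
 * PI update
 *
 *	prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
 *
 * with alpha and beta auto-scaled so that corrections stay small while the
 * probability itself is small.
 */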
static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s32 delta = 0;		/* signed change in probability */
	u32 oldprob;
	u32 alpha, beta;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;

	/* If qdelay is zero and qlen is not, qlen is smaller than the
	 * dequeue rate, so do not update the probability in this round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the PIE algorithm, alpha and beta are between 0 and 2 with a
	 * typical alpha of 0.125. Here they arrive from user space as 0-32
	 * and carry a unit of HZ, so they are scaled before being used to
	 * update the probability: first by MAX_PROB/PSCHED_TICKS_PER_SEC,
	 * then shifted down to reach the 0-2 range. The shift depends on
	 * whether we are in light, medium or high dropping mode.
	 */
	if (q->vars.prob < MAX_PROB / 100) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
	} else if (q->vars.prob < MAX_PROB / 10) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
	} else {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	}

	/* PI update: proportional on the target error, integral on the trend */
	delta += alpha * ((qdelay - q->params.target));
	delta += beta * ((qdelay - qdelay_old));

	oldprob = q->vars.prob;

	/* ensure the probability increases in steps of no more than 2% */
	if (delta > (s32) (MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop: increase the probability quickly for high
	 * delays (>= 250 ms).
	 */
	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* probability is already at its maximum; skip the
			 * decay step below
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Decay the drop probability quickly if the delay has been zero for
	 * two consecutive update periods.
	 */
	if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
		q->vars.prob = (q->vars.prob * 98) / 100;

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* Restart the measurement cycle if
	 * 1. the delay has been low for two consecutive update periods,
	 * 2. the calculated drop probability is zero, and
	 * 3. we have at least one estimate for avg_dq_rate.
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    (q->vars.prob == 0) &&
	    (q->vars.avg_dq_rate > 0))
		pie_vars_init(&q->vars);
}

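/* Timer callback: recompute the drop probability every tupdate jiffies. */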
static void pie_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct pie_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate' jiffies */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);

	if (opt) {
		int err = pie_change(sch, opt);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32) PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob,
		.delay		= ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
				  NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate	= q->vars.avg_dq_rate *
				  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

	skb = __qdisc_dequeue_head(sch, &sch->q);
	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");