linux/net/sched/sch_pie.c
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
 * IEEE Conference on High Performance Switching and Routing 2013:
 * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 10000
#define DQCOUNT_INVALID -1
#define MAX_PROB  0xffffffff
#define PIE_SCALE 8

/* parameters used */
struct pie_params {
        psched_time_t target;   /* user specified target delay in pschedtime */
        u32 tupdate;            /* timer frequency (in jiffies) */
        u32 limit;              /* number of packets that can be enqueued */
        u32 alpha;              /* alpha and beta are between 0 and 32 */
        u32 beta;               /* and are used for shift relative to 1 */
        bool ecn;               /* true if ecn is enabled */
        bool bytemode;          /* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
        u32 prob;               /* probability but scaled by u32 limit. */
        psched_time_t burst_time;
        psched_time_t qdelay;
        psched_time_t qdelay_old;
        u64 dq_count;           /* measured in bytes */
        psched_time_t dq_tstamp;        /* drain rate */
        u32 avg_dq_rate;        /* bytes per pschedtime tick, scaled */
        u32 qlen_old;           /* in bytes */
};

/* statistics gathering */
struct pie_stats {
        u32 packets_in;         /* total number of packets enqueued */
        u32 dropped;            /* packets dropped due to pie_action */
        u32 overlimit;          /* dropped due to lack of space in queue */
        u32 maxq;               /* maximum queue size */
        u32 ecn_mark;           /* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
        struct pie_params params;
        struct pie_vars vars;
        struct pie_stats stats;
        struct timer_list adapt_timer;
        struct Qdisc *sch;
};

static void pie_params_init(struct pie_params *params)
{
        params->alpha = 2;
        params->beta = 20;
        params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC); /* 30 ms */
        params->limit = 1000;   /* default of 1000 packets */
        params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC);   /* 20 ms */
        params->ecn = false;
        params->bytemode = false;
}
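
/* Illustrative example: with these defaults, the qdisc behaves as if user
 * space had requested roughly "tc qdisc add dev eth0 root pie limit 1000
 * target 20ms tupdate 30ms alpha 2 beta 20" (the exact tc syntax depends on
 * the iproute2 version); any field can later be overridden via pie_change().
 */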

static void pie_vars_init(struct pie_vars *vars)
{
        vars->dq_count = DQCOUNT_INVALID;
        vars->avg_dq_rate = 0;
        /* default of 100 ms in pschedtime */
        vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
}

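/* Drop-probability bookkeeping (worked example for clarity): q->vars.prob is
 * a fixed-point fraction where MAX_PROB (0xffffffff) stands for probability
 * 1.0, so drop_early() drops when prandom_u32() falls below it.  With
 * bytemode enabled, the probability is scaled by packet size: e.g. assuming
 * an MTU of 1500 bytes and prob = MAX_PROB / 4 (~25%), a 500-byte packet is
 * dropped with local_prob = (prob / 1500) * 500, i.e. roughly 8%.
 */
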
static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        u32 rnd;
        u32 local_prob = q->vars.prob;
        u32 mtu = psched_mtu(qdisc_dev(sch));

        /* If there is still burst allowance left, skip random early drop */
        if (q->vars.burst_time > 0)
                return false;

        /* If current delay is less than half of target, and
         * if drop prob is low already, disable early_drop
         */
        if ((q->vars.qdelay < q->params.target / 2)
            && (q->vars.prob < MAX_PROB / 5))
                return false;

        /* If we have fewer than 2 mtu-sized packets, disable drop_early,
         * similar to min_th in RED
         */
        if (sch->qstats.backlog < 2 * mtu)
                return false;

        /* If bytemode is turned on, use packet size to compute new
         * probability. Smaller packets will have lower drop prob in this case
         */
        if (q->params.bytemode && packet_size <= mtu)
                local_prob = (local_prob / mtu) * packet_size;
        else
                local_prob = q->vars.prob;

        rnd = prandom_u32();
        if (rnd < local_prob)
                return true;

        return false;
}

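/* Enqueue-side ECN note (illustrative numbers): when ecn is enabled and the
 * current drop probability is at most MAX_PROB / 10 (10%), a packet selected
 * by drop_early() is CE-marked via INET_ECN_set_ce() instead of being
 * dropped.  For example, at prob = MAX_PROB / 20 (~5%) an ECT packet picked
 * for early drop is marked and still enqueued; above the 10% threshold, or
 * for non-ECT traffic, it is dropped.
 */
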
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        bool enqueue = false;

        if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
                q->stats.overlimit++;
                goto out;
        }

        if (!drop_early(sch, skb->len)) {
                enqueue = true;
        } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
                   INET_ECN_set_ce(skb)) {
                /* If packet is ecn capable, mark it if drop probability
                 * is lower than 10%, else drop it.
                 */
                q->stats.ecn_mark++;
                enqueue = true;
        }

        /* we can enqueue the packet */
        if (enqueue) {
                q->stats.packets_in++;
                if (qdisc_qlen(sch) > q->stats.maxq)
                        q->stats.maxq = qdisc_qlen(sch);

                return qdisc_enqueue_tail(skb, sch);
        }

out:
        q->stats.dropped++;
        return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
        [TCA_PIE_TARGET] = {.type = NLA_U32},
        [TCA_PIE_LIMIT] = {.type = NLA_U32},
        [TCA_PIE_TUPDATE] = {.type = NLA_U32},
        [TCA_PIE_ALPHA] = {.type = NLA_U32},
        [TCA_PIE_BETA] = {.type = NLA_U32},
        [TCA_PIE_ECN] = {.type = NLA_U32},
        [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
        unsigned int qlen, dropped = 0;
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy, NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        /* convert from microseconds to pschedtime */
        if (tb[TCA_PIE_TARGET]) {
                /* target is in us */
                u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

                /* convert to pschedtime */
                q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
        }

        /* tupdate is in microseconds from user space; store it in jiffies */
        if (tb[TCA_PIE_TUPDATE])
                q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

        if (tb[TCA_PIE_LIMIT]) {
                u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

                q->params.limit = limit;
                sch->limit = limit;
        }

        if (tb[TCA_PIE_ALPHA])
                q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

        if (tb[TCA_PIE_BETA])
                q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

        if (tb[TCA_PIE_ECN])
                q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

        if (tb[TCA_PIE_BYTEMODE])
                q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

        /* Drop excess packets if new limit is lower */
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                rtnl_qdisc_drop(skb, sch);
        }
        qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

        sch_tree_unlock(sch);
        return 0;
}

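/* Drain-rate estimation in a nutshell (worked example): once at least
 * QUEUE_THRESHOLD (10000) bytes have been dequeued since dq_tstamp, the rate
 * sample is (dq_count << PIE_SCALE) / dtime, i.e. bytes per psched tick
 * scaled by 2^8.  avg_dq_rate is then updated as avg - avg/8 + sample/8, an
 * EWMA with weight 1/8 on the new sample: e.g. a previous average of 1600
 * and a sample of 2400 give 1600 - 200 + 300 = 1700.
 */
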
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        int qlen = sch->qstats.backlog; /* current queue size in bytes */

        /* If current queue is about 10 packets or more and dq_count is unset,
         * we have enough packets to calculate the drain rate. Save
         * current time as dq_tstamp and start measurement cycle.
         */
        if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
                q->vars.dq_tstamp = psched_get_time();
                q->vars.dq_count = 0;
        }

        /* Calculate the average drain rate from this value.  If queue length
         * has receded to a small value, viz. <= QUEUE_THRESHOLD bytes, reset
         * dq_count to -1 as we no longer have enough packets to calculate the
         * drain rate.  The following if block is entered only when we have a
         * substantial queue built up (QUEUE_THRESHOLD bytes or more) and we
         * calculate the drain rate for the threshold here.  dq_count is in
         * bytes, the time difference in psched_time, hence the rate is in
         * bytes/psched_time.
         */
        if (q->vars.dq_count != DQCOUNT_INVALID) {
                q->vars.dq_count += skb->len;

                if (q->vars.dq_count >= QUEUE_THRESHOLD) {
                        psched_time_t now = psched_get_time();
                        u32 dtime = now - q->vars.dq_tstamp;
                        u32 count = q->vars.dq_count << PIE_SCALE;

                        if (dtime == 0)
                                return;

                        count = count / dtime;

                        if (q->vars.avg_dq_rate == 0)
                                q->vars.avg_dq_rate = count;
                        else
                                q->vars.avg_dq_rate =
                                    (q->vars.avg_dq_rate -
                                     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

                        /* If the queue has receded below the threshold, we hold
                         * on to the last drain rate calculated, else we reset
                         * dq_count to 0 to re-enter the if block when the next
                         * packet is dequeued.
                         */
                        if (qlen < QUEUE_THRESHOLD)
                                q->vars.dq_count = DQCOUNT_INVALID;
                        else {
                                q->vars.dq_count = 0;
                                q->vars.dq_tstamp = psched_get_time();
                        }

                        if (q->vars.burst_time > 0) {
                                if (q->vars.burst_time > dtime)
                                        q->vars.burst_time -= dtime;
                                else
                                        q->vars.burst_time = 0;
                        }
                }
        }
}

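/* Probability update in a nutshell (worked example): qdelay is estimated as
 * backlog / drain rate, i.e. (qlen << PIE_SCALE) / avg_dq_rate, where the
 * 2^PIE_SCALE factors cancel and the result is in psched ticks.  The PIE
 * control law then applies
 *
 *   delta = alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
 *
 * with alpha and beta rescaled below from the user's 0-32 range: e.g. the
 * default alpha = 2 in the heavy-drop branch (>> 4) corresponds to 2/16 =
 * 0.125, the paper's typical value, and is a further 8x smaller in the
 * light-drop branch (>> 7).
 */
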
static void calculate_probability(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        u32 qlen = sch->qstats.backlog; /* queue size in bytes */
        psched_time_t qdelay = 0;       /* in pschedtime */
        psched_time_t qdelay_old = q->vars.qdelay;      /* in pschedtime */
        s32 delta = 0;          /* determines the change in probability */
        u32 oldprob;
        u32 alpha, beta;
        bool update_prob = true;

        q->vars.qdelay_old = q->vars.qdelay;

        if (q->vars.avg_dq_rate > 0)
                qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
        else
                qdelay = 0;

        /* If qdelay is zero and qlen is not, it means qlen is very small, less
         * than dequeue_rate, so we do not update probability in this round
         */
        if (qdelay == 0 && qlen != 0)
                update_prob = false;

        /* In the algorithm, alpha and beta are between 0 and 2 with typical
         * value for alpha as 0.125. In this implementation, we use values 0-32
         * passed from user space to represent this. Also, alpha and beta have
         * unit of HZ and need to be scaled before they can be used to update
         * probability. alpha/beta are updated locally below by 1) scaling them
         * appropriately 2) scaling down by 16 to come to 0-2 range.
         * Please see paper for details.
         *
         * We scale alpha and beta differently depending on whether we are in
         * light, medium or high dropping mode.
         */
        if (q->vars.prob < MAX_PROB / 100) {
                alpha =
                    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
                beta =
                    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
        } else if (q->vars.prob < MAX_PROB / 10) {
                alpha =
                    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
                beta =
                    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
        } else {
                alpha =
                    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
                beta =
                    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
        }

        /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
        delta += alpha * ((qdelay - q->params.target));
        delta += beta * ((qdelay - qdelay_old));

        oldprob = q->vars.prob;

        /* to ensure we increase probability in steps of no more than 2% */
        if (delta > (s32) (MAX_PROB / (100 / 2)) &&
            q->vars.prob >= MAX_PROB / 10)
                delta = (MAX_PROB / 100) * 2;

        /* Non-linear drop:
         * Tune drop probability to increase quickly for high delays (>= 250 ms).
         * 250 ms is derived through experiments and provides error protection.
         */

        if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
                delta += MAX_PROB / (100 / 2);

        q->vars.prob += delta;

        if (delta > 0) {
                /* prevent overflow */
                if (q->vars.prob < oldprob) {
                        q->vars.prob = MAX_PROB;
                        /* Prevent normalization error. If probability is at
                         * maximum value already, we normalize it here, and
                         * skip the check to do a non-linear drop in the next
                         * section.
                         */
                        update_prob = false;
                }
        } else {
                /* prevent underflow */
                if (q->vars.prob > oldprob)
                        q->vars.prob = 0;
        }

        /* Non-linear drop in probability: Reduce drop probability quickly if
         * delay is 0 for 2 consecutive Tupdate periods.
         */

        if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
                q->vars.prob = (q->vars.prob * 98) / 100;

        q->vars.qdelay = qdelay;
        q->vars.qlen_old = qlen;

        /* We restart the measurement cycle if the following conditions are met:
         * 1. If the delay has been low for 2 consecutive Tupdate periods
         * 2. Calculated drop probability is zero
         * 3. We have at least one estimate for the avg_dq_rate, i.e., it
         *    is a non-zero value
         */
        if ((q->vars.qdelay < q->params.target / 2) &&
            (q->vars.qdelay_old < q->params.target / 2) &&
            (q->vars.prob == 0) &&
            (q->vars.avg_dq_rate > 0))
                pie_vars_init(&q->vars);
}

static void pie_timer(struct timer_list *t)
{
        struct pie_sched_data *q = from_timer(q, t, adapt_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

        spin_lock(root_lock);
        calculate_probability(sch);

        /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
        if (q->params.tupdate)
                mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
        spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        pie_params_init(&q->params);
        pie_vars_init(&q->vars);
        sch->limit = q->params.limit;

        q->sch = sch;
        timer_setup(&q->adapt_timer, pie_timer, 0);

        if (opt) {
                int err = pie_change(sch, opt, extack);

                if (err)
                        return err;
        }

        mod_timer(&q->adapt_timer, jiffies + HZ / 2);
        return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (!opts)
                goto nla_put_failure;

        /* convert target from pschedtime to us */
        if (nla_put_u32(skb, TCA_PIE_TARGET,
                        ((u32) PSCHED_TICKS2NS(q->params.target)) /
                        NSEC_PER_USEC) ||
            nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
            nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
            nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
            nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
            nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -1;
}

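/* Stats note: avg_dq_rate is kept internally as bytes per psched tick scaled
 * by 2^PIE_SCALE, so the dump below multiplies by PSCHED_TICKS_PER_SEC and
 * shifts right by PIE_SCALE to report plain bytes per second; qdelay is
 * similarly converted from psched ticks to microseconds.
 */
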
static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct tc_pie_xstats st = {
                .prob           = q->vars.prob,
                .delay          = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
                                   NSEC_PER_USEC,
                /* unscale and return dq_rate in bytes per sec */
                .avg_dq_rate    = q->vars.avg_dq_rate *
                                  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
                .packets_in     = q->stats.packets_in,
                .overlimit      = q->stats.overlimit,
                .maxq           = q->stats.maxq,
                .dropped        = q->stats.dropped,
                .ecn_mark       = q->stats.ecn_mark,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = qdisc_dequeue_head(sch);

        if (!skb)
                return NULL;

        pie_process_dequeue(sch, skb);
        return skb;
}

static void pie_reset(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        qdisc_reset_queue(sch);
        pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        q->params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
        .id             = "pie",
        .priv_size      = sizeof(struct pie_sched_data),
        .enqueue        = pie_qdisc_enqueue,
        .dequeue        = pie_qdisc_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = pie_init,
        .destroy        = pie_destroy,
        .reset          = pie_reset,
        .change         = pie_change,
        .dump           = pie_dump,
        .dump_stats     = pie_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init pie_module_init(void)
{
        return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
        unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");