linux/include/net/red.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_RED_H
#define __NET_SCHED_RED_H

#include <linux/types.h>
#include <linux/bug.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
#include <linux/reciprocal_div.h>

/*      Random Early Detection (RED) algorithm.
        =======================================

        Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
        for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.

        This file codes a "divisionless" version of the RED algorithm
        as written down in Fig.17 of the paper.

        Short description.
        ------------------

        When a new packet arrives we calculate the average queue length:

        avg = (1-W)*avg + W*current_queue_len,

        W is the filter time constant (chosen as 2^(-Wlog)); it controls
        the inertia of the algorithm. To allow larger bursts, W should be
        decreased.

        if (avg > th_max) -> packet marked (dropped).
        if (avg < th_min) -> packet passes.
        if (th_min < avg < th_max) we calculate probability:

        Pb = max_P * (avg - th_min)/(th_max-th_min)

        and mark (drop) the packet with this probability.
        Pb changes from 0 (at avg==th_min) to max_P (at avg==th_max).
        max_P should be small (not 1); usually 0.01..0.02 is a good value.

        max_P is chosen so that max_P/(th_max-th_min) is a negative
        power of two, which lets the arithmetic be done with shifts only.


        Parameters, settable by user:
        -----------------------------

        qth_min         - bytes (should be < qth_max/2)
        qth_max         - bytes (should be at least 2*qth_min and less than the limit)
        Wlog            - bits (<32), log(1/W).
        Plog            - bits (<32)

        Plog is related to max_P by the formula:

        max_P = (qth_max-qth_min)/2^Plog;

        E.g. if qth_max=128K and qth_min=32K, then Plog=22
        corresponds to max_P=0.02

        Scell_log
        Stab

        Lookup table for log((1-W)^(t/t_ave)).


        NOTES:

        Upper bound on W.
        -----------------

        If you want to allow bursts of L packets of size S,
        you should choose W:

        L + 1 - th_min/S < (1-(1-W)^L)/W

        th_min/S = 32         th_min/S = 4

        log(W)  L
        -1      33
        -2      35
        -3      39
        -4      46
        -5      57
        -6      75
        -7      101
        -8      135
        -9      190
        etc.
 */
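
/* Worked example (illustrative figures, not mandated by the code):
 * with qth_min = 32K bytes, qth_max = 128K bytes and Plog = 22,
 *
 *      max_P = (qth_max - qth_min) / 2^Plog
 *            = 98304 / 4194304 ~= 0.023   (the "0.02" quoted above)
 *
 * and for an average queue of avg = 80K bytes
 *
 *      Pb = max_P * (avg - qth_min) / (qth_max - qth_min)
 *         = 0.023 * 49152 / 98304 ~= 0.012,
 *
 * i.e. roughly one packet in 85 is marked/dropped while the average
 * queue length sits halfway between the two thresholds.
 */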

/*
 * Adaptive RED : An Algorithm for Increasing the Robustness of RED's AQM
 * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
 *
 * Every 500 ms:
 *  if (avg > target and max_p <= 0.5)
 *   increase max_p : max_p += alpha;
 *  else if (avg < target and max_p >= 0.01)
 *   decrease max_p : max_p *= beta;
 *
 * target :[qth_min + 0.4*(qth_max - qth_min),
 *          qth_min + 0.6*(qth_max - qth_min)].
 * alpha : min(0.01, max_p / 4)
 * beta : 0.9
 * max_P is a Q0.32 fixed point number (32 fractional bits)
 * max_P between 0.01 and 0.5 (1% - 50%) [ it is no longer a negative power of two ]
 */
#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))

#define MAX_P_MIN (1 * RED_ONE_PERCENT)
#define MAX_P_MAX (50 * RED_ONE_PERCENT)
#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
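
/* Fixed point example (illustrative): a Q0.32 probability p is stored as
 * p * 2^32, so
 *
 *      RED_ONE_PERCENT = round(2^32 / 100) = 42949673   (~0.01)
 *      MAX_P_MIN       = 42949673                       (~0.01)
 *      MAX_P_MAX       = 50 * 42949673 = 2147483650     (~0.50)
 *
 * which matches the 1% - 50% range and the alpha = min(0.01, max_p / 4)
 * rule quoted in the Adaptive RED comment above.
 */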

#define RED_STAB_SIZE   256
#define RED_STAB_MASK   (RED_STAB_SIZE - 1)

struct red_stats {
        u32             prob_drop;      /* Early probability drops */
        u32             prob_mark;      /* Early probability marks */
        u32             forced_drop;    /* Forced drops, qavg > max_thresh */
        u32             forced_mark;    /* Forced marks, qavg > max_thresh */
        u32             pdrop;          /* Drops due to queue limits */
        u32             other;          /* Drops due to drop() calls */
};

struct red_parms {
        /* Parameters */
        u32             qth_min;        /* Min avg length threshold: Wlog scaled */
        u32             qth_max;        /* Max avg length threshold: Wlog scaled */
        u32             Scell_max;
        u32             max_P;          /* probability, [0 .. 1.0] 32 scaled */
        /* reciprocal_value(max_P / qth_delta) */
        struct reciprocal_value max_P_reciprocal;
        u32             qth_delta;      /* max_th - min_th */
        u32             target_min;     /* min_th + 0.4*(max_th - min_th) */
        u32             target_max;     /* min_th + 0.6*(max_th - min_th) */
        u8              Scell_log;
        u8              Wlog;           /* log(W)               */
        u8              Plog;           /* random number bits   */
        u8              Stab[RED_STAB_SIZE];
};

struct red_vars {
        /* Variables */
        int             qcount;         /* Number of packets since last random
                                           number generation */
        u32             qR;             /* Cached random number */

        unsigned long   qavg;           /* Average queue length: Wlog scaled */
        ktime_t         qidlestart;     /* Start of current idle period */
};

static inline u32 red_maxp(u8 Plog)
{
        return Plog < 32 ? (~0U >> Plog) : ~0U;
}

static inline void red_set_vars(struct red_vars *v)
{
        /* Reset average queue length, the value is strictly bound
         * to the parameters below; resetting hurts a bit but leaving
         * it might result in an unreasonable qavg for a while. --TGR
         */
        v->qavg         = 0;

        v->qcount       = -1;
}

static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
{
        if (fls(qth_min) + Wlog > 32)
                return false;
        if (fls(qth_max) + Wlog > 32)
                return false;
        if (qth_max < qth_min)
                return false;
        return true;
}
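
/* Example (illustrative): red_set_parms() stores qth_min/qth_max shifted
 * left by Wlog in a u32, so the fls() checks above simply guarantee that
 * the shift cannot overflow.  With qth_max = 128K (fls = 18) any Wlog up
 * to 14 is accepted; Wlog = 15 would be rejected.
 */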

static inline int red_get_flags(unsigned char qopt_flags,
                                unsigned char historic_mask,
                                struct nlattr *flags_attr,
                                unsigned char supported_mask,
                                struct nla_bitfield32 *p_flags,
                                unsigned char *p_userbits,
                                struct netlink_ext_ack *extack)
{
        struct nla_bitfield32 flags;

        if (qopt_flags && flags_attr) {
                NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
                return -EINVAL;
        }

        if (flags_attr) {
                flags = nla_get_bitfield32(flags_attr);
        } else {
                flags.selector = historic_mask;
                flags.value = qopt_flags & historic_mask;
        }

        *p_flags = flags;
        *p_userbits = qopt_flags & ~historic_mask;
        return 0;
}

static inline int red_validate_flags(unsigned char flags,
                                     struct netlink_ext_ack *extack)
{
        if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
                NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
                return -EINVAL;
        }

        return 0;
}

static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
                                 u8 Scell_log, u8 *stab, u32 max_P)
{
        int delta = qth_max - qth_min;
        u32 max_p_delta;

        p->qth_min      = qth_min << Wlog;
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
        if (delta <= 0)
                delta = 1;
        p->qth_delta    = delta;
        if (!max_P) {
                max_P = red_maxp(Plog);
                max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
        }
        p->max_P = max_P;
        max_p_delta = max_P / delta;
        max_p_delta = max(max_p_delta, 1U);
        p->max_P_reciprocal  = reciprocal_value(max_p_delta);

        /* RED Adaptive target :
         * [min_th + 0.4*(max_th - min_th),
         *  min_th + 0.6*(max_th - min_th)].
         */
        delta /= 5;
        p->target_min = qth_min + 2*delta;
        p->target_max = qth_min + 3*delta;

        p->Scell_log    = Scell_log;
        p->Scell_max    = (255 << Scell_log);

        if (stab)
                memcpy(p->Stab, stab, sizeof(p->Stab));
}
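
/* Illustrative usage sketch only (compare red_change() in
 * net/sched/sch_red.c); q, ctl and stab stand for the embedding qdisc's
 * private data, the struct tc_red_qopt received over netlink, and the
 * 256 byte Stab table supplied with it:
 *
 *      if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
 *              return -EINVAL;
 *
 *      red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 *                    ctl->Plog, ctl->Scell_log, stab, max_P);
 *      red_set_vars(&q->vars);
 */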

static inline int red_is_idling(const struct red_vars *v)
{
        return v->qidlestart != 0;
}

static inline void red_start_of_idle_period(struct red_vars *v)
{
        v->qidlestart = ktime_get();
}

static inline void red_end_of_idle_period(struct red_vars *v)
{
        v->qidlestart = 0;
}

static inline void red_restart(struct red_vars *v)
{
        red_end_of_idle_period(v);
        v->qavg = 0;
        v->qcount = -1;
}

static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
                                                         const struct red_vars *v)
{
        s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
        long us_idle = min_t(s64, delta, p->Scell_max);
        int  shift;

        /*
         * The problem: ideally, the average queue length should be
         * recalculated over constant clock intervals. That is too
         * expensive, so the calculation is driven by outgoing packets
         * instead. When the queue is idle we have to model this clock
         * by hand.
         *
         * SF+VJ proposed to "generate":
         *
         *      m = idletime / (average_pkt_size / bandwidth)
         *
         * dummy packets as a burst after idle time, i.e.
         *
         *      v->qavg *= (1-W)^m
         *
         * This is an apparently overcomplicated solution (e.g. we have to
         * precompute a table to make this calculation in reasonable time).
         * I believe that a simpler model may be used here,
         * but it is a field for experiments.
         */

        shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];

        if (shift)
                return v->qavg >> shift;
        else {
                /* Approximate the initial part of the exponent with a
                 * linear function:
                 *
                 *      (1-W)^m ~= 1-mW + ...
                 *
                 * This seems to be the best solution to the problem of
                 * too coarse exponent tabulation.
                 */
                us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;

                if (us_idle < (v->qavg >> 1))
                        return v->qavg - us_idle;
                else
                        return v->qavg >> 1;
        }
}
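
/* Worked example (illustrative): each Stab[] entry maps an idle interval,
 * measured in units of 2^Scell_log microseconds, to a right shift that
 * approximates the decay (1-W)^m.  If the entry selected by
 * (us_idle >> Scell_log) holds 3, the average is divided by 8; a zero
 * entry falls through to the linear 1 - mW approximation above.  The
 * table itself is supplied by userspace together with Scell_log and
 * installed by red_set_parms().
 */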

static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
                                                       const struct red_vars *v,
                                                       unsigned int backlog)
{
        /*
         * NOTE: v->qavg is a fixed point number with the point at Wlog.
         * The formula below is equivalent to the floating point
         * version:
         *
         *      qavg = qavg*(1-W) + backlog*W;
         *
         * --ANK (980924)
         */
        return v->qavg + (backlog - (v->qavg >> p->Wlog));
}
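
/* Worked example (illustrative): with Wlog = 9 (W = 1/512), a scaled
 * qavg of 512000 represents an average of 1000 bytes.  For a backlog of
 * 2000 bytes the update gives
 *
 *      512000 + (2000 - (512000 >> 9)) = 513000,
 *
 * i.e. 513000/512 ~= 1001.95 bytes, which matches the floating point
 * EWMA 1000*(1 - 1/512) + 2000*(1/512) ~= 1001.95.
 */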

static inline unsigned long red_calc_qavg(const struct red_parms *p,
                                          const struct red_vars *v,
                                          unsigned int backlog)
{
        if (!red_is_idling(v))
                return red_calc_qavg_no_idle_time(p, v, backlog);
        else
                return red_calc_qavg_from_idle_time(p, v);
}


static inline u32 red_random(const struct red_parms *p)
{
        return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
}

static inline int red_mark_probability(const struct red_parms *p,
                                       const struct red_vars *v,
                                       unsigned long qavg)
{
        /* The formula used below may raise questions.

           OK. qR is a random number in the interval
                (0..1/max_P)*(qth_max-qth_min)
           i.e. 0..(2^Plog). If we used floating point
           arithmetic, it would be: (2^Plog)*rnd_num,
           where rnd_num is less than 1.

           Taking into account that qavg has its fixed
           point at Wlog, the expression below has the
           following floating point equivalent:

           max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount

           Any questions? --ANK (980924)
         */
        return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
}
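
/* Worked example (illustrative): with max_P = 0.02, qth_delta = 96K and
 * the unscaled average 48K above qth_min, Pb = 0.01 and qR is drawn
 * uniformly from [0, 96K/0.02) = [0, 4800K).  The test above fires once
 * 48K * qcount reaches qR, i.e. after at most 1/Pb = 100 packets, giving
 * the uniformly spread inter-mark spacing the RED paper aims for.
 */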

enum {
        RED_BELOW_MIN_THRESH,
        RED_BETWEEN_TRESH,
        RED_ABOVE_MAX_TRESH,
};

static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
{
        if (qavg < p->qth_min)
                return RED_BELOW_MIN_THRESH;
        else if (qavg >= p->qth_max)
                return RED_ABOVE_MAX_TRESH;
        else
                return RED_BETWEEN_TRESH;
}

enum {
        RED_DONT_MARK,
        RED_PROB_MARK,
        RED_HARD_MARK,
};

static inline int red_action(const struct red_parms *p,
                             struct red_vars *v,
                             unsigned long qavg)
{
        switch (red_cmp_thresh(p, qavg)) {
                case RED_BELOW_MIN_THRESH:
                        v->qcount = -1;
                        return RED_DONT_MARK;

                case RED_BETWEEN_TRESH:
                        if (++v->qcount) {
                                if (red_mark_probability(p, v, qavg)) {
                                        v->qcount = 0;
                                        v->qR = red_random(p);
                                        return RED_PROB_MARK;
                                }
                        } else
                                v->qR = red_random(p);

                        return RED_DONT_MARK;

                case RED_ABOVE_MAX_TRESH:
                        v->qcount = -1;
                        return RED_HARD_MARK;
        }

        BUG();
        return RED_DONT_MARK;
}
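
/* Illustrative enqueue-side sketch only (compare red_enqueue() in
 * net/sched/sch_red.c); q stands for the embedding qdisc's private data
 * and backlog for the current queue backlog in bytes:
 *
 *      q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, backlog);
 *      if (red_is_idling(&q->vars))
 *              red_end_of_idle_period(&q->vars);
 *
 *      switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
 *      case RED_DONT_MARK:
 *              break;
 *      case RED_PROB_MARK:
 *              // ECN-mark or drop, count prob_mark/prob_drop
 *              break;
 *      case RED_HARD_MARK:
 *              // ECN-mark or drop, count forced_mark/forced_drop
 *              break;
 *      }
 */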

static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
{
        unsigned long qavg;
        u32 max_p_delta;

        qavg = v->qavg;
        if (red_is_idling(v))
                qavg = red_calc_qavg_from_idle_time(p, v);

        /* v->qavg is a fixed point number with the point at Wlog */
        qavg >>= p->Wlog;

        if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
                p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
        else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
                p->max_P = (p->max_P/10)*9; /* maxp = maxp * beta */

        max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
        max_p_delta = max(max_p_delta, 1U);
        p->max_P_reciprocal = reciprocal_value(max_p_delta);
}
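
/* Illustrative only: a qdisc using adaptive RED typically re-runs this
 * from a periodic timer, roughly (compare red_adaptative_timer() in
 * net/sched/sch_red.c):
 *
 *      spin_lock(root_lock);
 *      red_adaptative_algo(&q->parms, &q->vars);
 *      mod_timer(&q->adapt_timer, jiffies + HZ / 2);
 *      spin_unlock(root_lock);
 *
 * so max_P and max_P_reciprocal keep tracking the observed average
 * queue every 500 ms.
 */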
#endif