/* linux/include/uapi/linux/pkt_sched.h — traffic control (tc) scheduler UAPI */
   1#ifndef __LINUX_PKT_SCHED_H
   2#define __LINUX_PKT_SCHED_H
   3
   4#include <linux/types.h>
   5
/* Logical priority bands not depending on specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not occasional :-). New IPv6 drafts
   preferred full anarchy inspired by diffserv group.

   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class, actually, as rule it will be handled with more care than
   filler or even bulk.
 */

#define TC_PRIO_BESTEFFORT              0
#define TC_PRIO_FILLER                  1
#define TC_PRIO_BULK                    2
#define TC_PRIO_INTERACTIVE_BULK        4
#define TC_PRIO_INTERACTIVE             6
#define TC_PRIO_CONTROL                 7

/* Highest logical priority; priomap arrays below are TC_PRIO_MAX+1 entries. */
#define TC_PRIO_MAX                     15
/* Generic queue statistics, available for all the elements.
   Particular schedulers may have also their private records.
 */

struct tc_stats {
        __u64   bytes;                  /* Number of enqueued bytes */
        __u32   packets;                /* Number of enqueued packets   */
        __u32   drops;                  /* Packets dropped because of lack of resources */
        __u32   overlimits;             /* Number of throttle events when this
                                         * flow goes out of allocated bandwidth */
        __u32   bps;                    /* Current flow byte rate */
        __u32   pps;                    /* Current flow packet rate */
        __u32   qlen;                   /* Current queue length */
        __u32   backlog;                /* Current backlog */
};
  43
/* Configuration of the kernel rate estimator attached to a qdisc/class. */
struct tc_estimator {
        signed char     interval;       /* sampling period selector */
        unsigned char   ewma_log;       /* log2 of the EWMA averaging constant */
};
  48
/* "Handles"
   ---------

    All the traffic control objects have 32bit identifiers, or "handles".

    They can be considered as opaque numbers from user API viewpoint,
    but actually they always consist of two fields: major and
    minor numbers, which are interpreted by kernel specially,
    that may be used by applications, though not recommended.

    F.e. qdisc handles always have minor number equal to zero,
    classes (or flows) have major equal to parent qdisc major, and
    minor uniquely identifying class inside qdisc.

    Macros to manipulate handles:
 */

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

#define TC_H_UNSPEC     (0U)            /* no handle specified */
#define TC_H_ROOT       (0xFFFFFFFFU)   /* root of the qdisc tree */
#define TC_H_INGRESS    (0xFFFFFFF1U)   /* ingress pseudo-qdisc */
#define TC_H_CLSACT     TC_H_INGRESS    /* clsact shares the ingress handle */

/* Reserved minor numbers under the clsact/ingress major. */
#define TC_H_MIN_PRIORITY       0xFFE0U
#define TC_H_MIN_INGRESS        0xFFF2U
#define TC_H_MIN_EGRESS         0xFFF3U
  80
/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
enum tc_link_layer {
        TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
        TC_LINKLAYER_ETHERNET,
        TC_LINKLAYER_ATM,
};
#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
  88
/* Rate specification shared by rate-based qdiscs (TBF, HTB, CBQ, police). */
struct tc_ratespec {
        unsigned char   cell_log;       /* log2 of rate-table cell size */
        __u8            linklayer; /* lower 4 bits */
        unsigned short  overhead;       /* per-packet size overhead */
        short           cell_align;
        unsigned short  mpu;            /* minimum packet unit */
        __u32           rate;           /* rate in bytes/s; 32-bit here, 64-bit via *_RATE64 attrs */
};

#define TC_RTAB_SIZE    1024
  99
/* Size table (STAB) parameters: map wire packet size to scheduler size. */
struct tc_sizespec {
        unsigned char   cell_log;
        unsigned char   size_log;
        short           cell_align;
        int             overhead;
        unsigned int    linklayer;
        unsigned int    mpu;
        unsigned int    mtu;
        unsigned int    tsize;          /* number of table entries */
};

/* Netlink attributes carrying a size table. */
enum {
        TCA_STAB_UNSPEC,
        TCA_STAB_BASE,
        TCA_STAB_DATA,
        __TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
 119
/* FIFO section */

struct tc_fifo_qopt {
        __u32   limit;  /* Queue length: bytes for bfifo, packets for pfifo */
};
 125
/* PRIO section */

#define TCQ_PRIO_BANDS  16
#define TCQ_MIN_PRIO_BANDS 2

struct tc_prio_qopt {
        int     bands;                  /* Number of bands */
        __u8    priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
};
 135
/* MULTIQ section */

struct tc_multiq_qopt {
        __u16   bands;                  /* Number of bands */
        __u16   max_bands;              /* Maximum number of queues */
};
 142
/* PLUG section */

#define TCQ_PLUG_BUFFER                0
#define TCQ_PLUG_RELEASE_ONE           1
#define TCQ_PLUG_RELEASE_INDEFINITE    2
#define TCQ_PLUG_LIMIT                 3

struct tc_plug_qopt {
        /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
         *  buffer any incoming packets
         * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
         *   to beginning of the next plug.
         * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
         *   Stop buffering packets until the next TCQ_PLUG_BUFFER
         *   command is received (just act as a pass-thru queue).
         * TCQ_PLUG_LIMIT: Increase/decrease queue size
         */
        int             action;         /* one of the TCQ_PLUG_* actions above */
        __u32           limit;
};
 163
/* TBF section */

struct tc_tbf_qopt {
        struct tc_ratespec rate;        /* token rate */
        struct tc_ratespec peakrate;    /* peak rate (optional second bucket) */
        __u32           limit;          /* queue limit */
        __u32           buffer;         /* burst buffer size */
        __u32           mtu;
};

enum {
        TCA_TBF_UNSPEC,
        TCA_TBF_PARMS,
        TCA_TBF_RTAB,
        TCA_TBF_PTAB,
        TCA_TBF_RATE64,
        TCA_TBF_PRATE64,
        TCA_TBF_BURST,
        TCA_TBF_PBURST,
        TCA_TBF_PAD,
        __TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
 188
 189
/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt {
        unsigned        quantum;        /* Bytes per round allocated to flow */
        int             perturb_period; /* Period of hash perturbation */
        __u32           limit;          /* Maximal packets in queue */
        unsigned        divisor;        /* Hash divisor  */
        unsigned        flows;          /* Maximal number of flows  */
};

struct tc_sfqred_stats {
        __u32           prob_drop;      /* Early drops, below max threshold */
        __u32           forced_drop;    /* Early drops, after max threshold */
        __u32           prob_mark;      /* Marked packets, below max threshold */
        __u32           forced_mark;    /* Marked packets, after max threshold */
        __u32           prob_mark_head; /* Marked packets, below max threshold */
        __u32           forced_mark_head;/* Marked packets, after max threshold */
};

/* Extended SFQ options; embeds the v0 struct for backward compatibility. */
struct tc_sfq_qopt_v1 {
        struct tc_sfq_qopt v0;
        unsigned int    depth;          /* max number of packets per flow */
        unsigned int    headdrop;       /* drop from head instead of tail */
/* SFQRED parameters */
        __u32           limit;          /* HARD maximal flow queue length (bytes) */
        __u32           qth_min;        /* Min average length threshold (bytes) */
        __u32           qth_max;        /* Max average length threshold (bytes) */
        unsigned char   Wlog;           /* log(W)               */
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;          /* TC_RED_* flags */
        __u32           max_P;          /* probability, high resolution */
/* SFQRED stats */
        struct tc_sfqred_stats stats;
};


struct tc_sfq_xstats {
        __s32           allot;          /* remaining byte credit of the flow */
};
 234
/* RED section */

enum {
        TCA_RED_UNSPEC,
        TCA_RED_PARMS,
        TCA_RED_STAB,
        TCA_RED_MAX_P,
        __TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt {
        __u32           limit;          /* HARD maximal queue length (bytes)    */
        __u32           qth_min;        /* Min average length threshold (bytes) */
        __u32           qth_max;        /* Max average length threshold (bytes) */
        unsigned char   Wlog;           /* log(W)               */
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;          /* bitmask of the TC_RED_* flags below */
#define TC_RED_ECN              1
#define TC_RED_HARDDROP         2
#define TC_RED_ADAPTATIVE       4
};

struct tc_red_xstats {
        __u32           early;          /* Early drops */
        __u32           pdrop;          /* Drops due to queue limits */
        __u32           other;          /* Drops due to drop() calls */
        __u32           marked;         /* Marked packets */
};
 266
/* GRED section */

#define MAX_DPs 16

enum {
       TCA_GRED_UNSPEC,
       TCA_GRED_PARMS,
       TCA_GRED_STAB,
       TCA_GRED_DPS,
       TCA_GRED_MAX_P,
       TCA_GRED_LIMIT,
       __TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

/* Per-virtual-queue (DP) parameters and counters. */
struct tc_gred_qopt {
        __u32           limit;        /* HARD maximal queue length (bytes)    */
        __u32           qth_min;      /* Min average length threshold (bytes) */
        __u32           qth_max;      /* Max average length threshold (bytes) */
        __u32           DP;           /* up to 2^32 DPs */
        __u32           backlog;
        __u32           qave;
        __u32           forced;
        __u32           early;
        __u32           other;
        __u32           pdrop;
        __u8            Wlog;         /* log(W)               */
        __u8            Plog;         /* log(P_max/(qth_max-qth_min)) */
        __u8            Scell_log;    /* cell size for idle damping */
        __u8            prio;         /* prio of this VQ */
        __u32           packets;
        __u32           bytesin;
};

/* gred setup */
struct tc_gred_sopt {
        __u32           DPs;          /* number of virtual queues */
        __u32           def_DP;       /* default virtual queue */
        __u8            grio;
        __u8            flags;
        __u16           pad1;
};
 310
/* CHOKe section */

enum {
        TCA_CHOKE_UNSPEC,
        TCA_CHOKE_PARMS,
        TCA_CHOKE_STAB,
        TCA_CHOKE_MAX_P,
        __TCA_CHOKE_MAX,
};

#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)

struct tc_choke_qopt {
        __u32           limit;          /* Hard queue length (packets)  */
        __u32           qth_min;        /* Min average threshold (packets) */
        __u32           qth_max;        /* Max average threshold (packets) */
        unsigned char   Wlog;           /* log(W)               */
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;          /* see RED flags */
};

struct tc_choke_xstats {
        __u32           early;          /* Early drops */
        __u32           pdrop;          /* Drops due to queue limits */
        __u32           other;          /* Drops due to drop() calls */
        __u32           marked;         /* Marked packets */
        __u32           matched;        /* Drops due to flow match */
};
 340
/* HTB section */
#define TC_HTB_NUMPRIO          8
#define TC_HTB_MAXDEPTH         8
#define TC_HTB_PROTOVER         3 /* the same as HTB and TC's major */

/* Per-class HTB parameters. */
struct tc_htb_opt {
        struct tc_ratespec      rate;   /* guaranteed rate */
        struct tc_ratespec      ceil;   /* maximum (borrowing) rate */
        __u32   buffer;                 /* burst for rate */
        __u32   cbuffer;                /* burst for ceil */
        __u32   quantum;                /* bytes served per DRR round */
        __u32   level;          /* out only */
        __u32   prio;
};
/* Qdisc-global HTB parameters. */
struct tc_htb_glob {
        __u32 version;          /* to match HTB/TC */
        __u32 rate2quantum;     /* bps->quantum divisor */
        __u32 defcls;           /* default class number */
        __u32 debug;            /* debug flags */

        /* stats */
        __u32 direct_pkts; /* count of non shaped packets */
};
enum {
        TCA_HTB_UNSPEC,
        TCA_HTB_PARMS,
        TCA_HTB_INIT,
        TCA_HTB_CTAB,
        TCA_HTB_RTAB,
        TCA_HTB_DIRECT_QLEN,
        TCA_HTB_RATE64,
        TCA_HTB_CEIL64,
        TCA_HTB_PAD,
        __TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats {
        __u32 lends;
        __u32 borrows;
        __u32 giants;   /* too big packets (rate will not be accurate) */
        __u32 tokens;
        __u32 ctokens;
};
 386
/* HFSC section */

struct tc_hfsc_qopt {
        __u16   defcls;         /* default class */
};

/* Two-segment service curve: m1 for d microseconds, then m2. */
struct tc_service_curve {
        __u32   m1;             /* slope of the first segment in bps */
        __u32   d;              /* x-projection of the first segment in us */
        __u32   m2;             /* slope of the second segment in bps */
};

struct tc_hfsc_stats {
        __u64   work;           /* total work done */
        __u64   rtwork;         /* work done by real-time criteria */
        __u32   period;         /* current period */
        __u32   level;          /* class level in hierarchy */
};

enum {
        TCA_HFSC_UNSPEC,
        TCA_HFSC_RSC,           /* real-time service curve */
        TCA_HFSC_FSC,           /* fair-share (link-sharing) service curve */
        TCA_HFSC_USC,           /* upper-limit service curve */
        __TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
 415
 416
/* CBQ section */

#define TC_CBQ_MAXPRIO          8
#define TC_CBQ_MAXLEVEL         8
#define TC_CBQ_DEF_EWMA         5

/* Link-sharing scheduler options; `change` selects which TCF_CBQ_LSS_*
 * fields below are being updated. */
struct tc_cbq_lssopt {
        unsigned char   change;
        unsigned char   flags;
#define TCF_CBQ_LSS_BOUNDED     1
#define TCF_CBQ_LSS_ISOLATED    2
        unsigned char   ewma_log;
        unsigned char   level;
#define TCF_CBQ_LSS_FLAGS       1
#define TCF_CBQ_LSS_EWMA        2
#define TCF_CBQ_LSS_MAXIDLE     4
#define TCF_CBQ_LSS_MINIDLE     8
#define TCF_CBQ_LSS_OFFTIME     0x10
#define TCF_CBQ_LSS_AVPKT       0x20
        __u32           maxidle;
        __u32           minidle;
        __u32           offtime;
        __u32           avpkt;
};

/* Weighted round-robin scheduling parameters. */
struct tc_cbq_wrropt {
        unsigned char   flags;
        unsigned char   priority;
        unsigned char   cpriority;
        unsigned char   __reserved;
        __u32           allot;
        __u32           weight;
};

/* Overlimit strategy. */
struct tc_cbq_ovl {
        unsigned char   strategy;
#define TC_CBQ_OVL_CLASSIC      0
#define TC_CBQ_OVL_DELAY        1
#define TC_CBQ_OVL_LOWPRIO      2
#define TC_CBQ_OVL_DROP         3
#define TC_CBQ_OVL_RCLASSIC     4
        unsigned char   priority2;
        __u16           pad;
        __u32           penalty;
};

struct tc_cbq_police {
        unsigned char   police;
        unsigned char   __res1;
        unsigned short  __res2;
};

struct tc_cbq_fopt {
        __u32           split;
        __u32           defmap;
        __u32           defchange;
};

struct tc_cbq_xstats {
        __u32           borrows;
        __u32           overactions;
        __s32           avgidle;
        __s32           undertime;
};

enum {
        TCA_CBQ_UNSPEC,
        TCA_CBQ_LSSOPT,
        TCA_CBQ_WRROPT,
        TCA_CBQ_FOPT,
        TCA_CBQ_OVL_STRATEGY,
        TCA_CBQ_RATE,
        TCA_CBQ_RTAB,
        TCA_CBQ_POLICE,
        __TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX     (__TCA_CBQ_MAX - 1)
 495
/* dsmark section */

enum {
        TCA_DSMARK_UNSPEC,
        TCA_DSMARK_INDICES,
        TCA_DSMARK_DEFAULT_INDEX,
        TCA_DSMARK_SET_TC_INDEX,
        TCA_DSMARK_MASK,
        TCA_DSMARK_VALUE,
        __TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
 509
/* ATM  section */

enum {
        TCA_ATM_UNSPEC,
        TCA_ATM_FD,             /* file/socket descriptor */
        TCA_ATM_PTR,            /* pointer to descriptor - later */
        TCA_ATM_HDR,            /* LL header */
        TCA_ATM_EXCESS,         /* excess traffic class (0 for CLP)  */
        TCA_ATM_ADDR,           /* PVC address (for output only) */
        TCA_ATM_STATE,          /* VC state (ATM_VS_*; for output only) */
        __TCA_ATM_MAX,
};

#define TCA_ATM_MAX     (__TCA_ATM_MAX - 1)
 524
/* Network emulator */

enum {
        TCA_NETEM_UNSPEC,
        TCA_NETEM_CORR,
        TCA_NETEM_DELAY_DIST,
        TCA_NETEM_REORDER,
        TCA_NETEM_CORRUPT,
        TCA_NETEM_LOSS,
        TCA_NETEM_RATE,
        TCA_NETEM_ECN,
        TCA_NETEM_RATE64,
        TCA_NETEM_PAD,
        __TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)

struct tc_netem_qopt {
        __u32   latency;        /* added delay (us) */
        __u32   limit;          /* fifo limit (packets) */
        __u32   loss;           /* random packet loss (0=none ~0=100%) */
        __u32   gap;            /* re-ordering gap (0 for none) */
        __u32   duplicate;      /* random packet dup  (0=none ~0=100%) */
        __u32   jitter;         /* random jitter in latency (us) */
};

/* Correlation coefficients for the random processes above. */
struct tc_netem_corr {
        __u32   delay_corr;     /* delay correlation */
        __u32   loss_corr;      /* packet loss correlation */
        __u32   dup_corr;       /* duplicate correlation  */
};

struct tc_netem_reorder {
        __u32   probability;
        __u32   correlation;
};

struct tc_netem_corrupt {
        __u32   probability;
        __u32   correlation;
};

struct tc_netem_rate {
        __u32   rate;   /* byte/s */
        __s32   packet_overhead;
        __u32   cell_size;
        __s32   cell_overhead;
};

enum {
        NETEM_LOSS_UNSPEC,
        NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
        NETEM_LOSS_GE,          /* Gilbert Elliot models */
        __NETEM_LOSS_MAX
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)

/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
        __u32   p13;
        __u32   p31;
        __u32   p32;
        __u32   p14;
        __u32   p23;
};

/* Gilbert-Elliot models */
struct tc_netem_gemodel {
        __u32 p;
        __u32 r;
        __u32 h;
        __u32 k1;
};

#define NETEM_DIST_SCALE        8192
#define NETEM_DIST_MAX          16384
 602
/* DRR */

enum {
        TCA_DRR_UNSPEC,
        TCA_DRR_QUANTUM,
        __TCA_DRR_MAX
};

#define TCA_DRR_MAX     (__TCA_DRR_MAX - 1)

struct tc_drr_stats {
        __u32   deficit;        /* remaining byte deficit of the class */
};
 616
/* MQPRIO */
#define TC_QOPT_BITMASK 15
#define TC_QOPT_MAX_QUEUE 16

enum {
        TC_MQPRIO_HW_OFFLOAD_NONE,      /* no offload requested */
        TC_MQPRIO_HW_OFFLOAD_TCS,       /* offload TCs, no queue counts */
        __TC_MQPRIO_HW_OFFLOAD_MAX
};

#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)

enum {
        TC_MQPRIO_MODE_DCB,
        TC_MQPRIO_MODE_CHANNEL,
        __TC_MQPRIO_MODE_MAX
};

/* NOTE(review): this macro name collides with the enum constant above and
 * was presumably meant to be TC_MQPRIO_MODE_MAX (matching the pattern used
 * everywhere else in this file).  It still evaluates to 1 by accident: an
 * object-like macro never re-expands its own name, so the inner
 * __TC_MQPRIO_MODE_MAX resolves to the enum constant (2).  Left unchanged
 * because renaming a UAPI macro would break userspace source compatibility.
 */
#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)

enum {
        TC_MQPRIO_SHAPER_DCB,
        TC_MQPRIO_SHAPER_BW_RATE,       /* Add new shapers below */
        __TC_MQPRIO_SHAPER_MAX
};

/* NOTE(review): same self-referential naming quirk as __TC_MQPRIO_MODE_MAX
 * above; evaluates to 1 via the enum constant. */
#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)

struct tc_mqprio_qopt {
        __u8    num_tc;                         /* number of traffic classes */
        __u8    prio_tc_map[TC_QOPT_BITMASK + 1]; /* priority -> traffic class */
        __u8    hw;                             /* hw offload mode requested */
        __u16   count[TC_QOPT_MAX_QUEUE];       /* queue count per TC */
        __u16   offset[TC_QOPT_MAX_QUEUE];      /* first queue of each TC */
};

#define TC_MQPRIO_F_MODE                0x1
#define TC_MQPRIO_F_SHAPER              0x2
#define TC_MQPRIO_F_MIN_RATE            0x4
#define TC_MQPRIO_F_MAX_RATE            0x8

enum {
        TCA_MQPRIO_UNSPEC,
        TCA_MQPRIO_MODE,
        TCA_MQPRIO_SHAPER,
        TCA_MQPRIO_MIN_RATE64,
        TCA_MQPRIO_MAX_RATE64,
        __TCA_MQPRIO_MAX,
};

#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
 668
/* SFB */

enum {
        TCA_SFB_UNSPEC,
        TCA_SFB_PARMS,
        __TCA_SFB_MAX,
};

#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)

/*
 * Note: increment, decrement are Q0.16 fixed-point values.
 */
struct tc_sfb_qopt {
        __u32 rehash_interval;  /* delay between hash move, in ms */
        __u32 warmup_time;      /* double buffering warmup time in ms (warmup_time < rehash_interval) */
        __u32 max;              /* max len of qlen_min */
        __u32 bin_size;         /* maximum queue length per bin */
        __u32 increment;        /* probability increment, (d1 in Blue) */
        __u32 decrement;        /* probability decrement, (d2 in Blue) */
        __u32 limit;            /* max SFB queue length */
        __u32 penalty_rate;     /* inelastic flows are rate limited to 'rate' pps */
        __u32 penalty_burst;
};

struct tc_sfb_xstats {
        __u32 earlydrop;
        __u32 penaltydrop;
        __u32 bucketdrop;
        __u32 queuedrop;
        __u32 childdrop; /* drops in child qdisc */
        __u32 marked;
        __u32 maxqlen;
        __u32 maxprob;
        __u32 avgprob;
};

#define SFB_MAX_PROB 0xFFFF
 707
/* QFQ */
enum {
        TCA_QFQ_UNSPEC,
        TCA_QFQ_WEIGHT,
        TCA_QFQ_LMAX,
        __TCA_QFQ_MAX
};

#define TCA_QFQ_MAX     (__TCA_QFQ_MAX - 1)

struct tc_qfq_stats {
        __u32 weight;           /* class weight */
        __u32 lmax;             /* maximum packet length for the class */
};
 722
/* CODEL */

enum {
        TCA_CODEL_UNSPEC,
        TCA_CODEL_TARGET,
        TCA_CODEL_LIMIT,
        TCA_CODEL_INTERVAL,
        TCA_CODEL_ECN,
        TCA_CODEL_CE_THRESHOLD,
        __TCA_CODEL_MAX
};

#define TCA_CODEL_MAX   (__TCA_CODEL_MAX - 1)

struct tc_codel_xstats {
        __u32   maxpacket; /* largest packet we've seen so far */
        __u32   count;     /* how many drops we've done since the last time we
                            * entered dropping state
                            */
        __u32   lastcount; /* count at entry to dropping state */
        __u32   ldelay;    /* in-queue delay seen by most recently dequeued packet */
        __s32   drop_next; /* time to drop next packet */
        __u32   drop_overlimit; /* number of time max qdisc packet limit was hit */
        __u32   ecn_mark;  /* number of packets we ECN marked instead of dropped */
        __u32   dropping;  /* are we in dropping state ? */
        __u32   ce_mark;   /* number of CE marked packets because of ce_threshold */
};
 750
/* FQ_CODEL */

enum {
        TCA_FQ_CODEL_UNSPEC,
        TCA_FQ_CODEL_TARGET,
        TCA_FQ_CODEL_LIMIT,
        TCA_FQ_CODEL_INTERVAL,
        TCA_FQ_CODEL_ECN,
        TCA_FQ_CODEL_FLOWS,
        TCA_FQ_CODEL_QUANTUM,
        TCA_FQ_CODEL_CE_THRESHOLD,
        TCA_FQ_CODEL_DROP_BATCH_SIZE,
        TCA_FQ_CODEL_MEMORY_LIMIT,
        __TCA_FQ_CODEL_MAX
};

#define TCA_FQ_CODEL_MAX        (__TCA_FQ_CODEL_MAX - 1)

/* Discriminator for the union inside tc_fq_codel_xstats. */
enum {
        TCA_FQ_CODEL_XSTATS_QDISC,
        TCA_FQ_CODEL_XSTATS_CLASS,
};

struct tc_fq_codel_qd_stats {
        __u32   maxpacket;      /* largest packet we've seen so far */
        __u32   drop_overlimit; /* number of time max qdisc
                                 * packet limit was hit
                                 */
        __u32   ecn_mark;       /* number of packets we ECN marked
                                 * instead of being dropped
                                 */
        __u32   new_flow_count; /* number of time packets
                                 * created a 'new flow'
                                 */
        __u32   new_flows_len;  /* count of flows in new list */
        __u32   old_flows_len;  /* count of flows in old list */
        __u32   ce_mark;        /* packets above ce_threshold */
        __u32   memory_usage;   /* in bytes */
        __u32   drop_overmemory;
};

struct tc_fq_codel_cl_stats {
        __s32   deficit;
        __u32   ldelay;         /* in-queue delay seen by most recently
                                 * dequeued packet
                                 */
        __u32   count;
        __u32   lastcount;
        __u32   dropping;
        __s32   drop_next;
};

struct tc_fq_codel_xstats {
        __u32   type;           /* TCA_FQ_CODEL_XSTATS_QDISC or _CLASS */
        union {
                struct tc_fq_codel_qd_stats qdisc_stats;
                struct tc_fq_codel_cl_stats class_stats;
        };
};
 810
/* FQ */

enum {
        TCA_FQ_UNSPEC,

        TCA_FQ_PLIMIT,          /* limit of total number of packets in queue */

        TCA_FQ_FLOW_PLIMIT,     /* limit of packets per flow */

        TCA_FQ_QUANTUM,         /* RR quantum */

        TCA_FQ_INITIAL_QUANTUM,         /* RR quantum for new flow */

        TCA_FQ_RATE_ENABLE,     /* enable/disable rate limiting */

        TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */

        TCA_FQ_FLOW_MAX_RATE,   /* per flow max rate */

        TCA_FQ_BUCKETS_LOG,     /* log2(number of buckets) */

        TCA_FQ_FLOW_REFILL_DELAY,       /* flow credit refill delay in usec */

        TCA_FQ_ORPHAN_MASK,     /* mask applied to orphaned skb hashes */

        TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */

        __TCA_FQ_MAX
};

#define TCA_FQ_MAX      (__TCA_FQ_MAX - 1)

struct tc_fq_qd_stats {
        __u64   gc_flows;
        __u64   highprio_packets;
        __u64   tcp_retrans;
        __u64   throttled;
        __u64   flows_plimit;
        __u64   pkts_too_long;
        __u64   allocation_errors;
        __s64   time_next_delayed_flow;
        __u32   flows;
        __u32   inactive_flows;
        __u32   throttled_flows;
        __u32   unthrottle_latency_ns;
};
 857
/* Heavy-Hitter Filter */

enum {
        TCA_HHF_UNSPEC,
        TCA_HHF_BACKLOG_LIMIT,
        TCA_HHF_QUANTUM,
        TCA_HHF_HH_FLOWS_LIMIT,
        TCA_HHF_RESET_TIMEOUT,
        TCA_HHF_ADMIT_BYTES,
        TCA_HHF_EVICT_TIMEOUT,
        TCA_HHF_NON_HH_WEIGHT,
        __TCA_HHF_MAX
};

#define TCA_HHF_MAX     (__TCA_HHF_MAX - 1)

struct tc_hhf_xstats {
        __u32   drop_overlimit; /* number of times max qdisc packet limit
                                 * was hit
                                 */
        __u32   hh_overlimit;   /* number of times max heavy-hitters was hit */
        __u32   hh_tot_count;   /* number of captured heavy-hitters so far */
        __u32   hh_cur_count;   /* number of current heavy-hitters */
};
 882
/* PIE */
enum {
        TCA_PIE_UNSPEC,
        TCA_PIE_TARGET,
        TCA_PIE_LIMIT,
        TCA_PIE_TUPDATE,
        TCA_PIE_ALPHA,
        TCA_PIE_BETA,
        TCA_PIE_ECN,
        TCA_PIE_BYTEMODE,
        __TCA_PIE_MAX
};
#define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)

struct tc_pie_xstats {
        __u32 prob;             /* current probability */
        __u32 delay;            /* current delay in ms */
        __u32 avg_dq_rate;      /* current average dq_rate in bits/pie_time */
        __u32 packets_in;       /* total number of packets enqueued */
        __u32 dropped;          /* packets dropped due to pie_action */
        __u32 overlimit;        /* dropped due to lack of space in queue */
        __u32 maxq;             /* maximum queue size */
        __u32 ecn_mark;         /* packets marked with ecn */
};
 907
/* CBS */
/* Credit Based Shaper parameters (presumably IEEE 802.1Q CBS — the field
 * names match that algorithm's credit/slope terms; confirm against the
 * sch_cbs implementation). */
struct tc_cbs_qopt {
        __u8 offload;           /* request hardware offload */
        __u8 _pad[3];
        __s32 hicredit;
        __s32 locredit;
        __s32 idleslope;
        __s32 sendslope;
};

enum {
        TCA_CBS_UNSPEC,
        TCA_CBS_PARMS,
        __TCA_CBS_MAX,
};

#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 925
 926#endif
 927