linux/include/net/fq_impl.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into the includer */

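/*
 * Typical includer usage (a sketch, not part of this header): every
 * call below must run under fq->lock, and the my_*() names are
 * placeholders for the fq_tin_dequeue_t, fq_skb_free_t and
 * fq_flow_get_default_t callbacks the includer provides (see
 * <net/fq.h>):
 *
 *	spin_lock_bh(&fq->lock);
 *	fq_tin_enqueue(fq, tin, fq_flow_idx(fq, skb), skb,
 *		       my_free_func, my_get_default_func);
 *	skb = fq_tin_dequeue(fq, tin, my_dequeue_func);
 *	spin_unlock_bh(&fq->lock);
 */

/* Undo the accounting done at enqueue time for @skb, which is being
 * removed from @flow: shrink the tin, flow and fq backlog counters and
 * release the skb's share of the memory budget.
 */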
static void fq_adjust_removal(struct fq *fq,
                              struct fq_flow *flow,
                              struct sk_buff *skb)
{
        struct fq_tin *tin = flow->tin;

        tin->backlog_bytes -= skb->len;
        tin->backlog_packets--;
        flow->backlog -= skb->len;
        fq->backlog--;
        fq->memory_usage -= skb->truesize;
}

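/* Re-sort @flow within fq->backlogs, which is kept ordered by
 * descending backlog so that the fattest flow sits at the head and is
 * dropped from first when the fq is over its limits; a flow whose
 * backlog has drained to zero is unlinked from the list entirely.
 */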
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
        struct fq_flow *i;

        if (flow->backlog == 0) {
                list_del_init(&flow->backlogchain);
        } else {
                i = flow;

                list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
                        if (i->backlog < flow->backlog)
                                break;

                list_move_tail(&flow->backlogchain,
                               &i->backlogchain);
        }
}

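/* Remove and return the skb at the head of @flow's queue, fixing up
 * the backlog accounting and the flow's position in fq->backlogs.
 * Returns NULL if the flow is empty.
 */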
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                                       struct fq_flow *flow)
{
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

        skb = __skb_dequeue(&flow->queue);
        if (!skb)
                return NULL;

        fq_adjust_removal(fq, flow, skb);
        fq_rejigger_backlog(fq, flow);

        return skb;
}

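/* Deficit round-robin dequeue across the tin's flows, as in fq_codel:
 * new flows are serviced before old ones; a flow that has exhausted
 * its deficit is topped up with one quantum and rotated to the back of
 * old_flows; a new flow that turns out to be empty gets one pass
 * through old_flows before being unlinked, to prevent starvation of
 * old flows.  An emptied flow is detached (flow->tin = NULL) so its
 * hash bucket can be reused by another tin.  The actual skb extraction
 * is delegated to @dequeue_func so the includer can layer its own
 * processing (e.g. an AQM such as CoDel) on top of fq_flow_dequeue().
 */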
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                                      struct fq_tin *tin,
                                      fq_tin_dequeue_t dequeue_func)
{
        struct fq_flow *flow;
        struct list_head *head;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

begin:
        head = &tin->new_flows;
        if (list_empty(head)) {
                head = &tin->old_flows;
                if (list_empty(head))
                        return NULL;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += fq->quantum;
                list_move_tail(&flow->flowchain,
                               &tin->old_flows);
                goto begin;
        }

        skb = dequeue_func(fq, tin, flow);
        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &tin->new_flows) &&
                    !list_empty(&tin->old_flows)) {
                        list_move_tail(&flow->flowchain, &tin->old_flows);
                } else {
                        list_del_init(&flow->flowchain);
                        flow->tin = NULL;
                }
                goto begin;
        }

        flow->deficit -= skb->len;
        tin->tx_bytes += skb->len;
        tin->tx_packets++;

        return skb;
}

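/* Map @skb to a flow bucket index in [0, fq->flows_cnt) by scaling its
 * flow hash with reciprocal_scale(), avoiding a modulo operation.
 */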
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
        u32 hash = skb_get_hash(skb);

        return reciprocal_scale(hash, fq->flows_cnt);
}

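/* Look up the flow for @skb's bucket.  If the bucket is already in use
 * by a different tin (a hash collision across tins), fall back to the
 * flow returned by the includer-provided @get_default_func and count
 * the collision.  A previously unused flow is counted against the
 * tin's flow total.
 */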
static struct fq_flow *fq_flow_classify(struct fq *fq,
                                        struct fq_tin *tin, u32 idx,
                                        struct sk_buff *skb,
                                        fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        flow = &fq->flows[idx];
        if (flow->tin && flow->tin != tin) {
                flow = get_default_func(fq, tin, idx, skb);
                tin->collisions++;
                fq->collisions++;
        }

        if (!flow->tin)
                tin->flows++;

        return flow;
}

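/* Link @flow into fq->backlogs if it is not yet on the list, then
 * bubble it towards the head after its backlog has grown, keeping the
 * list sorted by descending backlog (the enqueue-path counterpart of
 * fq_rejigger_backlog()).
 */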
static void fq_recalc_backlog(struct fq *fq,
                              struct fq_tin *tin,
                              struct fq_flow *flow)
{
        struct fq_flow *i;

        if (list_empty(&flow->backlogchain))
                list_add_tail(&flow->backlogchain, &fq->backlogs);

        i = flow;
        list_for_each_entry_continue_reverse(i, &fq->backlogs,
                                             backlogchain)
                if (i->backlog > flow->backlog)
                        break;

        list_move(&flow->backlogchain, &i->backlogchain);
}

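/* Enqueue @skb on the flow selected by fq_flow_classify(), update the
 * backlog accounting, and enforce both the packet limit (fq->limit)
 * and the memory limit (fq->memory_limit) by dropping packets from the
 * flow with the largest backlog, i.e. the head of fq->backlogs, until
 * the fq is back under its limits.  Dropped packets are handed to
 * @free_func and counted in the overlimit/overmemory statistics.
 */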
static void fq_tin_enqueue(struct fq *fq,
                           struct fq_tin *tin, u32 idx,
                           struct sk_buff *skb,
                           fq_skb_free_t free_func,
                           fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;
        bool oom;

        lockdep_assert_held(&fq->lock);

        flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);

        flow->tin = tin;
        flow->backlog += skb->len;
        tin->backlog_bytes += skb->len;
        tin->backlog_packets++;
        fq->memory_usage += skb->truesize;
        fq->backlog++;

        fq_recalc_backlog(fq, tin, flow);

        if (list_empty(&flow->flowchain)) {
                flow->deficit = fq->quantum;
                list_add_tail(&flow->flowchain,
                              &tin->new_flows);
        }

        __skb_queue_tail(&flow->queue, skb);
        oom = (fq->memory_usage > fq->memory_limit);
        while (fq->backlog > fq->limit || oom) {
                flow = list_first_entry_or_null(&fq->backlogs,
                                                struct fq_flow,
                                                backlogchain);
                if (!flow)
                        return;

                skb = fq_flow_dequeue(fq, flow);
                if (!skb)
                        return;

                free_func(fq, flow->tin, flow, skb);

                flow->tin->overlimit++;
                fq->overlimit++;
                if (oom) {
                        fq->overmemory++;
                        oom = (fq->memory_usage > fq->memory_limit);
                }
        }
}

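/* Remove from @flow every queued skb for which @filter_func returns
 * true, handing each removed skb to @free_func, then fix up the flow's
 * position in the backlog list.
 */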
static void fq_flow_filter(struct fq *fq,
                           struct fq_flow *flow,
                           fq_skb_filter_t filter_func,
                           void *filter_data,
                           fq_skb_free_t free_func)
{
        struct fq_tin *tin = flow->tin;
        struct sk_buff *skb, *tmp;

        lockdep_assert_held(&fq->lock);

        skb_queue_walk_safe(&flow->queue, skb, tmp) {
                if (!filter_func(fq, tin, flow, skb, filter_data))
                        continue;

                __skb_unlink(skb, &flow->queue);
                fq_adjust_removal(fq, flow, skb);
                free_func(fq, tin, flow, skb);
        }

        fq_rejigger_backlog(fq, flow);
}

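/* Run fq_flow_filter() over every flow currently linked on the tin's
 * new and old flow lists.
 */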
static void fq_tin_filter(struct fq *fq,
                          struct fq_tin *tin,
                          fq_skb_filter_t filter_func,
                          void *filter_data,
                          fq_skb_free_t free_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        list_for_each_entry(flow, &tin->new_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
        list_for_each_entry(flow, &tin->old_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

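/* Free all packets queued on @flow, unlink it from the flow and
 * backlog lists and detach it from its tin, returning the hash bucket
 * to an unused state.
 */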
static void fq_flow_reset(struct fq *fq,
                          struct fq_flow *flow,
                          fq_skb_free_t free_func)
{
        struct sk_buff *skb;

        while ((skb = fq_flow_dequeue(fq, flow)))
                free_func(fq, flow->tin, flow, skb);

        if (!list_empty(&flow->flowchain))
                list_del_init(&flow->flowchain);

        if (!list_empty(&flow->backlogchain))
                list_del_init(&flow->backlogchain);

        flow->tin = NULL;

        WARN_ON_ONCE(flow->backlog);
}

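/* Reset every flow linked on the tin until both flow lists are empty;
 * afterwards the tin must account for no remaining backlog.
 */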
static void fq_tin_reset(struct fq *fq,
                         struct fq_tin *tin,
                         fq_skb_free_t free_func)
{
        struct list_head *head;
        struct fq_flow *flow;

        for (;;) {
                head = &tin->new_flows;
                if (list_empty(head)) {
                        head = &tin->old_flows;
                        if (list_empty(head))
                                break;
                }

                flow = list_first_entry(head, struct fq_flow, flowchain);
                fq_flow_reset(fq, flow, free_func);
        }

        WARN_ON_ONCE(tin->backlog_bytes);
        WARN_ON_ONCE(tin->backlog_packets);
}

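/* Initialize the list linkage and packet queue embedded in a flow. */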
static void fq_flow_init(struct fq_flow *flow)
{
        INIT_LIST_HEAD(&flow->flowchain);
        INIT_LIST_HEAD(&flow->backlogchain);
        __skb_queue_head_init(&flow->queue);
}

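/* Initialize a tin's new and old flow lists. */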
static void fq_tin_init(struct fq_tin *tin)
{
        INIT_LIST_HEAD(&tin->new_flows);
        INIT_LIST_HEAD(&tin->old_flows);
}

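/* Zero the fq and apply the defaults (a 300-byte quantum as in
 * fq_codel, a limit of 8192 packets and a 16 MiB memory limit), then
 * allocate and initialize the flow array.  Returns 0 on success or
 * -ENOMEM if the flow array cannot be allocated.
 */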
static int fq_init(struct fq *fq, int flows_cnt)
{
        int i;

        memset(fq, 0, sizeof(fq[0]));
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */

        fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_init(&fq->flows[i]);

        return 0;
}

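/* Drain every flow and free the flow array; fq_init() must be called
 * again before the fq can be reused.
 */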
static void fq_reset(struct fq *fq,
                     fq_skb_free_t free_func)
{
        int i;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);

        kvfree(fq->flows);
        fq->flows = NULL;
}

#endif