linux/include/linux/flex_proportions.h
/*
 * Floating proportions with flexible aging period
 *
 *  Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */

#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

/*
 * When a maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period of
 *   ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
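
/*
 * Worked example: with FPROP_FRAC_SHIFT = 10, FPROP_FRAC_BASE is
 * 1 << 10 = 1024, so fractions are expressed in 1024ths. A max_frac
 * argument of 512 (see __fprop_inc_percpu_max() below) would thus be
 * presumed to limit an event type to about 50% of all events, and at
 * most ULLONG_MAX >> 10 events can be accounted per period.
 */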

/*
 * ---- Global proportion definitions ----
 */
struct fprop_global {
        /* Number of events in the current period */
        struct percpu_counter events;
        /* Current period */
        unsigned int period;
        /* Synchronization with period transitions */
        seqcount_t sequence;
};

int fprop_global_init(struct fprop_global *p);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);

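/*
 * Usage sketch (illustrative only; the caller and the timing source are
 * assumptions, not part of this header). One fprop_global instance covers
 * all related event types, and the caller is expected to age proportions
 * by declaring new periods, e.g. from a periodic timer:
 *
 *      static struct fprop_global fg;
 *
 *      err = fprop_global_init(&fg);
 *      ...
 *      once per aging interval, pass the number of periods elapsed
 *      since the last call:
 *      fprop_new_period(&fg, 1);
 *      ...
 *      fprop_global_destroy(&fg);
 */
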
/*
 *  ---- SINGLE ----
 */
struct fprop_local_single {
        /* the local events counter */
        unsigned long events;
        /* Period in which we last updated events */
        unsigned int period;
        raw_spinlock_t lock;    /* Protect period and numerator */
};

#define INIT_FPROP_LOCAL_SINGLE(name)                   \
{       .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),    \
}

int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
        struct fprop_local_single *pl, unsigned long *numerator,
        unsigned long *denominator);

static inline
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
        unsigned long flags;

        local_irq_save(flags);
        __fprop_inc_single(p, pl);
        local_irq_restore(flags);
}

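/*
 * Usage sketch for the SINGLE variant (illustrative; "ev" is a hypothetical
 * per-event-type counter tied to the fprop_global "fg" from the sketch
 * above):
 *
 *      struct fprop_local_single ev;
 *      unsigned long num, den;
 *
 *      err = fprop_local_init_single(&ev);
 *      ...
 *      fprop_inc_single(&fg, &ev);          account one event of this type
 *      ...
 *      fprop_fraction_single(&fg, &ev, &num, &den);
 *      num/den is then this type's proportion of recent events
 *      ...
 *      fprop_local_destroy_single(&ev);
 *
 * Statically allocated counters can presumably be set up with
 * INIT_FPROP_LOCAL_SINGLE() instead of the runtime init call.
 */
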
/*
 * ---- PERCPU ----
 */
struct fprop_local_percpu {
        /* the local events counter */
        struct percpu_counter events;
        /* Period in which we last updated events */
        unsigned int period;
        raw_spinlock_t lock;    /* Protect period and numerator */
};

int fprop_local_init_percpu(struct fprop_local_percpu *pl);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
                            int max_frac);
void fprop_fraction_percpu(struct fprop_global *p,
        struct fprop_local_percpu *pl, unsigned long *numerator,
        unsigned long *denominator);

static inline
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
        unsigned long flags;

        local_irq_save(flags);
        __fprop_inc_percpu(p, pl);
        local_irq_restore(flags);
}

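/*
 * Usage sketch for the PERCPU variant (illustrative; mirrors the SINGLE
 * sketch above, with "ev" and "fg" again hypothetical):
 *
 *      struct fprop_local_percpu ev;
 *      unsigned long num, den;
 *
 *      err = fprop_local_init_percpu(&ev);
 *      ...
 *      fprop_inc_percpu(&fg, &ev);
 *      or, with a cap of max_frac/FPROP_FRAC_BASE, where the event is
 *      presumably accounted only while this type stays within that
 *      fraction of all events in the period:
 *      __fprop_inc_percpu_max(&fg, &ev, max_frac);
 *      ...
 *      fprop_fraction_percpu(&fg, &ev, &num, &den);
 *      ...
 *      fprop_local_destroy_percpu(&ev);
 */
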
#endif