linux/include/linux/psi_types.h
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H

#include <linux/kthread.h>
#include <linux/seqlock.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/wait.h>

#ifdef CONFIG_PSI

/* Tracked task states */
enum psi_task_count {
        NR_IOWAIT,
        NR_MEMSTALL,
        NR_RUNNING,
        /*
         * This can't have values other than 0 or 1 and could be
         * implemented as a bit flag. But for now we still have room
         * in the first cacheline of psi_group_cpu, and this way we
         * don't have to special case any state tracking for it.
         */
        NR_ONCPU,
        NR_PSI_TASK_COUNTS = 4,
};

/* Task state bitmasks */
#define TSK_IOWAIT      (1 << NR_IOWAIT)
#define TSK_MEMSTALL    (1 << NR_MEMSTALL)
#define TSK_RUNNING     (1 << NR_RUNNING)
#define TSK_ONCPU       (1 << NR_ONCPU)
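
/*
 * Illustrative sketch, not part of this header: how a TSK_* set/clear
 * bitmask maps onto the per-CPU tasks[] counters declared below in
 * struct psi_group_cpu. The helper name is hypothetical; the real
 * update path lives in kernel/sched/psi.c.
 */
static inline void psi_sketch_task_change(unsigned int *tasks,
                                          unsigned int clear,
                                          unsigned int set)
{
        int t;

        for (t = 0; t < NR_PSI_TASK_COUNTS; t++) {
                if (clear & (1 << t))
                        tasks[t]--;     /* task left this state */
                if (set & (1 << t))
                        tasks[t]++;     /* task entered this state */
        }
}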

/* Resources that workloads could be stalled on */
enum psi_res {
        PSI_IO,
        PSI_MEM,
        PSI_CPU,
        NR_PSI_RESOURCES = 3,
};

/*
 * Pressure states for each resource:
 *
 * SOME: Stalled tasks & working tasks
 * FULL: Stalled tasks & no working tasks
 */
enum psi_states {
        PSI_IO_SOME,
        PSI_IO_FULL,
        PSI_MEM_SOME,
        PSI_MEM_FULL,
        PSI_CPU_SOME,
        /* Only per-CPU, to weigh the CPU in the global average: */
        PSI_NONIDLE,
        NR_PSI_STATES = 6,
};
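
/*
 * Illustrative sketch, not part of this header: one way the SOME/FULL
 * states above can be derived from the per-CPU task counts (compare
 * the test_state() logic in kernel/sched/psi.c; the name used here is
 * hypothetical).
 */
static inline bool psi_sketch_test_state(unsigned int *tasks,
                                         enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                /* runnable tasks beyond the one currently on the CPU */
                return tasks[NR_RUNNING] > tasks[NR_ONCPU];
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                       tasks[NR_RUNNING];
        default:
                return false;
        }
}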

enum psi_aggregators {
        PSI_AVGS = 0,
        PSI_POLL,
        NR_PSI_AGGREGATORS,
};

struct psi_group_cpu {
        /* 1st cacheline updated by the scheduler */

        /* Aggregator needs to know of concurrent changes */
        seqcount_t seq ____cacheline_aligned_in_smp;

        /* States of the tasks belonging to this group */
        unsigned int tasks[NR_PSI_TASK_COUNTS];

        /* Aggregate pressure state derived from the tasks */
        u32 state_mask;

        /* Period time sampling buckets for each state of interest (ns) */
        u32 times[NR_PSI_STATES];

        /* Time of last task change in this group (rq_clock) */
        u64 state_start;

        /* 2nd cacheline updated by the aggregator */

        /* Delta detection against the sampling buckets */
        u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
                        ____cacheline_aligned_in_smp;
};
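
/*
 * Illustrative sketch, not part of this header: how an aggregator can
 * take a consistent snapshot of the time buckets above under the
 * seqcount and turn it into per-period deltas via times_prev[]. This
 * is simplified (the real get_recent_times() in kernel/sched/psi.c
 * also folds in the currently active state via state_start); the
 * helper name is hypothetical.
 */
static inline void psi_sketch_sample_cpu(struct psi_group_cpu *groupc,
                                         enum psi_aggregators aggregator,
                                         u32 *deltas)
{
        u32 times[NR_PSI_STATES];
        unsigned int seq;
        int s;

        /* Retry until the snapshot was not torn by a concurrent writer */
        do {
                seq = read_seqcount_begin(&groupc->seq);
                for (s = 0; s < NR_PSI_STATES; s++)
                        times[s] = groupc->times[s];
        } while (read_seqcount_retry(&groupc->seq, seq));

        /* Report only the growth since this aggregator last looked */
        for (s = 0; s < NR_PSI_STATES; s++) {
                deltas[s] = times[s] - groupc->times_prev[aggregator][s];
                groupc->times_prev[aggregator][s] = times[s];
        }
}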

/* PSI growth tracking window */
struct psi_window {
        /* Window size in ns */
        u64 size;

        /* Start time of the current window in ns */
        u64 start_time;

        /* Value at the start of the window */
        u64 start_value;

        /* Value growth in the previous window */
        u64 prev_growth;
};
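
/*
 * Illustrative sketch, not part of this header: estimating growth
 * inside the tracking window above. While the current window is only
 * partially elapsed, a proportional share of prev_growth is added,
 * assuming roughly linear growth across the previous window (compare
 * window_update() in kernel/sched/psi.c; the name and the open-coded
 * 64-bit division below are simplifications, the kernel would use
 * div64_u64() from <linux/math64.h>).
 */
static inline u64 psi_sketch_window_growth(struct psi_window *win,
                                           u64 now, u64 value)
{
        u64 elapsed = now - win->start_time;
        u64 growth = value - win->start_value;

        if (elapsed >= win->size) {
                /* Window expired: roll it over and remember its growth */
                win->start_time = now;
                win->start_value = value;
                win->prev_growth = growth;
                return growth;
        }

        /* Blend in the unexpired portion of the previous window */
        growth += win->prev_growth * (win->size - elapsed) / win->size;
        return growth;
}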

struct psi_trigger {
        /* PSI state being monitored by the trigger */
        enum psi_states state;

        /* User-specified threshold in ns */
        u64 threshold;

        /* List node inside triggers list */
        struct list_head node;

        /* Backpointer needed during trigger destruction */
        struct psi_group *group;

        /* Wait queue for polling */
        wait_queue_head_t event_wait;

        /* Pending event flag */
        int event;

        /* Tracking window */
        struct psi_window win;

        /*
         * Time last event was generated. Used for rate-limiting
         * events to one per window
         */
        u64 last_event_time;

        /* Refcounting to prevent premature destruction */
        struct kref refcount;
};
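
/*
 * Illustrative sketch, not part of this header: how userspace arms a
 * trigger like the one above. Per Documentation/accounting/psi.rst, a
 * string of the form "<some|full> <stall us> <window us>" written to
 * /proc/pressure/{cpu,memory,io} creates a psi_trigger, and POLLPRI is
 * delivered through event_wait when stall time inside the window
 * crosses the threshold. Userspace example, kept in a comment since it
 * does not belong in kernel code:
 *
 *      #include <fcntl.h>
 *      #include <poll.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *      const char trig[] = "some 150000 1000000"; // 150ms stall per 1s window
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *      write(fd, trig, strlen(trig) + 1);
 *      while (poll(&pfd, 1, -1) > 0) {
 *              if (pfd.revents & POLLERR)
 *                      break;          // monitor went away
 *              if (pfd.revents & POLLPRI)
 *                      ;               // pressure threshold exceeded
 *      }
 *      close(fd);
 */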

struct psi_group {
        /* Protects data used by the aggregator */
        struct mutex avgs_lock;

        /* Per-cpu task state & time tracking */
        struct psi_group_cpu __percpu *pcpu;

        /* Running pressure averages */
        u64 avg_total[NR_PSI_STATES - 1];
        u64 avg_last_update;
        u64 avg_next_update;

        /* Aggregator work control */
        struct delayed_work avgs_work;

        /* Total stall times and sampled pressure averages */
        u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
        unsigned long avg[NR_PSI_STATES - 1][3];

        /* Monitor work control */
        struct task_struct __rcu *poll_task;
        struct timer_list poll_timer;
        wait_queue_head_t poll_wait;
        atomic_t poll_wakeup;

        /* Protects data used by the monitor */
        struct mutex trigger_lock;

        /* Configured polling triggers */
        struct list_head triggers;
        u32 nr_triggers[NR_PSI_STATES - 1];
        u32 poll_states;
        u64 poll_min_period;

        /* Total stall times at the start of monitor activation */
        u64 polling_total[NR_PSI_STATES - 1];
        u64 polling_next_update;
        u64 polling_until;
};
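
/*
 * Illustrative sketch, not part of this header: avg[s][3] above backs
 * the avg10/avg60/avg300 percentages shown in /proc/pressure/*. Each
 * slot is conceptually an exponentially decaying average of the share
 * of time state s was active during the last sampling period; the
 * helper below is a hypothetical fixed-point simplification of the
 * calc_avgs()/calc_load() machinery, with per-window decay constants
 * chosen for the 10s, 60s and 300s horizons.
 */
static inline unsigned long psi_sketch_decay_avg(unsigned long avg,
                                                 unsigned long decay,
                                                 unsigned long pct)
{
        /* avg = avg * decay + pct * (1 - decay), all in 1<<11 fixed point */
        avg *= decay;
        avg += pct * ((1UL << 11) - decay);
        return avg >> 11;
}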

#else /* CONFIG_PSI */

struct psi_group { };

#endif /* CONFIG_PSI */

#endif /* _LINUX_PSI_TYPES_H */