linux/include/linux/sched/topology.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

#define SD_LOAD_BALANCE         0x0001  /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE      0x0002  /* Balance when about to become idle */
#define SD_BALANCE_EXEC         0x0004  /* Balance on exec */
#define SD_BALANCE_FORK         0x0008  /* Balance on fork, clone */
#define SD_BALANCE_WAKE         0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE          0x0020  /* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY     0x0040  /* Groups have different max cpu capacities */
#define SD_SHARE_CPUCAPACITY    0x0080  /* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN    0x0100  /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES  0x0200  /* Domain members share cpu pkg resources */
#define SD_SERIALIZE            0x0400  /* Only a single load balancing instance */
#define SD_ASYM_PACKING         0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING       0x1000  /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP              0x2000  /* sched_domains of this level overlap */
#define SD_NUMA                 0x4000  /* cross-node balancing */

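/*
 * The SD_* flags above are OR'ed together into sched_domain::flags for each
 * topology level; per-level defaults come from the sched_domain_flags_f
 * callbacks below (e.g. cpu_smt_flags()), and the balancing code tests them
 * with a bitwise AND, e.g. "if (sd->flags & SD_NUMA)".
 */
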
/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT    SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)

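/*
 * SCHED_FIXEDPOINT_SHIFT is 10, so SCHED_CAPACITY_SCALE is 1024: the biggest
 * CPU in the system running at full speed has capacity 1024.  A typical
 * fixed-point scaling step then looks like (illustration only):
 *
 *      scaled = (value * capacity) >> SCHED_CAPACITY_SHIFT;
 */
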
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
        return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
        return SD_NUMA;
}
#endif

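/*
 * The helpers above match the sched_domain_flags_f signature defined further
 * down; architectures use them as the per-level sd_flags callbacks in their
 * topology tables (see the sketch near set_sched_topology() below).
 */
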
extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
        int relax_domain_level;
};

#define SD_ATTR_INIT    (struct sched_domain_attr) {    \
        .relax_domain_level = -1,                       \
}

extern int sched_domain_level_max;

struct sched_group;

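/*
 * State shared by all CPUs in the span of a sched_domain (typically the
 * last-level-cache level), reference counted via @ref.  nr_busy_cpus and
 * has_idle_cores are hints consulted by the wakeup idle-CPU search and by
 * nohz idle balancing.
 */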
struct sched_domain_shared {
        atomic_t        ref;
        atomic_t        nr_busy_cpus;
        int             has_idle_cores;
};

struct sched_domain {
        /* These fields must be setup */
        struct sched_domain *parent;    /* top domain must be null terminated */
        struct sched_domain *child;     /* bottom domain must be null terminated */
        struct sched_group *groups;     /* the balancing groups of the domain */
        unsigned long min_interval;     /* Minimum balance interval ms */
        unsigned long max_interval;     /* Maximum balance interval ms */
        unsigned int busy_factor;       /* less balancing by factor if busy */
        unsigned int imbalance_pct;     /* No balance until over watermark */
        unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
        unsigned int busy_idx;          /* rq->cpu_load[] index used when busy */
        unsigned int idle_idx;          /* rq->cpu_load[] index used when idle */
        unsigned int newidle_idx;       /* rq->cpu_load[] index for newly-idle balance */
        unsigned int wake_idx;          /* rq->cpu_load[] index used on task wakeup */
        unsigned int forkexec_idx;      /* rq->cpu_load[] index used on fork/exec */
        unsigned int smt_gain;

        int nohz_idle;                  /* NOHZ IDLE status */
        int flags;                      /* See SD_* */
        int level;

        /* Runtime fields. */
        unsigned long last_balance;     /* init to jiffies. units in jiffies */
        unsigned int balance_interval;  /* initialise to 1. units in ms. */
        unsigned int nr_balance_failed; /* initialise to 0 */

        /* idle_balance() stats */
        u64 max_newidle_lb_cost;
        unsigned long next_decay_max_lb_cost;

        u64 avg_scan_cost;              /* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
        /* load_balance() stats */
        unsigned int lb_count[CPU_MAX_IDLE_TYPES];
        unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
        unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
        unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
        unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
        unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
        unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
        unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

        /* Active load balancing */
        unsigned int alb_count;
        unsigned int alb_failed;
        unsigned int alb_pushed;

        /* SD_BALANCE_EXEC stats */
        unsigned int sbe_count;
        unsigned int sbe_balanced;
        unsigned int sbe_pushed;

        /* SD_BALANCE_FORK stats */
        unsigned int sbf_count;
        unsigned int sbf_balanced;
        unsigned int sbf_pushed;

        /* try_to_wake_up() stats */
        unsigned int ttwu_wake_remote;
        unsigned int ttwu_move_affine;
        unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
        char *name;
#endif
        union {
                void *private;          /* used during construction */
                struct rcu_head rcu;    /* used during destruction */
        };
        struct sched_domain_shared *shared;

        unsigned int span_weight;
        /*
         * Span of all CPUs in this domain.
         *
         * NOTE: this field is variable length. (Allocated dynamically
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
        unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
        return to_cpumask(sd->span);
}

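/*
 * sched_domain::span is a flexible array: the allocation site reserves an
 * extra cpumask_size() bytes past the end of the structure for it.  Walking
 * the CPUs covered by a domain therefore looks like (illustration only):
 *
 *      int cpu;
 *
 *      for_each_cpu(cpu, sched_domain_span(sd))
 *              do_something(cpu);
 */
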
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                                    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

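/*
 * Sketch of the intended usage (the main caller is the cpuset code): build
 * an array of cpumasks, one per requested root domain, and hand it to
 * partition_sched_domains(), which takes ownership of the array:
 *
 *      cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *      if (doms) {
 *              cpumask_copy(doms[0], cpu_active_mask);
 *              partition_sched_domains(1, doms, NULL);
 *      }
 */
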
bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP    0x01

struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_domain_shared **__percpu sds;
        struct sched_group **__percpu sg;
        struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
        sched_domain_mask_f mask;
        sched_domain_flags_f sd_flags;
        int                 flags;
        int                 numa_level;
        struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
        char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)             .name = #type
#else
# define SD_INIT_NAME(type)
#endif

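/*
 * Architectures that need a non-default topology describe it as a
 * NULL-terminated array of levels, smallest span first, and install it with
 * set_sched_topology().  A sketch modelled on the kernel's default table
 * (the cpu_*_mask() helpers come from <linux/topology.h> and the
 * architecture code):
 *
 *      static struct sched_domain_topology_level my_topology[] = {
 *      #ifdef CONFIG_SCHED_SMT
 *              { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *      #endif
 *      #ifdef CONFIG_SCHED_MC
 *              { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *      #endif
 *              { cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *              { NULL, },
 *      };
 *
 *      set_sched_topology(my_topology);
 */
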
#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                        struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
        return true;
}

#endif  /* !CONFIG_SMP */

static inline int task_node(const struct task_struct *p)
{
        return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */