linux/include/linux/sched/topology.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

#define SD_BALANCE_NEWIDLE      0x0001  /* Balance when about to become idle */
#define SD_BALANCE_EXEC         0x0002  /* Balance on exec */
#define SD_BALANCE_FORK         0x0004  /* Balance on fork, clone */
#define SD_BALANCE_WAKE         0x0008  /* Balance on wakeup */
#define SD_WAKE_AFFINE          0x0010  /* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY     0x0020  /* Domain members have different CPU capacities */
#define SD_SHARE_CPUCAPACITY    0x0040  /* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN    0x0080  /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES  0x0100  /* Domain members share CPU pkg resources */
#define SD_SERIALIZE            0x0200  /* Only a single load balancing instance */
#define SD_ASYM_PACKING         0x0400  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING       0x0800  /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP              0x1000  /* sched_domains of this level overlap */
#define SD_NUMA                 0x2000  /* cross-node balancing */
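
/*
 * Illustrative sketch (not part of the original header): the flags above
 * are stored per level in sched_domain::flags, so balancing code tests
 * them on the domain it is currently working on, e.g.:
 *
 *	if (sd->flags & SD_NUMA)
 *		...		(apply cross-node balancing rules)
 */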

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
        return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
        return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
        int relax_domain_level;
};

#define SD_ATTR_INIT    (struct sched_domain_attr) {    \
        .relax_domain_level = -1,                       \
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
        atomic_t        ref;            /* users of this structure */
        atomic_t        nr_busy_cpus;   /* non-idle CPUs in the sharing domain */
        int             has_idle_cores; /* hint that an idle core may exist */
};

struct sched_domain {
        /* These fields must be set up */
        struct sched_domain __rcu *parent;      /* NULL for the top-level domain */
        struct sched_domain __rcu *child;       /* NULL for the bottom-level domain */
        struct sched_group *groups;     /* the balancing groups of the domain */
        unsigned long min_interval;     /* Minimum balance interval ms */
        unsigned long max_interval;     /* Maximum balance interval ms */
        unsigned int busy_factor;       /* less balancing by factor if busy */
        unsigned int imbalance_pct;     /* No balance until over watermark */
        unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */

        int nohz_idle;                  /* NOHZ IDLE status */
        int flags;                      /* See SD_* */
        int level;

        /* Runtime fields. */
        unsigned long last_balance;     /* init to jiffies. units in jiffies */
        unsigned int balance_interval;  /* initialise to 1. units in ms. */
        unsigned int nr_balance_failed; /* initialise to 0 */

        /* idle_balance() stats */
        u64 max_newidle_lb_cost;
        unsigned long next_decay_max_lb_cost;

        u64 avg_scan_cost;              /* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
        /* load_balance() stats */
        unsigned int lb_count[CPU_MAX_IDLE_TYPES];
        unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
        unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
        unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
        unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
        unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
        unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
        unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

        /* Active load balancing */
        unsigned int alb_count;
        unsigned int alb_failed;
        unsigned int alb_pushed;

        /* SD_BALANCE_EXEC stats */
        unsigned int sbe_count;
        unsigned int sbe_balanced;
        unsigned int sbe_pushed;

        /* SD_BALANCE_FORK stats */
        unsigned int sbf_count;
        unsigned int sbf_balanced;
        unsigned int sbf_pushed;

        /* try_to_wake_up() stats */
        unsigned int ttwu_wake_remote;
        unsigned int ttwu_move_affine;
        unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
        char *name;
#endif
        union {
                void *private;          /* used during construction */
                struct rcu_head rcu;    /* used during destruction */
        };
        struct sched_domain_shared *shared;

        unsigned int span_weight;
        /*
         * Span of all CPUs in this domain.
         *
         * NOTE: this field is variable length. (Allocated dynamically
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
        unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
        return to_cpumask(sd->span);
}
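
/*
 * Illustrative sketch (not from this header): given a CPU's lowest-level
 * domain, the hierarchy is walked bottom-up under RCU (matching the __rcu
 * annotation on ->parent), and each level's span can be scanned with the
 * usual cpumask iterators:
 *
 *	int cpu;
 *
 *	rcu_read_lock();
 *	for (; sd; sd = rcu_dereference(sd->parent))
 *		for_each_cpu(cpu, sched_domain_span(sd))
 *			...;
 *	rcu_read_unlock();
 */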

extern void partition_sched_domains_locked(int ndoms_new,
                                           cpumask_var_t doms_new[],
                                           struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                                    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
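
/*
 * Illustrative sketch, in the style of the cpuset code (the symbols used
 * are real, the sequence is an example): install a single root domain
 * spanning all active CPUs, with default attributes:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *	struct sched_domain_attr attr = SD_ATTR_INIT;
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, &attr);
 *	}
 */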

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP    0x01

struct sd_data {
        struct sched_domain *__percpu *sd;
        struct sched_domain_shared *__percpu *sds;
        struct sched_group *__percpu *sg;
        struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
        sched_domain_mask_f mask;
        sched_domain_flags_f sd_flags;
        int                 flags;
        int                 numa_level;
        struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
        char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)             .name = #type
#else
# define SD_INIT_NAME(type)
#endif
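
/*
 * Illustrative sketch, modeled on the default table in
 * kernel/sched/topology.c ("my_topology" is a made-up name): an
 * architecture installs its own levels, ordered from the smallest
 * domain upwards, terminated by an empty entry:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */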

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
                               struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                        struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
        return true;
}

#endif  /* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
        return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
        return 0;
}
#endif
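
/*
 * Illustrative note: an architecture overrides the weak defaults above by
 * defining the macro in its asm/topology.h before this header is included;
 * arm64, for instance, maps CPU capacity to the generic arch_topology
 * driver (sketch):
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 */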

static inline int task_node(const struct task_struct *p)
{
        return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */