linux/kernel/sched/cpudeadline.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpudeadline.c
 *
 *  Global CPU deadline management
 *
 *  Author: Juri Lelli <j.lelli@sssup.it>
 */
#include "sched.h"

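/*
 * cp->elements serves two roles at once: entry i, read as a heap node,
 * holds the (cpu, dl) pair stored at heap position i; the same entry,
 * read as a reverse map, holds in ->idx the heap position currently
 * occupied by CPU i (or IDX_INVALID if CPU i is not in the heap).
 * This is why every move in the heapify helpers below is followed by
 * an update of cp->elements[cp->elements[idx].cpu].idx.
 */
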
static inline int parent(int i)
{
        return (i - 1) >> 1;
}

static inline int left_child(int i)
{
        return (i << 1) + 1;
}

static inline int right_child(int i)
{
        return (i << 1) + 2;
}

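/*
 * These helpers implement the usual 0-based binary-heap indexing over
 * the elements array: for example, node 2 has its children at
 * left_child(2) = 5 and right_child(2) = 6, and both map back through
 * parent(5) = parent(6) = 2.
 */
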
static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
        int l, r, largest;

        int orig_cpu = cp->elements[idx].cpu;
        u64 orig_dl = cp->elements[idx].dl;

        if (left_child(idx) >= cp->size)
                return;

        /* adapted from lib/prio_heap.c */
        while (1) {
                u64 largest_dl;

                l = left_child(idx);
                r = right_child(idx);
                largest = idx;
                largest_dl = orig_dl;

                if ((l < cp->size) && dl_time_before(orig_dl,
                                                cp->elements[l].dl)) {
                        largest = l;
                        largest_dl = cp->elements[l].dl;
                }
                if ((r < cp->size) && dl_time_before(largest_dl,
                                                cp->elements[r].dl))
                        largest = r;

                if (largest == idx)
                        break;

                /* pull largest child onto idx */
                cp->elements[idx].cpu = cp->elements[largest].cpu;
                cp->elements[idx].dl = cp->elements[largest].dl;
                cp->elements[cp->elements[idx].cpu].idx = idx;
                idx = largest;
        }
        /* actual push down of saved original values orig_* */
        cp->elements[idx].cpu = orig_cpu;
        cp->elements[idx].dl = orig_dl;
        cp->elements[cp->elements[idx].cpu].idx = idx;
}

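/*
 * Note that cpudl_heapify_down() above and cpudl_heapify_up() below do
 * not swap elements pairwise at each step. The displaced (cpu, dl)
 * pair is saved once, each child (or parent) on the path is pulled
 * into the moving hole, and the saved pair is written back only at its
 * final position: one store per level instead of three per swap.
 */
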
static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
        int p;

        int orig_cpu = cp->elements[idx].cpu;
        u64 orig_dl = cp->elements[idx].dl;

        if (idx == 0)
                return;

        do {
                p = parent(idx);
                if (dl_time_before(orig_dl, cp->elements[p].dl))
                        break;
                /* pull parent onto idx */
                cp->elements[idx].cpu = cp->elements[p].cpu;
                cp->elements[idx].dl = cp->elements[p].dl;
                cp->elements[cp->elements[idx].cpu].idx = idx;
                idx = p;
        } while (idx != 0);
        /* actual push up of saved original values orig_* */
        cp->elements[idx].cpu = orig_cpu;
        cp->elements[idx].dl = orig_dl;
        cp->elements[cp->elements[idx].cpu].idx = idx;
}

static void cpudl_heapify(struct cpudl *cp, int idx)
{
        if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
                                cp->elements[idx].dl))
                cpudl_heapify_up(cp, idx);
        else
                cpudl_heapify_down(cp, idx);
}

static inline int cpudl_maximum(struct cpudl *cp)
{
        return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - 1 if a suitable CPU was found, 0 otherwise
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask)
{
        const struct sched_dl_entity *dl_se = &p->dl;

        if (later_mask &&
            cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
                unsigned long cap, max_cap = 0;
                int cpu, max_cpu = -1;

                if (!static_branch_unlikely(&sched_asym_cpucapacity))
                        return 1;

                /* Ensure the capacity of the CPUs fits the task. */
                for_each_cpu(cpu, later_mask) {
                        if (!dl_task_fits_capacity(p, cpu)) {
                                cpumask_clear_cpu(cpu, later_mask);

                                cap = capacity_orig_of(cpu);

                                if (cap > max_cap ||
                                    (cpu == task_cpu(p) && cap == max_cap)) {
                                        max_cap = cap;
                                        max_cpu = cpu;
                                }
                        }
                }

                if (cpumask_empty(later_mask))
                        cpumask_set_cpu(max_cpu, later_mask);

                return 1;
        } else {
                int best_cpu = cpudl_maximum(cp);

                WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

                if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
                    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
                        if (later_mask)
                                cpumask_set_cpu(best_cpu, later_mask);

                        return 1;
                }
        }
        return 0;
}

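/*
 * A minimal sketch of how a caller is expected to drive cpudl_find().
 * The push path in deadline.c does something along these lines; the
 * helper below is hypothetical and not part of the kernel:
 *
 *	static int pick_push_target(struct cpudl *cp, struct task_struct *p,
 *				    struct cpumask *mask)
 *	{
 *		if (!cpudl_find(cp, p, mask))
 *			return -1;	// nobody runs a later deadline
 *
 *		// Every CPU left in *mask is a valid target; prefer the
 *		// task's current CPU when it qualifies.
 *		if (cpumask_test_cpu(task_cpu(p), mask))
 *			return task_cpu(p);
 *
 *		return cpumask_any(mask);
 *	}
 */
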
/*
 * cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_clear(struct cpudl *cp, int cpu)
{
        int old_idx, new_cpu;
        unsigned long flags;

        WARN_ON(!cpu_present(cpu));

        raw_spin_lock_irqsave(&cp->lock, flags);

        old_idx = cp->elements[cpu].idx;
        if (old_idx == IDX_INVALID) {
                /*
                 * Nothing to remove if old_idx was invalid.
                 * This could happen if rq_offline_dl() is
                 * called for a CPU without -dl tasks running.
                 */
        } else {
                new_cpu = cp->elements[cp->size - 1].cpu;
                cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
                cp->elements[old_idx].cpu = new_cpu;
                cp->size--;
                cp->elements[new_cpu].idx = old_idx;
                cp->elements[cpu].idx = IDX_INVALID;
                cpudl_heapify(cp, old_idx);

                cpumask_set_cpu(cpu, cp->free_cpus);
        }
        raw_spin_unlock_irqrestore(&cp->lock, flags);
}

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
        int old_idx;
        unsigned long flags;

        WARN_ON(!cpu_present(cpu));

        raw_spin_lock_irqsave(&cp->lock, flags);

        old_idx = cp->elements[cpu].idx;
        if (old_idx == IDX_INVALID) {
                int new_idx = cp->size++;

                cp->elements[new_idx].dl = dl;
                cp->elements[new_idx].cpu = cpu;
                cp->elements[cpu].idx = new_idx;
                cpudl_heapify_up(cp, new_idx);
                cpumask_clear_cpu(cpu, cp->free_cpus);
        } else {
                cp->elements[old_idx].dl = dl;
                cpudl_heapify(cp, old_idx);
        }

        raw_spin_unlock_irqrestore(&cp->lock, flags);
}

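/*
 * A small worked example of the invariant that cpudl_set() and
 * cpudl_clear() maintain (the deadline values are illustrative):
 *
 *	cpudl_set(cp, 0, 100);
 *	cpudl_set(cp, 1, 300);
 *	cpudl_set(cp, 2, 200);
 *
 * leaves CPU 1 (dl = 300), the latest earliest-deadline in the system,
 * at the root, so cpudl_maximum() returns 1. cpudl_clear(cp, 1) then
 * moves the last element into the root and sifts it down, leaving
 * CPU 2 (dl = 200) on top and marking CPU 1 free again.
 */
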
/*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: CPU attached to this root domain
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
        cpumask_set_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: CPU attached to this root domain
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
        cpumask_clear_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 *
 * Returns: 0 on success, -ENOMEM if an allocation fails
 */
int cpudl_init(struct cpudl *cp)
{
        int i;

        raw_spin_lock_init(&cp->lock);
        cp->size = 0;

        cp->elements = kcalloc(nr_cpu_ids,
                               sizeof(struct cpudl_item),
                               GFP_KERNEL);
        if (!cp->elements)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
                kfree(cp->elements);
                return -ENOMEM;
        }

        for_each_possible_cpu(i)
                cp->elements[i].idx = IDX_INVALID;

        return 0;
}

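/*
 * A minimal usage sketch, assuming a hypothetical container that owns
 * one cpudl instance (in the scheduler the owner is the root domain):
 *
 *	struct my_dl_domain {
 *		struct cpudl cpudl;
 *	};
 *
 *	static int my_dl_domain_init(struct my_dl_domain *d)
 *	{
 *		return cpudl_init(&d->cpudl);	// 0 or -ENOMEM
 *	}
 *
 * Every successful cpudl_init() must be paired with a call to
 * cpudl_cleanup() below to free the elements array and the free_cpus
 * mask.
 */
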
/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
        free_cpumask_var(cp->free_cpus);
        kfree(cp->elements);
}