linux/kernel/sched_cpupri.c
/*
 *  kernel/sched_cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in one of the
 *  following states:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2-dimensional bitmap (the first dimension for priority class, the
 *  second for CPUs in that class).  Therefore a typical application without
 *  affinity restrictions can find a suitable CPU with O(1) complexity (e.g.
 *  two bit searches).  For tasks with affinity restrictions, the algorithm
 *  has a worst-case complexity of O(min(102, nr_domcpus)), though the
 *  scenario that yields the worst-case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */
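
/*
 * For illustration only (a sketch, not part of the build): for a task with
 * no affinity restrictions, the lookup boils down to one bit search over
 * the priority classes plus one mask copy, which is where the O(1) claim
 * above comes from:
 *
 *	int pri = find_first_bit(cp->pri_active, CPUPRI_NR_PRIORITIES);
 *	cpumask_copy(lowest_mask, cp->pri_to_cpu[pri].mask);
 *
 * cpupri_find() below is the affinity-aware version of this idea.
 */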

#include "sched_cpupri.h"

/* Convert between the 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
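
/*
 * With the usual values MAX_RT_PRIO == 100 and MAX_PRIO == 140, the
 * mapping works out as follows (higher cpupri == more urgent):
 *
 *	task->prio			cpupri
 *	CPUPRI_INVALID (-1)	->	CPUPRI_INVALID (-1)
 *	140 (MAX_PRIO, idle)	->	CPUPRI_IDLE (0)
 *	100..139 (normal)	->	CPUPRI_NORMAL (1)
 *	99 (lowest RT)		->	2
 *	0 (highest RT)		->	101
 */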

#define for_each_cpupri_active(array, idx)                    \
  for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES);     \
       idx < CPUPRI_NR_PRIORITIES;                            \
       idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx+1))
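
/*
 * Note that find_first_bit()/find_next_bit() scan from bit 0 upwards, so
 * this macro visits the active priority levels in ascending order, i.e.
 * from the lowest priority (IDLE) to the highest (RT99).  cpupri_find()
 * relies on that ordering so that the first match it returns is the
 * lowest-priority set of CPUs that can take the task.
 */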

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - 1 if suitable CPUs were found, 0 otherwise
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	for_each_cpupri_active(cp->pri_active, idx) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];

		if (idx >= task_pri)
			break;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
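
/*
 * Typical usage (a sketch; compare find_lowest_rq() in sched_rt.c): the
 * RT push/pull logic hands cpupri_find() a per-cpu scratch mask and then
 * picks one CPU out of the result, e.g.:
 *
 *	if (cpupri_find(&task_rq(p)->rd->cpupri, p, lowest_mask))
 *		cpu = cpumask_first(lowest_mask);
 *
 * The names above follow that caller; anything beyond cpupri_find()
 * itself is illustrative.
 */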

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int                 *currpri = &cp->cpu_to_pri[cpu];
	int                  oldpri  = *currpri;
	unsigned long        flags;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the cpu was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being cleared from pri_active, and this cpu could be
	 * missed for a push or pull.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		spin_lock_irqsave(&vec->lock, flags);

		cpumask_set_cpu(cpu, vec->mask);
		vec->count++;
		if (vec->count == 1)
			set_bit(newpri, cp->pri_active);

		spin_unlock_irqrestore(&vec->lock, flags);
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		spin_lock_irqsave(&vec->lock, flags);

		vec->count--;
		if (!vec->count)
			clear_bit(oldpri, cp->pri_active);
		cpumask_clear_cpu(cpu, vec->mask);

		spin_unlock_irqrestore(&vec->lock, flags);
	}

	*currpri = newpri;
}
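
/*
 * To illustrate the add-before-remove ordering (a sketch): if a CPU moves
 * from one RT level to another, cpupri_set() first sets its bit in the
 * new level's vector (activating that level in pri_active if needed) and
 * only then clears it from the old one.  A concurrent cpupri_find() may
 * briefly see the CPU at both levels, which is harmless, but never at
 * neither, which could cause a push or pull to miss it.
 */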

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 * @bootmem: true if called during early boot, when allocations must not
 *           sleep (GFP_NOWAIT)
 *
 * Returns: 0 on success, -ENOMEM if a cpumask allocation fails.
 */
int cpupri_init(struct cpupri *cp, bool bootmem)
{
	gfp_t gfp = GFP_KERNEL;
	int i;

	if (bootmem)
		gfp = GFP_NOWAIT;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		spin_lock_init(&vec->lock);
		vec->count = 0;
		if (!zalloc_cpumask_var(&vec->mask, gfp))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;
	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
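
/*
 * Usage note (a sketch of the expected caller): the root-domain setup in
 * sched.c initializes one struct cpupri per root domain, passing
 * bootmem=true for the statically allocated default root domain, which is
 * set up before it is safe to sleep in the allocator.
 */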

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}