linux/kernel/sched/cpupri.c
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2-dimensional bitmap (the first dimension for priority class, the
 *  second for CPUs in that class).  Therefore a typical application without
 *  affinity restrictions can find a suitable CPU with O(1) complexity (e.g.
 *  two bit searches).  For tasks with affinity restrictions, the algorithm
 *  has a worst-case complexity of O(min(102, nr_domcpus)), though the
 *  scenario that yields the worst-case search is fairly contrived.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include "cpupri.h"

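/*
 * For reference (not part of the original file): the backing structures
 * this code manipulates, as declared in cpupri.h for kernels of this
 * vintage.  Each priority level owns one vec; the count records how many
 * CPUs sit at that level so readers can skip empty masks cheaply.
 *
 *      struct cpupri_vec {
 *              atomic_t        count;
 *              cpumask_var_t   mask;
 *      };
 *
 *      struct cpupri {
 *              struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *              int               cpu_to_pri[NR_CPUS];
 *      };
 */
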
/* Convert between the 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
        int cpupri;

        if (prio == CPUPRI_INVALID)
                cpupri = CPUPRI_INVALID;
        else if (prio == MAX_PRIO)
                cpupri = CPUPRI_IDLE;
        else if (prio >= MAX_RT_PRIO)
                cpupri = CPUPRI_NORMAL;
        else
                cpupri = MAX_RT_PRIO - prio + 1;

        return cpupri;
}
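
/*
 * Illustration only (not part of the original file): a hypothetical
 * self-test pinning down the mapping above, assuming MAX_RT_PRIO == 100
 * and MAX_PRIO == 140 as in this kernel, with CPUPRI_IDLE == 0 and
 * CPUPRI_NORMAL == 1 from cpupri.h.  Note the inversion: a numerically
 * lower task->prio (a more urgent task) maps to a numerically higher
 * cpupri index.
 */
#if 0
static void cpupri_check_convert_prio(void)
{
        BUG_ON(convert_prio(MAX_PRIO) != CPUPRI_IDLE);          /* 140 -> 0 */
        BUG_ON(convert_prio(MAX_PRIO - 20) != CPUPRI_NORMAL);   /* 120 -> 1 */
        BUG_ON(convert_prio(MAX_RT_PRIO - 1) != 2);             /* 99 -> 2, lowest RT */
        BUG_ON(convert_prio(0) != MAX_RT_PRIO + 1);             /* 0 -> 101, highest RT */
}
#endif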

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: 1 if suitable CPUs were found, 0 otherwise
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        int idx = 0;
        int task_pri = convert_prio(p->prio);

        BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

        for (idx = 0; idx < task_pri; idx++) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
                int skip = 0;

                if (!atomic_read(&(vec)->count))
                        skip = 1;
                /*
                 * When looking at the vector, we need to read the counter,
                 * do a memory barrier, then read the mask.
                 *
                 * Note: This is still all racy, but we can deal with it.
                 *  Ideally, we only want to look at masks that are set.
                 *
                 *  If a mask is not set, then the only thing wrong is that we
                 *  did a little more work than necessary.
                 *
                 *  If we read a zero count but the mask is set, because of the
                 *  memory barriers, that can only happen when the highest prio
                 *  task for a run queue has left the run queue, in which case,
                 *  it will be followed by a pull. If the task we are processing
                 *  fails to find a proper place to go, that pull request will
                 *  pull this task if the run queue is running at a lower
                 *  priority.
                 */
                smp_rmb();

                /* Need to do the rmb for every iteration */
                if (skip)
                        continue;

                if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                        continue;

                if (lowest_mask) {
                        cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

                        /*
                         * We have to ensure that we have at least one bit
                         * still set in the array, since the map could have
                         * been concurrently emptied between the first and
                         * second reads of vec->mask.  If we hit this
                         * condition, simply act as though we never hit this
                         * priority level and continue on.
                         */
                        if (cpumask_any(lowest_mask) >= nr_cpu_ids)
                                continue;
                }

                return 1;
        }

        return 0;
}
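
/*
 * Illustration only (not part of the original file): a rough sketch of
 * how the RT scheduler consumes cpupri_find(); compare find_lowest_rq()
 * in kernel/sched/rt.c, which uses a per-CPU scratch mask instead of the
 * on-stack mask shown here.  The function name is hypothetical.
 */
#if 0
static int example_pick_lowest_cpu(struct cpupri *cp, struct task_struct *p)
{
        struct cpumask lowest_mask;

        if (!cpupri_find(cp, p, &lowest_mask))
                return -1;      /* every eligible CPU runs at our priority or higher */

        /* Prefer the task's current CPU to preserve cache warmth. */
        if (cpumask_test_cpu(task_cpu(p), &lowest_mask))
                return task_cpu(p);

        return cpumask_any(&lowest_mask);
}
#endif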

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
        int *currpri = &cp->cpu_to_pri[cpu];
        int oldpri = *currpri;
        int do_mb = 0;

        newpri = convert_prio(newpri);

        BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

        if (newpri == oldpri)
                return;

        /*
         * If the cpu was currently mapped to a different value, we
         * need to map it to the new value then remove the old value.
         * Note, we must add the new value first, otherwise we risk the
         * cpu being missed by the priority loop in cpupri_find.
         */
        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

                cpumask_set_cpu(cpu, vec->mask);
                /*
                 * When adding a new vector, we update the mask first,
                 * do a write memory barrier, and then update the count, to
                 * make sure the vector is visible when count is set.
                 */
                smp_mb__before_atomic_inc();
                atomic_inc(&(vec)->count);
                do_mb = 1;
        }
        if (likely(oldpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

                /*
                 * Because the order of modification of the vec->count
                 * is important, we must make sure that the update
                 * of the new prio is seen before we decrement the
                 * old prio. This makes sure that the loop sees
                 * one or the other when we raise the priority of
                 * the run queue. We don't care about when we lower the
                 * priority, as that will trigger an rt pull anyway.
                 *
                 * We only need to do a memory barrier if we updated
                 * the new priority vec.
                 */
                if (do_mb)
                        smp_mb__after_atomic_inc();

                /*
                 * When removing from the vector, we decrement the counter
                 * first, do a memory barrier, and then clear the mask.
                 * (The _inc and _dec flavours of these barriers are
                 * interchangeable; later kernels spell this
                 * smp_mb__after_atomic().)
                 */
                atomic_dec(&(vec)->count);
                smp_mb__after_atomic_inc();
                cpumask_clear_cpu(cpu, vec->mask);
        }

        *currpri = newpri;
}
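
/*
 * Illustration only (not part of the original file): the barrier pairing
 * that cpupri_set() and cpupri_find() rely on, as a two-CPU interleaving
 * sketch for a priority raise.  Because the new mask is published before
 * the new count, and the old count is dropped only after the new count is
 * visible, a reader that misses the CPU in the new vector is guaranteed
 * to still find it accounted for in the old one.
 *
 *      CPU0: cpupri_set() (raise)          CPU1: cpupri_find()
 *      ---------------------------         -------------------------------
 *      cpumask_set_cpu(cpu, new->mask)     count = atomic_read(&vec->count)
 *      smp_mb__before_atomic_inc()         smp_rmb()
 *      atomic_inc(&new->count)             if (count) scan vec->mask
 *      smp_mb__after_atomic_inc()
 *      atomic_dec(&old->count)
 *      smp_mb__after_atomic_inc()
 *      cpumask_clear_cpu(cpu, old->mask)
 */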

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Returns: 0 on success, -ENOMEM if cpumask allocation fails.
 */
int cpupri_init(struct cpupri *cp)
{
        int i;

        memset(cp, 0, sizeof(*cp));

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];

                atomic_set(&vec->count, 0);
                if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }

        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;
        return 0;

cleanup:
        for (i--; i >= 0; i--)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
        return -ENOMEM;
}
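
/*
 * Illustration only (not part of the original file): a hypothetical
 * caller pairing cpupri_init() with cpupri_cleanup().  In the kernel
 * proper, the context is embedded in each root domain (see
 * init_rootdomain() and free_rootdomain()).
 */
#if 0
static int example_cpupri_usage(struct cpupri *cp)
{
        if (cpupri_init(cp))
                return -ENOMEM;

        /* CPU 3 picks up a normal (CFS) task: prio 120 maps to CPUPRI_NORMAL. */
        cpupri_set(cp, 3, MAX_RT_PRIO + 20);

        /* CPU 3 goes idle: MAX_PRIO maps to CPUPRI_IDLE. */
        cpupri_set(cp, 3, MAX_PRIO);

        cpupri_cleanup(cp);
        return 0;
}
#endif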

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
        int i;

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
}