1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include "sched_cpupri.h"
31
32
33static int convert_prio(int prio)
34{
35 int cpupri;
36
37 if (prio == CPUPRI_INVALID)
38 cpupri = CPUPRI_INVALID;
39 else if (prio == MAX_PRIO)
40 cpupri = CPUPRI_IDLE;
41 else if (prio >= MAX_RT_PRIO)
42 cpupri = CPUPRI_NORMAL;
43 else
44 cpupri = MAX_RT_PRIO - prio + 1;
45
46 return cpupri;
47}
48
/*
 * Iterate @idx over every priority level that currently has at least
 * one CPU in its vector, in increasing priority order.  @array is the
 * cp->pri_active bitmap; a set bit means that level's vec->count != 0.
 */
#define for_each_cpupri_active(array, idx) \
	for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES); \
	     idx < CPUPRI_NR_PRIORITIES; \
	     idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx+1))
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (may be NULL)
 *
 * Scans the active priority vectors from lowest to highest and stops at
 * the first level strictly below @p's own priority that contains a CPU
 * @p is allowed to run on.  The result reflects the state at the moment
 * of the scan; by the time the call returns, concurrent cpupri_set()
 * calls may already have changed CPU priorities.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	for_each_cpupri_active(cp->pri_active, idx) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];

		/* Only levels strictly below the task's priority qualify. */
		if (idx >= task_pri)
			break;

		/* Skip levels with no CPU the task is allowed to run on. */
		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * vec->mask was read twice (above and here) without
			 * holding vec->lock, so it may have been emptied in
			 * between by a concurrent cpupri_set().  If the
			 * intersection came back empty, act as though this
			 * level never matched and keep scanning.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
104
105
106
107
108
109
110
111
112
113
114
/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (pre-convert_prio scale) to assign to this CPU
 *
 * Moves @cpu from its old priority vector to the one for @newpri and
 * keeps the pri_active bitmap in sync with each vector's count.
 *
 * NOTE(review): updates to a single CPU's entry appear to rely on the
 * caller for serialization (only vec->lock is taken here) — confirm
 * against the call sites before assuming this is safe to call
 * concurrently for the same @cpu.
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	unsigned long flags;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * The cpu is added to the new vector BEFORE it is removed from
	 * the old one, so that a concurrent lockless cpupri_find() never
	 * observes a window in which the cpu is in neither vector (and
	 * could therefore be missed entirely).
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		spin_lock_irqsave(&vec->lock, flags);

		cpumask_set_cpu(cpu, vec->mask);
		vec->count++;
		/* First cpu at this level: publish it in pri_active. */
		if (vec->count == 1)
			set_bit(newpri, cp->pri_active);

		spin_unlock_irqrestore(&vec->lock, flags);
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		spin_lock_irqsave(&vec->lock, flags);

		vec->count--;
		/* Last cpu left this level: retract it from pri_active. */
		if (!vec->count)
			clear_bit(oldpri, cp->pri_active);
		cpumask_clear_cpu(cpu, vec->mask);

		spin_unlock_irqrestore(&vec->lock, flags);
	}

	*currpri = newpri;
}
162
163
164
165
166
167
168
169
170int cpupri_init(struct cpupri *cp, bool bootmem)
171{
172 gfp_t gfp = GFP_KERNEL;
173 int i;
174
175 if (bootmem)
176 gfp = GFP_NOWAIT;
177
178 memset(cp, 0, sizeof(*cp));
179
180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
181 struct cpupri_vec *vec = &cp->pri_to_cpu[i];
182
183 spin_lock_init(&vec->lock);
184 vec->count = 0;
185 if (!zalloc_cpumask_var(&vec->mask, gfp))
186 goto cleanup;
187 }
188
189 for_each_possible_cpu(i)
190 cp->cpu_to_pri[i] = CPUPRI_INVALID;
191 return 0;
192
193cleanup:
194 for (i--; i >= 0; i--)
195 free_cpumask_var(cp->pri_to_cpu[i].mask);
196 return -ENOMEM;
197}
198
199
200
201
202
203void cpupri_cleanup(struct cpupri *cp)
204{
205 int i;
206
207 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
208 free_cpumask_var(cp->pri_to_cpu[i].mask);
209}
210