/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "qom/cpu.h"
#include "sysemu/cpus.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

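/* Take/release the lock protecting the global CPU list.  */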
void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

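/* CPU indices are handed out sequentially in CPU list order.  Once an
 * index has been auto-assigned, boards must not mix in manually chosen
 * indices; the asserts in cpu_list_add/cpu_list_remove enforce this.
 */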
static bool cpu_index_auto_assigned;

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

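/* Running an empty cpu_exec_start/cpu_exec_end pair makes the new CPU
 * wait for any exclusive ("safe") work that was already pending when it
 * was created, before it starts executing guest code.
 */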
static void finish_safe_work(CPUState *cpu)
{
    cpu_exec_start(cpu);
    cpu_exec_end(cpu);
}

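/* Insert cpu into the global list, assigning an index if the board did
 * not choose one, then flush pending safe work before the CPU runs.
 */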
void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}

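/* Undo cpu_list_add; a no-op if the CPU was never added to the list.  */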
void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* There is nothing to undo since cpu_exec_init() hasn't been called.  */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

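/* A per-CPU work item.  "free" marks heap-allocated items owned by the
 * queue, "exclusive" requests that func run inside an exclusive section,
 * and "done" signals completion back to a synchronous caller.
 */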
struct qemu_work_item {
    struct qemu_work_item *next;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

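/* Append wi to cpu's work queue under work_mutex, then kick the CPU so
 * that it leaves the execution loop and processes the queue.
 */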
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

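/* Run func on cpu synchronously.  If cpu is the current CPU, func runs
 * immediately; otherwise the item is queued and the caller sleeps on
 * qemu_work_cond, dropping "mutex" (the BQL in system mode) while waiting.
 */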
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}

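/* Queue func to run on cpu asynchronously; the heap-allocated work item
 * is freed by process_queued_cpu_work after func has run.
 */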
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    atomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* Three cases are possible, given the barrier pairing with
     * start_exclusive:
     *
     * 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * We are counted in pending_cpus and may keep running; cpu_exec_end
     * will see cpu->has_waiter == true and release the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1
     * here.  This includes the case when an exclusive item is running
     * right now.  We are not counted, so we see cpu->has_waiter == false,
     * back out, and wait for the exclusive section to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, so let the exclusive item
             * run; back out of cpu_exec until it has finished.
             * exclusive_idle is safe here because we hold the lock.
             */
            atomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus == 0 and we can enter cpu_exec again.  */
            atomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus; simply keep running, and let
             * cpu_exec_end release the waiter.
             */
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* Again three cases, pairing with start_exclusive:
     *
     * 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true, so we decrement pending_cpus
     * and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1
     * here.  This includes the case when an exclusive item started after
     * we set cpu->running to false and before we read pending_cpus.  We
     * see cpu->has_waiter == false and do not touch pending_cpus; the
     * next cpu_exec_start will call exclusive_idle if still necessary,
     * waiting for the exclusive item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        qemu_mutex_lock(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
        qemu_mutex_unlock(&qemu_cpu_list_lock);
    }
}

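/* Like async_run_on_cpu, except func runs inside an exclusive section,
 * with every other CPU stopped outside cpu_exec, so it can safely modify
 * state shared by all CPUs (tb_flush, for example, runs this way).
 */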
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}

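/* Drain cpu's work queue; called from the CPU's own thread.  work_mutex
 * is dropped around each callback so that work items may queue further
 * work, and qemu_work_cond wakes any do_run_on_cpu waiters at the end.
 */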
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following
             * deadlock: 1) start_exclusive() is called with the BQL taken
             * while another CPU is running; 2) cpu_exec in the other CPU
             * tries to take the BQL, so it goes to sleep; start_exclusive()
             * is sleeping too, waiting for that CPU to become idle.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}