1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef QEMU_KVM_H
15#define QEMU_KVM_H
16
17#include "qemu/queue.h"
18#include "hw/core/cpu.h"
19#include "exec/memattrs.h"
20#include "qemu/accel.h"
21#include "qom/object.h"
22
23#ifdef NEED_CPU_H
24# ifdef CONFIG_KVM
25# include <linux/kvm.h>
26# define CONFIG_KVM_IS_POSSIBLE
27# endif
28#else
29# define CONFIG_KVM_IS_POSSIBLE
30#endif
31
32#ifdef CONFIG_KVM_IS_POSSIBLE
33
/*
 * Feature/configuration flags for the KVM accelerator.  They are defined
 * in the KVM accelerator implementation (not in this header) and should
 * be read only through the accessor macros below, so that builds without
 * KVM possible see compile-time-constant false values instead.
 */
extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_eventfds_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
extern bool kvm_direct_msi_allowed;
extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
49
/* True if the KVM accelerator is in use. */
#define kvm_enabled() (kvm_allowed)

/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if an in-kernel irqchip was requested
 * ("kernel-irqchip=on").  Exactly which pieces of the interrupt
 * controller end up emulated in the kernel is architecture and
 * machine-model specific.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_irqchip_is_split:
 *
 * Returns: true if the irqchip implementation is split between user
 * space and kernel ("kernel-irqchip=split").
 */
#define kvm_irqchip_is_split() (kvm_split_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM asynchronously
 * (i.e. by ioctl from any thread at any time) rather than having to
 * stop the vcpu at a suitable point first.
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
 * inside of kernel space.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_eventfds_enabled:
 *
 * Returns: true if we can use eventfds to receive notifications from a
 * KVM CPU (i.e. the kernel supports them and the current configuration
 * permits their use).
 */
#define kvm_eventfds_enabled() (kvm_eventfds_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into a KVM CPU.
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_resamplefds_enabled:
 *
 * Returns: true if we can use resamplefds to inject interrupts into a
 * KVM CPU.
 */
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (i.e. the kernel supports it
 * and the current configuration permits its use).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_gsi_direct_mapping:
 *
 * Returns: true if GSI direct mapping is enabled.  (The macro shares
 * its name with the variable it reads; being function-like, the name
 * in the expansion refers to the variable, not the macro.)
 */
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM supports read-only memory slots in the current
 * configuration.
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

/**
 * kvm_direct_msi_enabled:
 *
 * Returns: true if KVM allows direct MSI injection.
 */
#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)

/**
 * kvm_ioeventfd_any_length_enabled:
 *
 * Returns: true if KVM allows any length io eventfd.
 */
#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)

/**
 * kvm_msi_devid_required:
 *
 * Returns: true if KVM requires a device id to be provided while
 * defining an MSI routing entry.
 */
#define kvm_msi_devid_required() (kvm_msi_use_devid)
169
170#else
171
/*
 * KVM is not possible in this build: provide constant-false stubs so
 * that KVM-conditional code still compiles and is eliminated as dead
 * code by the optimizer.
 */
#define kvm_enabled() (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
#define kvm_eventfds_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
/*
 * Bug fix: the CONFIG_KVM_IS_POSSIBLE branch defines
 * kvm_gsi_routing_enabled(), but this branch only defined
 * kvm_gsi_routing_allowed(), so any caller of kvm_gsi_routing_enabled()
 * failed to compile in a KVM-less build.  Provide the matching stub and
 * keep the old name for backward compatibility.
 */
#define kvm_gsi_routing_enabled() (false)
#define kvm_gsi_routing_allowed() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
#define kvm_direct_msi_enabled() (false)
#define kvm_ioeventfd_any_length_enabled() (false)
#define kvm_msi_devid_required() (false)
187
188#endif
189
190struct kvm_run;
191struct kvm_lapic_state;
192struct kvm_irq_routing_entry;
193
/*
 * A (name, capability number) pair describing a KVM capability, used
 * for the per-architecture required-capability table
 * (kvm_arch_required_capabilities below).
 */
typedef struct KVMCapabilityInfo {
    const char *name;   /* human-readable "KVM_CAP_*" name */
    int value;          /* the KVM_CAP_* constant itself */
} KVMCapabilityInfo;

/* Build a KVMCapabilityInfo entry from a KVM_CAP_* suffix. */
#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
/* Terminator entry for a KVMCapabilityInfo array. */
#define KVM_CAP_LAST_INFO { NULL, 0 }
201
202struct KVMState;
203
204#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
205typedef struct KVMState KVMState;
206DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
207 TYPE_KVM_ACCEL)
208
209extern KVMState *kvm_state;
210typedef struct Notifier Notifier;
211
/*
 * Accumulator for a batch of IRQ routing table changes.  Create one
 * with kvm_irqchip_begin_route_changes() and flush it with
 * kvm_irqchip_commit_route_changes() (both defined later in this file).
 */
typedef struct KVMRouteChange {
    KVMState *s;    /* the VM whose routing table is being changed */
    int changes;    /* pending, uncommitted changes; 0 = nothing to commit */
} KVMRouteChange;
216
217
218
/*
 * Capability query helpers.  Most of these report results that the
 * accelerator probed once at initialisation time (see accel/kvm).
 */
bool kvm_has_free_slot(MachineState *ms);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
int kvm_max_nested_state_length(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);

/**
 * kvm_arm_supports_user_irq:
 *
 * Not all KVM implementations support notifications for kernel generated
 * interrupt lines.  This function indicates whether this capability is
 * supported.
 *
 * Returns: true if KVM supports using kernel generated IRQs from user space
 */
bool kvm_arm_supports_user_irq(void);

/* SIGBUS (e.g. hardware memory error) handling, per-vcpu and global. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);
244
245#ifdef NEED_CPU_H
246#include "cpu.h"
247
248void kvm_flush_coalesced_mmio_buffer(void);
249
250
251
252
253
254
255
256
257
258
/*
 * Push the guest-debug settings for @cpu to the kernel.  Only available
 * when the kernel headers provide KVM_CAP_SET_GUEST_DEBUG; otherwise it
 * is stubbed out to fail with -EINVAL.
 */
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
#else
static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}
#endif
267
268
269
/*
 * Thin vararg wrappers around ioctl() for the different levels of the
 * KVM API; the optional extra argument is the ioctl payload.
 */

/* ioctl on the system (/dev/kvm) fd. */
int kvm_ioctl(KVMState *s, int type, ...);

/* ioctl on the VM fd. */
int kvm_vm_ioctl(KVMState *s, int type, ...);

/* ioctl on a vcpu fd. */
int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);

/**
 * kvm_device_ioctl - call an ioctl on a KVM device fd
 * @fd: The KVM device file descriptor
 */
int kvm_device_ioctl(int fd, int type, ...);

/**
 * kvm_vm_check_attr - check for existence of a specific vm attribute
 * @s: The KVMState pointer
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the vm device
 *            interface is unavailable
 */
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);

/**
 * kvm_device_check_attr - check for existence of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the device
 *            interface is unavailable
 */
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);

/**
 * kvm_device_access - set or get value of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to set or get
 * @val: pointer to a storage area for the value
 * @write: true for set and false for get operation
 * @errp: error object handle
 *
 * Returns: 0 on success
 *          negative error code on failure
 */
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp);

/**
 * kvm_create_device - create a KVM device for the device control API
 * @s: The KVMState pointer
 * @type: The KVM device type (see the kernel's KVM device documentation)
 * @test: If true, only test if device can be created, but don't actually
 *        create the device.
 *
 * Returns: -errno on error, a KVM fd for the device on success.
 */
int kvm_create_device(KVMState *s, uint64_t type, bool test);

/**
 * kvm_device_supported - probe whether KVM supports a specific device
 * @vmfd: The fd handle for the VM
 * @type: type of device
 *
 * Returns: true if supported, otherwise false.
 */
bool kvm_device_supported(int vmfd, uint64_t type);
347
348
349
/* Per-architecture required-capability list, KVM_CAP_LAST_INFO terminated. */
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

/*
 * Hooks implemented by each target architecture's KVM support code.
 */
void kvm_arch_accel_class_init(ObjectClass *oc);

/* Hooks run around the KVM_RUN ioctl; post_run yields memory attributes. */
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

/* Handle an architecture-specific KVM exit reason. */
int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

/* Fetch guest register state from the kernel into QEMU's CPUState. */
int kvm_arch_get_registers(CPUState *cpu);

/* Levels for kvm_arch_put_registers(): */

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1

/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE 2

/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3

/* Push (a subset of) QEMU register state to the kernel; see levels above. */
int kvm_arch_put_registers(CPUState *cpu, int level);

int kvm_arch_init(MachineState *ms, KVMState *s);

int kvm_arch_init_vcpu(CPUState *cpu);
int kvm_arch_destroy_vcpu(CPUState *cpu);

/* Returns: true if @vcpu_id is valid on the current architecture. */
bool kvm_vcpu_id_is_valid(int vcpu_id);

/* Returns: the VCPU ID to be used on the KVM_CREATE_VCPU ioctl(). */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);

#ifdef KVM_HAVE_MCE_INJECTION
void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
#endif

void kvm_arch_init_irq_routing(KVMState *s);

/* Let the architecture adjust an MSI routing entry before installation. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev);

/* Post-hooks invoked after an MSI route is added / a virq is released. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev);

int kvm_arch_release_virq_post(int virq);

int kvm_arch_msi_data_to_gsi(uint32_t data);

/* Set the level of an IRQ input on the in-kernel interrupt controller. */
int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

/* Notifiers invoked when the IRQ routing table changes. */
void kvm_irqchip_add_change_notifier(Notifier *n);
void kvm_irqchip_remove_change_notifier(Notifier *n);
void kvm_irqchip_change_notify(void);

void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
409
410struct kvm_guest_debug;
411struct kvm_debug_exit_arch;
412
/* Bookkeeping for one software breakpoint inserted into the guest. */
struct kvm_sw_breakpoint {
    target_ulong pc;          /* guest address the breakpoint is placed at */
    /* saved_insn presumably holds the original instruction so it can be
     * restored on removal — confirm against the accel/kvm implementation. */
    target_ulong saved_insn;
    int use_count;            /* number of users sharing this breakpoint */
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;  /* list linkage */
};

/* Find the software breakpoint at @pc for @cpu, if any. */
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc);

/* Returns non-zero if any software breakpoints are active for @cpu. */
int kvm_sw_breakpoints_active(CPUState *cpu);

/* Arch hooks to insert/remove software and hardware breakpoints. */
int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

/* Fill @dbg with the architecture's current guest-debug settings. */
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUState *cpu);

/* KVM_CHECK_EXTENSION queries, at the system and VM level respectively. */
int kvm_check_extension(KVMState *s, unsigned int extension);

int kvm_vm_check_extension(KVMState *s, unsigned int extension);
442
/*
 * kvm_vm_enable_cap:
 * @s: KVMState
 * @capability: the KVM_CAP_* constant to enable
 * @cap_flags: flags for the capability
 * @...: up to four uint64_t arguments for the capability; missing ones
 *       are zero (designated initialisation zeroes the rest of the
 *       struct), extra ones are silently dropped by the MIN() bound.
 *
 * Evaluates (GNU statement expression) to the KVM_ENABLE_CAP ioctl
 * return value.
 */
#define kvm_vm_enable_cap(s, capability, cap_flags, ...)             \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap);                       \
    })
454
/*
 * kvm_vcpu_enable_cap:
 * @cpu: the CPUState whose vcpu fd receives the ioctl
 * @capability: the KVM_CAP_* constant to enable
 * @cap_flags: flags for the capability
 * @...: up to four uint64_t arguments, same convention as
 *       kvm_vm_enable_cap().
 *
 * Evaluates (GNU statement expression) to the KVM_ENABLE_CAP ioctl
 * return value.
 */
#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...)         \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap);                   \
    })
466
467uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
468 uint32_t index, int reg);
469uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index);
470
471
472void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
473
474int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
475 hwaddr *phys_addr);
476
477#endif
478
479void kvm_cpu_synchronize_state(CPUState *cpu);
480
481void kvm_init_cpu_signals(CPUState *cpu);
482
483
484
485
486
487
488
489
490
491
492
493
494int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
495int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
496 PCIDevice *dev);
497void kvm_irqchip_commit_routes(KVMState *s);
498
499static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
500{
501 return (KVMRouteChange) { .s = s, .changes = 0 };
502}
503
504static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
505{
506 if (c->changes) {
507 kvm_irqchip_commit_routes(c->s);
508 c->changes = 0;
509 }
510}
511
512void kvm_irqchip_release_virq(KVMState *s, int virq);
513
514int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter);
515int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint);
516
517int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
518 EventNotifier *rn, int virq);
519int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
520 int virq);
521int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
522 EventNotifier *rn, qemu_irq irq);
523int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
524 qemu_irq irq);
525void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
526void kvm_pc_setup_irq_routing(bool pci_enabled);
527void kvm_init_irq_routing(KVMState *s);
528
529bool kvm_kernel_irqchip_allowed(void);
530bool kvm_kernel_irqchip_required(void);
531bool kvm_kernel_irqchip_split(void);
532
533
534
535
536
537
538
539
540
541
542
543int kvm_arch_irqchip_create(KVMState *s);
544
545
546
547
548
549
550
551
552
553int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
554
555
556
557
558
559
560
561
562
563int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
564struct ppc_radix_page_info *kvm_get_radix_page_info(void);
565int kvm_get_max_memslots(void);
566
567
568void kvm_resample_fd_notify(int gsi);
569
570
571
572
573
574
575
576bool kvm_cpu_check_are_resettable(void);
577
578bool kvm_arch_cpu_check_are_resettable(void);
579
580bool kvm_dirty_ring_enabled(void);
581
582uint32_t kvm_dirty_ring_size(void);
583#endif
584