#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qapi-visit-common.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
#include "sysemu/qtest.h"
#include "sysemu/whpx.h"
#include "sysemu/numa.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/xen.h"
#include "trace.h"

#include "hw/i386/x86.h"
#include "target/i386/cpu.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/intc/i8259.h"
#include "hw/rtc/mc146818rtc.h"
#include "target/i386/sev.h"

#include "hw/acpi/cpu_hotplug.h"
#include "hw/irq.h"
#include "hw/nmi.h"
#include "hw/loader.h"
#include "multiboot.h"
#include "elf.h"
#include "standard-headers/asm-x86/bootparam.h"
#include CONFIG_DEVICES
#include "kvm/kvm_i386.h"

/* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr;

inline void init_topo_info(X86CPUTopoInfo *topo_info,
                           const X86MachineState *x86ms)
{
    MachineState *ms = MACHINE(x86ms);

    topo_info->dies_per_pkg = ms->smp.dies;
    topo_info->cores_per_die = ms->smp.cores;
    topo_info->threads_per_core = ms->smp.threads;
}

/*
 * Calculates initial APIC ID for a specific CPU index
 *
 * Currently we need to be able to calculate the APIC ID from the CPU index
 * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces
 * have no concept of "CPU index", and the NUMA tables on fw_cfg need the
 * APIC ID of all CPUs up to max_cpus.
 */
uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
                                    unsigned int cpu_index)
{
    X86CPUTopoInfo topo_info;

    init_topo_info(&topo_info, x86ms);

    return x86_apicid_from_cpu_idx(&topo_info, cpu_index);
}

void x86_cpu_new(X86MachineState *x86ms, int64_t apic_id, Error **errp)
{
    Object *cpu = object_new(MACHINE(x86ms)->cpu_type);

    if (!object_property_set_uint(cpu, "apic-id", apic_id, errp)) {
        goto out;
    }
    qdev_realize(DEVICE(cpu), NULL, errp);

out:
    object_unref(cpu);
}

void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
{
    int i;
    const CPUArchIdList *possible_cpus;
    MachineState *ms = MACHINE(x86ms);
    MachineClass *mc = MACHINE_GET_CLASS(x86ms);

    x86_cpu_set_default_version(default_cpu_version);

    /*
     * Calculate the limit to APIC ID values
     *
     * Limit for the APIC ID value, so that all
     * CPU APIC IDs are < x86ms->apic_id_limit.
     *
     * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create().
     */
    x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
                                                      ms->smp.max_cpus - 1) + 1;

    /*
     * Can we support APIC ID 255 or higher?
     *
     * Under Xen: yes.
     * With userspace emulated lapic: no.
     * With KVM's in-kernel lapic: only if the X2APIC API is enabled.
     */
    if (x86ms->apic_id_limit > 255 && !xen_enabled() &&
        (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) {
        error_report("current -smp configuration requires kernel "
                     "irqchip and X2APIC API support.");
        exit(EXIT_FAILURE);
    }

    if (kvm_enabled()) {
        kvm_set_max_apic_id(x86ms->apic_id_limit);
    }

    possible_cpus = mc->possible_cpu_arch_ids(ms);
    for (i = 0; i < ms->smp.cpus; i++) {
        x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
    }
}

void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count)
{
    if (cpus_count > 0xff) {
        /*
         * If the number of CPUs can't be represented in 8 bits, the
         * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just
         * to make old BIOSes fail more predictably.
         */
        rtc_set_memory(rtc, 0x5f, 0);
    } else {
        rtc_set_memory(rtc, 0x5f, cpus_count - 1);
    }
}

static int x86_apic_cmp(const void *a, const void *b)
{
    CPUArchId *apic_a = (CPUArchId *)a;
    CPUArchId *apic_b = (CPUArchId *)b;

    return apic_a->arch_id - apic_b->arch_id;
}

/*
 * Returns the CPUArchId descriptor in ms->possible_cpus->cpus that matches
 * the given APIC ID, or NULL if there is no entry for it.  When a slot is
 * found and idx is non-NULL, its index within possible_cpus is stored there.
 */
CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    CPUArchId apic_id, *found_cpu;

    apic_id.arch_id = id;
    found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus,
                        ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus),
                        x86_apic_cmp);
    if (found_cpu && idx) {
        *idx = found_cpu - ms->possible_cpus->cpus;
    }
    return found_cpu;
}

void x86_cpu_plug(HotplugHandler *hotplug_dev,
                  DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (x86ms->acpi_dev) {
        hotplug_handler_plug(x86ms->acpi_dev, dev, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* increment the number of CPUs */
    x86ms->boot_cpus++;
    if (x86ms->rtc) {
        x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    }
    if (x86ms->fw_cfg) {
        fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = OBJECT(dev);
out:
    error_propagate(errp, local_err);
}

void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                               DeviceState *dev, Error **errp)
{
    int idx = -1;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (!x86ms->acpi_dev) {
        error_setg(errp, "CPU hot unplug not supported without ACPI");
        return;
    }

    x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    assert(idx != -1);
    if (idx == 0) {
        error_setg(errp, "Boot CPU is unpluggable");
        return;
    }

    hotplug_handler_unplug_request(x86ms->acpi_dev, dev,
                                   errp);
}

void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev,
                       DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    hotplug_handler_unplug(x86ms->acpi_dev, dev, &local_err);
    if (local_err) {
        goto out;
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = NULL;
    qdev_unrealize(dev);

    /* decrement the number of CPUs */
    x86ms->boot_cpus--;
    /* Update the number of CPUs in CMOS */
    x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
 out:
    error_propagate(errp, local_err);
}

void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
                      DeviceState *dev, Error **errp)
{
    int idx;
    CPUState *cs;
    CPUArchId *cpu_slot;
    X86CPUTopoIDs topo_ids;
    X86CPU *cpu = X86_CPU(dev);
    CPUX86State *env = &cpu->env;
    MachineState *ms = MACHINE(hotplug_dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
    unsigned int smp_cores = ms->smp.cores;
    unsigned int smp_threads = ms->smp.threads;
    X86CPUTopoInfo topo_info;

    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
                   ms->cpu_type);
        return;
    }

    if (x86ms->acpi_dev) {
        Error *local_err = NULL;

        hotplug_handler_pre_plug(HOTPLUG_HANDLER(x86ms->acpi_dev), dev,
                                 &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    init_topo_info(&topo_info, x86ms);

    env->nr_dies = ms->smp.dies;

    /*
     * If the APIC ID is not set, derive it from the
     * socket/die/core/thread properties.
     */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        int max_socket = (ms->smp.max_cpus - 1) /
                                smp_threads / smp_cores / ms->smp.dies;

        /*
         * die-id was optional in QEMU 4.0 and older, so keep it optional
         * if there's only one die per socket.
         */
        if (cpu->die_id < 0 && ms->smp.dies == 1) {
            cpu->die_id = 0;
        }

        if (cpu->socket_id < 0) {
            error_setg(errp, "CPU socket-id is not set");
            return;
        } else if (cpu->socket_id > max_socket) {
            error_setg(errp, "Invalid CPU socket-id: %u must be in range 0:%u",
                       cpu->socket_id, max_socket);
            return;
        }
        if (cpu->die_id < 0) {
            error_setg(errp, "CPU die-id is not set");
            return;
        } else if (cpu->die_id > ms->smp.dies - 1) {
            error_setg(errp, "Invalid CPU die-id: %u must be in range 0:%u",
                       cpu->die_id, ms->smp.dies - 1);
            return;
        }
        if (cpu->core_id < 0) {
            error_setg(errp, "CPU core-id is not set");
            return;
        } else if (cpu->core_id > (smp_cores - 1)) {
            error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
                       cpu->core_id, smp_cores - 1);
            return;
        }
        if (cpu->thread_id < 0) {
            error_setg(errp, "CPU thread-id is not set");
            return;
        } else if (cpu->thread_id > (smp_threads - 1)) {
            error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u",
                       cpu->thread_id, smp_threads - 1);
            return;
        }

        topo_ids.pkg_id = cpu->socket_id;
        topo_ids.die_id = cpu->die_id;
        topo_ids.core_id = cpu->core_id;
        topo_ids.smt_id = cpu->thread_id;
        cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
    }

    cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    if (!cpu_slot) {
        MachineState *ms = MACHINE(x86ms);

        x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
        error_setg(errp,
            "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
            " APIC ID %" PRIu32 ", valid index range 0:%d",
            topo_ids.pkg_id, topo_ids.die_id, topo_ids.core_id, topo_ids.smt_id,
            cpu->apic_id, ms->possible_cpus->len - 1);
        return;
    }

    if (cpu_slot->cpu) {
        error_setg(errp, "CPU[%d] with APIC ID %" PRIu32 " exists",
                   idx, cpu->apic_id);
        return;
    }

    /*
     * If the 'address' properties socket-id/die-id/core-id/thread-id are not
     * set, fill them in from the APIC ID so that query-hotpluggable-cpus
     * shows correct values; if they are set, cross-check them against the
     * topology encoded in the APIC ID.
     */
    x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
    if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
        error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
            " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
            topo_ids.pkg_id);
        return;
    }
    cpu->socket_id = topo_ids.pkg_id;

    if (cpu->die_id != -1 && cpu->die_id != topo_ids.die_id) {
        error_setg(errp, "property die-id: %u doesn't match set apic-id:"
            " 0x%x (die-id: %u)", cpu->die_id, cpu->apic_id, topo_ids.die_id);
        return;
    }
    cpu->die_id = topo_ids.die_id;

    if (cpu->core_id != -1 && cpu->core_id != topo_ids.core_id) {
        error_setg(errp, "property core-id: %u doesn't match set apic-id:"
            " 0x%x (core-id: %u)", cpu->core_id, cpu->apic_id,
            topo_ids.core_id);
        return;
    }
    cpu->core_id = topo_ids.core_id;

    if (cpu->thread_id != -1 && cpu->thread_id != topo_ids.smt_id) {
        error_setg(errp, "property thread-id: %u doesn't match set apic-id:"
            " 0x%x (thread-id: %u)", cpu->thread_id, cpu->apic_id,
            topo_ids.smt_id);
        return;
    }
    cpu->thread_id = topo_ids.smt_id;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) &&
        !kvm_hv_vpindex_settable()) {
        error_setg(errp, "kernel doesn't allow setting HyperV VP_INDEX");
        return;
    }

    cs = CPU(cpu);
    cs->cpu_index = idx;

    numa_cpu_pre_plug(cpu_slot, dev, errp);
}

CpuInstanceProperties
x86_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}

int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    X86CPUTopoIDs topo_ids;
    X86MachineState *x86ms = X86_MACHINE(ms);
    X86CPUTopoInfo topo_info;

    init_topo_info(&topo_info, x86ms);

    assert(idx < ms->possible_cpus->len);
    x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
                             &topo_info, &topo_ids);
    return topo_ids.pkg_id % ms->numa_state->num_nodes;
}

const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
{
    X86MachineState *x86ms = X86_MACHINE(ms);
    unsigned int max_cpus = ms->smp.max_cpus;
    X86CPUTopoInfo topo_info;
    int i;

    if (ms->possible_cpus) {
        /*
         * make sure that max_cpus hasn't changed since the first use, i.e.
         * -smp hasn't been parsed after it
         */
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;

    init_topo_info(&topo_info, x86ms);

    for (i = 0; i < ms->possible_cpus->len; i++) {
        X86CPUTopoIDs topo_ids;

        ms->possible_cpus->cpus[i].type = ms->cpu_type;
        ms->possible_cpus->cpus[i].vcpus_count = 1;
        ms->possible_cpus->cpus[i].arch_id =
            x86_cpu_apic_id_from_index(x86ms, i);
        x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
                                 &topo_info, &topo_ids);
        ms->possible_cpus->cpus[i].props.has_socket_id = true;
        ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
        if (ms->smp.dies > 1) {
            ms->possible_cpus->cpus[i].props.has_die_id = true;
            ms->possible_cpus->cpus[i].props.die_id = topo_ids.die_id;
        }
        ms->possible_cpus->cpus[i].props.has_core_id = true;
        ms->possible_cpus->cpus[i].props.core_id = topo_ids.core_id;
        ms->possible_cpus->cpus[i].props.has_thread_id = true;
        ms->possible_cpus->cpus[i].props.thread_id = topo_ids.smt_id;
    }
    return ms->possible_cpus;
}

static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
{
    /* cpu index ignored */
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
}

static long get_file_size(FILE *f)
{
    long where, size;

    /* XXX: on Unix systems, using fstat() probably makes more sense */

    where = ftell(f);
    fseek(f, 0, SEEK_END);
    size = ftell(f);
    fseek(f, where, SEEK_SET);

    return size;
}

/* TSC handling */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpus_get_elapsed_ticks();
}

/* IRQ handling */
static void pic_irq_request(void *opaque, int irq, int level)
{
    CPUState *cs = first_cpu;
    X86CPU *cpu = X86_CPU(cs);

    trace_x86_pic_interrupt(irq, level);
    if (cpu->apic_state && !kvm_irqchip_in_kernel() &&
        !whpx_apic_in_platform()) {
        CPU_FOREACH(cs) {
            cpu = X86_CPU(cs);
            if (apic_accept_pic_intr(cpu->apic_state)) {
                apic_deliver_pic_intr(cpu->apic_state, level);
            }
        }
    } else {
        if (level) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

qemu_irq x86_allocate_cpu_irq(void)
{
    return qemu_allocate_irq(pic_irq_request, NULL, 0);
}

int cpu_get_pic_interrupt(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    int intno;

    if (!kvm_irqchip_in_kernel() && !whpx_apic_in_platform()) {
        intno = apic_get_interrupt(cpu->apic_state);
        if (intno >= 0) {
            return intno;
        }
        /* read the irq from the PIC */
        if (!apic_accept_pic_intr(cpu->apic_state)) {
            return -1;
        }
    }

    intno = pic_read_irq(isa_pic);
    return intno;
}

DeviceState *cpu_get_current_apic(void)
{
    if (current_cpu) {
        X86CPU *cpu = X86_CPU(current_cpu);
        return cpu->apic_state;
    } else {
        return NULL;
    }
}

void gsi_handler(void *opaque, int n, int level)
{
    GSIState *s = opaque;

    trace_x86_gsi_interrupt(n, level);
    switch (n) {
    case 0 ... ISA_NUM_IRQS - 1:
        if (s->i8259_irq[n]) {
            /* Under KVM, Kernel will forward to both PIC and IOAPIC */
            qemu_set_irq(s->i8259_irq[n], level);
        }
        /* fall through */
    case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic_irq[n], level);
        break;
    case IO_APIC_SECONDARY_IRQBASE
        ... IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic2_irq[n - IO_APIC_SECONDARY_IRQBASE], level);
        break;
    }
}

void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    assert(parent_name);
    if (kvm_ioapic_in_kernel()) {
        dev = qdev_new(TYPE_KVM_IOAPIC);
    } else {
        dev = qdev_new(TYPE_IOAPIC);
    }
    object_property_add_child(object_resolve_path(parent_name, NULL),
                              "ioapic", OBJECT(dev));
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic_irq[i] = qdev_get_gpio_in(dev, i);
    }
}

DeviceState *ioapic_init_secondary(GSIState *gsi_state)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    dev = qdev_new(TYPE_IOAPIC);
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_SECONDARY_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic2_irq[i] = qdev_get_gpio_in(dev, i);
    }
    return dev;
}

struct setup_data {
    uint64_t next;
    uint32_t type;
    uint32_t len;
    uint8_t data[];
} __attribute__((packed));

/*
 * load_elf() callback used for PVH boot: scan the kernel's ELF notes for
 * XEN_ELFNOTE_PHYS32_ENTRY and record the PVH entry point address it
 * carries in pvh_start_addr.  arg1 points at the note header, arg2 at the
 * containing segment's alignment, and is64 selects between the 32-bit and
 * 64-bit note layouts.
 */
static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64)
{
    size_t *elf_note_data_addr;

    /* Check if ELF Note header passed in is valid */
    if (arg1 == NULL) {
        return 0;
    }

    if (is64) {
        struct elf64_note *nhdr64 = (struct elf64_note *)arg1;
        uint64_t nhdr_size64 = sizeof(struct elf64_note);
        uint64_t phdr_align = *(uint64_t *)arg2;
        uint64_t nhdr_namesz = nhdr64->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr64) + nhdr_size64 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *elf_note_data_addr;
    } else {
        struct elf32_note *nhdr32 = (struct elf32_note *)arg1;
        uint32_t nhdr_size32 = sizeof(struct elf32_note);
        uint32_t phdr_align = *(uint32_t *)arg2;
        uint32_t nhdr_namesz = nhdr32->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr32) + nhdr_size32 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *(uint32_t *)elf_note_data_addr;
    }

    return pvh_start_addr;
}

static bool load_elfboot(const char *kernel_filename,
                         int kernel_file_size,
                         uint8_t *header,
                         size_t pvh_xen_start_addr,
                         FWCfgState *fw_cfg)
{
    uint32_t flags = 0;
    uint32_t mh_load_addr = 0;
    uint32_t elf_kernel_size = 0;
    uint64_t elf_entry;
    uint64_t elf_low, elf_high;
    int kernel_size;

    if (ldl_p(header) != 0x464c457f) {
        return false;  /* no ELF magic, not an uncompressed ELF kernel */
    }

    bool elf_is64 = header[EI_CLASS] == ELFCLASS64;
    flags = elf_is64 ?
        ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags;

    if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */
        error_report("elfboot unsupported flags = %x", flags);
        exit(1);
    }

    uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY;
    kernel_size = load_elf(kernel_filename, read_pvh_start_addr,
                           NULL, &elf_note_type, &elf_entry,
                           &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE,
                           0, 0);

    if (kernel_size < 0) {
        error_report("Error while loading elf kernel");
        exit(1);
    }
    mh_load_addr = elf_low;
    elf_kernel_size = elf_high - elf_low;

    if (pvh_start_addr == 0) {
        error_report("Error loading uncompressed kernel without PVH ELF Note");
        exit(1);
    }
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size);

    return true;
}

void x86_load_linux(X86MachineState *x86ms,
                    FWCfgState *fw_cfg,
                    int acpi_data_size,
                    bool pvh_enabled)
{
    bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled;
    uint16_t protocol;
    int setup_size, kernel_size, cmdline_size;
    int dtb_size, setup_data_offset;
    uint32_t initrd_max;
    uint8_t header[8192], *setup, *kernel;
    hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
    FILE *f;
    char *vmode;
    MachineState *machine = MACHINE(x86ms);
    struct setup_data *setup_data;
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    const char *dtb_filename = machine->dtb;
    const char *kernel_cmdline = machine->kernel_cmdline;
    SevKernelLoaderContext sev_load_ctx = {};

    /* Align to 16 bytes as a paranoia measure */
    cmdline_size = (strlen(kernel_cmdline) + 16) & ~15;

    /* load the kernel header */
    f = fopen(kernel_filename, "rb");
    if (!f) {
        fprintf(stderr, "qemu: could not open kernel file '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    kernel_size = get_file_size(f);
    if (!kernel_size ||
        fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) !=
        MIN(ARRAY_SIZE(header), kernel_size)) {
        fprintf(stderr, "qemu: could not load kernel '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    /* kernel protocol version */
    if (ldl_p(header + 0x202) == 0x53726448) {
        protocol = lduw_p(header + 0x206);
    } else {
        /*
         * This could be a multiboot kernel. If it is, stop treating it like
         * a Linux kernel. Some multiboot images are also in ELF format (as
         * PVH images are), so try multiboot first: its magic header is
         * checked before anything is loaded.
         */
        if (load_multiboot(x86ms, fw_cfg, f, kernel_filename, initrd_filename,
                           kernel_cmdline, kernel_size, header)) {
            return;
        }
        /*
         * Check if the file is an uncompressed kernel file (ELF) and load it,
         * saving the PVH entry point used by the x86/HVM direct boot ABI.
         * If load_elfboot() is successful, populate the fw_cfg info.
         */
        if (pvh_enabled &&
            load_elfboot(kernel_filename, kernel_size,
                         header, pvh_start_addr, fw_cfg)) {
            fclose(f);

            fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
                           strlen(kernel_cmdline) + 1);
            fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);

            fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header));
            fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA,
                             header, sizeof(header));

            /* load initrd */
            if (initrd_filename) {
                GMappedFile *mapped_file;
                gsize initrd_size;
                gchar *initrd_data;
                GError *gerr = NULL;

                mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
                if (!mapped_file) {
                    fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                            initrd_filename, gerr->message);
                    exit(1);
                }
                x86ms->initrd_mapped_file = mapped_file;

                initrd_data = g_mapped_file_get_contents(mapped_file);
                initrd_size = g_mapped_file_get_length(mapped_file);
                initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
                if (initrd_size >= initrd_max) {
                    fprintf(stderr, "qemu: initrd is too large, cannot support."
                            "(max: %"PRIu32", need %"PRId64")\n",
                            initrd_max, (uint64_t)initrd_size);
                    exit(1);
                }

                initrd_addr = (initrd_max - initrd_size) & ~4095;

                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
                fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data,
                                 initrd_size);
            }

            option_rom[nb_option_roms].bootindex = 0;
            option_rom[nb_option_roms].name = "pvh.bin";
            nb_option_roms++;

            return;
        }
        protocol = 0;
    }

    if (protocol < 0x200 || !(header[0x211] & 0x01)) {
        /* Low kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x10000;
    } else if (protocol < 0x202) {
        /* High but ancient kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x100000;
    } else {
        /* High and recent kernel */
        real_addr    = 0x10000;
        cmdline_addr = 0x20000;
        prot_addr    = 0x100000;
    }

    /* highest address for loading the initrd */
    if (protocol >= 0x20c &&
        lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
        /*
         * Linux has supported initrd up to 4 GB for a very long time (2007,
         * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013),
         * though it only sets initrd_max to 2 GB to "work around bootloader
         * bugs". Luckily, QEMU firmware (which does something like a
         * bootloader) has supported this.
         *
         * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can
         * be loaded into any address.
         *
         * In addition, initrd_max is uint32_t simply because QEMU doesn't
         * support the 64-bit boot protocol (specifically the ext_ramdisk_image
         * field).
         *
         * Therefore here just limit initrd_max to UINT32_MAX simply as well.
         */
        initrd_max = UINT32_MAX;
    } else if (protocol >= 0x203) {
        /* Boot protocol 2.03+ reports the maximum initrd address. */
        initrd_max = ldl_p(header + 0x22c);
    } else {
        /* Default when the boot protocol doesn't report a limit. */
        initrd_max = 0x37ffffff;
    }

    if (initrd_max >= x86ms->below_4g_mem_size - acpi_data_size) {
        initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
    }

    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1);
    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);
    sev_load_ctx.cmdline_data = (char *)kernel_cmdline;
    sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;

    if (protocol >= 0x202) {
        stl_p(header + 0x228, cmdline_addr);
    } else {
        /* Pre-2.02 protocol: old command line magic and offset. */
        stw_p(header + 0x20, 0xA33F);
        stw_p(header + 0x22, cmdline_addr - real_addr);
    }

    /* handle vga= parameter */
    vmode = strstr(kernel_cmdline, "vga=");
    if (vmode) {
        unsigned int video_mode;
        const char *end;
        int ret;
        /* skip "vga=" */
        vmode += 4;
        if (!strncmp(vmode, "normal", 6)) {
            video_mode = 0xffff;
        } else if (!strncmp(vmode, "ext", 3)) {
            video_mode = 0xfffe;
        } else if (!strncmp(vmode, "ask", 3)) {
            video_mode = 0xfffd;
        } else {
            ret = qemu_strtoui(vmode, &end, 0, &video_mode);
            if (ret != 0 || (*end && *end != ' ')) {
                fprintf(stderr, "qemu: invalid 'vga=' kernel parameter.\n");
                exit(1);
            }
        }
        stw_p(header + 0x1fa, video_mode);
    }

    /* loader type */
    /*
     * High nybble = B reserved for QEMU; low nybble is revision number.
     * If this code is substantially changed, you may want to consider
     * incrementing the revision.
     */
    if (protocol >= 0x200) {
        header[0x210] = 0xB0;
    }
    /* heap */
    if (protocol >= 0x201) {
        header[0x211] |= 0x80; /* CAN_USE_HEAP */
        stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
    }

    /* load initrd */
    if (initrd_filename) {
        GMappedFile *mapped_file;
        gsize initrd_size;
        gchar *initrd_data;
        GError *gerr = NULL;

        if (protocol < 0x200) {
            fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n");
            exit(1);
        }

        mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
        if (!mapped_file) {
            fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                    initrd_filename, gerr->message);
            exit(1);
        }
        x86ms->initrd_mapped_file = mapped_file;

        initrd_data = g_mapped_file_get_contents(mapped_file);
        initrd_size = g_mapped_file_get_length(mapped_file);
        if (initrd_size >= initrd_max) {
            fprintf(stderr, "qemu: initrd is too large, cannot support."
                    "(max: %"PRIu32", need %"PRId64")\n",
                    initrd_max, (uint64_t)initrd_size);
            exit(1);
        }

        initrd_addr = (initrd_max - initrd_size) & ~4095;

        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
        fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size);
        sev_load_ctx.initrd_data = initrd_data;
        sev_load_ctx.initrd_size = initrd_size;

        stl_p(header + 0x218, initrd_addr);
        stl_p(header + 0x21c, initrd_size);
    }

    /* load kernel and setup */
    setup_size = header[0x1f1];
    if (setup_size == 0) {
        setup_size = 4;
    }
    setup_size = (setup_size + 1) * 512;
    if (setup_size > kernel_size) {
        fprintf(stderr, "qemu: invalid kernel header\n");
        exit(1);
    }
    kernel_size -= setup_size;

    setup = g_malloc(setup_size);
    kernel = g_malloc(kernel_size);
    fseek(f, 0, SEEK_SET);
    if (fread(setup, 1, setup_size, f) != setup_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    if (fread(kernel, 1, kernel_size, f) != kernel_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    fclose(f);

    /* append dtb to kernel */
    if (dtb_filename) {
        if (protocol < 0x209) {
            fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n");
            exit(1);
        }

        dtb_size = get_image_size(dtb_filename);
        if (dtb_size <= 0) {
            fprintf(stderr, "qemu: error reading dtb %s: %s\n",
                    dtb_filename, strerror(errno));
            exit(1);
        }

        setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
        kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
        kernel = g_realloc(kernel, kernel_size);

        stq_p(header + 0x250, prot_addr + setup_data_offset);

        setup_data = (struct setup_data *)(kernel + setup_data_offset);
        setup_data->next = 0;
        setup_data->type = cpu_to_le32(SETUP_DTB);
        setup_data->len = cpu_to_le32(dtb_size);

        load_image_size(dtb_filename, setup_data->data, dtb_size);
    }

    /*
     * If we're starting an encrypted VM, it will be OVMF based, which uses the
     * efi stub for booting and doesn't require any values to be placed in the
     * kernel header.  We therefore don't update the header so the hash of the
     * kernel on the other side of the fw_cfg interface matches the hash of the
     * file the user passed in.
     */
    if (!sev_enabled()) {
        memcpy(setup, header, MIN(sizeof(header), setup_size));
    }

    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
    sev_load_ctx.kernel_data = (char *)kernel;
    sev_load_ctx.kernel_size = kernel_size;

    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);
    sev_load_ctx.setup_data = (char *)setup;
    sev_load_ctx.setup_size = setup_size;

    if (sev_enabled()) {
        sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal);
    }

    option_rom[nb_option_roms].bootindex = 0;
    option_rom[nb_option_roms].name = "linuxboot.bin";
    if (linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) {
        option_rom[nb_option_roms].name = "linuxboot_dma.bin";
    }
    nb_option_roms++;
}

void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
                       MemoryRegion *rom_memory, bool isapc_ram_fw)
{
    const char *bios_name;
    char *filename;
    MemoryRegion *bios, *isa_bios;
    int bios_size, isa_bios_size;
    ssize_t ret;

    /* BIOS load */
    bios_name = ms->firmware ?: default_firmware;
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (filename) {
        bios_size = get_image_size(filename);
    } else {
        bios_size = -1;
    }
    if (bios_size <= 0 ||
        (bios_size % 65536) != 0) {
        goto bios_error;
    }
    bios = g_malloc(sizeof(*bios));
    memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
    if (sev_enabled()) {
        /*
         * The concept of a "reset" simply doesn't exist for
         * confidential computing guests, we have to destroy and
         * re-launch them instead.  So there is no need to register
         * the firmware as rom to properly re-initialize on reset.
         * Just go for a straight file load instead.
         */
        void *ptr = memory_region_get_ram_ptr(bios);
        load_image_size(filename, ptr, bios_size);
        x86_firmware_configure(ptr, bios_size);
    } else {
        if (!isapc_ram_fw) {
            memory_region_set_readonly(bios, true);
        }
        ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
        if (ret != 0) {
            goto bios_error;
        }
    }
    g_free(filename);

    /* map the last 128KB of the BIOS in ISA space */
    isa_bios_size = MIN(bios_size, 128 * KiB);
    isa_bios = g_malloc(sizeof(*isa_bios));
    memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
                             bios_size - isa_bios_size, isa_bios_size);
    memory_region_add_subregion_overlap(rom_memory,
                                        0x100000 - isa_bios_size,
                                        isa_bios,
                                        1);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(isa_bios, true);
    }

    /* map all the bios at the top of memory */
    memory_region_add_subregion(rom_memory,
                                (uint32_t)(-bios_size),
                                bios);
    return;

bios_error:
    fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
    exit(1);
}

bool x86_machine_is_smm_enabled(const X86MachineState *x86ms)
{
    bool smm_available = false;

    if (x86ms->smm == ON_OFF_AUTO_OFF) {
        return false;
    }

    if (tcg_enabled() || qtest_enabled()) {
        smm_available = true;
    } else if (kvm_enabled()) {
        smm_available = kvm_has_smm();
    }

    if (smm_available) {
        return true;
    }

    if (x86ms->smm == ON_OFF_AUTO_ON) {
        error_report("System Management Mode not supported by this hypervisor.");
        exit(1);
    }
    return false;
}

static void x86_machine_get_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto smm = x86ms->smm;

    visit_type_OnOffAuto(v, name, &smm, errp);
}

static void x86_machine_set_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->smm, errp);
}

bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms)
{
    if (x86ms->acpi == ON_OFF_AUTO_OFF) {
        return false;
    }
    return true;
}

static void x86_machine_get_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto acpi = x86ms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void x86_machine_set_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->acpi, errp);
}

static void x86_machine_get_pit(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto pit = x86ms->pit;

    visit_type_OnOffAuto(v, name, &pit, errp);
}

static void x86_machine_set_pit(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->pit, errp);
}

static void x86_machine_get_pic(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto pic = x86ms->pic;

    visit_type_OnOffAuto(v, name, &pic, errp);
}

static void x86_machine_set_pic(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->pic, errp);
}

static char *x86_machine_get_oem_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_id);
}

static void x86_machine_set_oem_id(Object *obj, const char *value, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 6) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_ID" value is bigger than "
                   "6 bytes in size");
        return;
    }

    strncpy(x86ms->oem_id, value, 6);
}

static char *x86_machine_get_oem_table_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_table_id);
}

static void x86_machine_set_oem_table_id(Object *obj, const char *value,
                                         Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 8) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_TABLE_ID
                   " value is bigger than "
                   "8 bytes in size");
        return;
    }
    strncpy(x86ms->oem_table_id, value, 8);
}

static void x86_machine_get_bus_lock_ratelimit(Object *obj, Visitor *v,
                                const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    uint64_t bus_lock_ratelimit = x86ms->bus_lock_ratelimit;

    visit_type_uint64(v, name, &bus_lock_ratelimit, errp);
}

static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v,
                               const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp);
}

static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list = x86ms->sgx_epc_list;

    visit_type_SgxEPCList(v, name, &list, errp);
}

static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list;

    list = x86ms->sgx_epc_list;
    visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp);

    qapi_free_SgxEPCList(list);
}

static void x86_machine_initfn(Object *obj)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    x86ms->smm = ON_OFF_AUTO_AUTO;
    x86ms->acpi = ON_OFF_AUTO_AUTO;
    x86ms->pit = ON_OFF_AUTO_AUTO;
    x86ms->pic = ON_OFF_AUTO_AUTO;
    x86ms->pci_irq_mask = ACPI_BUILD_PCI_IRQS;
    x86ms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
    x86ms->bus_lock_ratelimit = 0;
    x86ms->above_4g_mem_start = 4 * GiB;
}

static void x86_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);

    mc->cpu_index_to_instance_props = x86_cpu_index_to_props;
    mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
    x86mc->save_tsc_khz = true;
    x86mc->fwcfg_dma_enabled = true;
    nc->nmi_monitor_handler = x86_nmi;

    object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto",
                              x86_machine_get_smm, x86_machine_set_smm,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_SMM,
                                          "Enable SMM");

    object_class_property_add(oc, X86_MACHINE_ACPI, "OnOffAuto",
                              x86_machine_get_acpi, x86_machine_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_ACPI,
                                          "Enable ACPI");

    object_class_property_add(oc, X86_MACHINE_PIT, "OnOffAuto",
                              x86_machine_get_pit,
                              x86_machine_set_pit,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_PIT,
                                          "Enable i8254 PIT");

    object_class_property_add(oc, X86_MACHINE_PIC, "OnOffAuto",
                              x86_machine_get_pic,
                              x86_machine_set_pic,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_PIC,
                                          "Enable i8259 PIC");

    object_class_property_add_str(oc, X86_MACHINE_OEM_ID,
                                  x86_machine_get_oem_id,
                                  x86_machine_set_oem_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_ID,
                                          "Override the default value of field OEMID "
                                          "in ACPI table header. "
                                          "The string may be up to 6 bytes in size");

    object_class_property_add_str(oc, X86_MACHINE_OEM_TABLE_ID,
                                  x86_machine_get_oem_table_id,
                                  x86_machine_set_oem_table_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_TABLE_ID,
                                          "Override the default value of field OEM Table ID "
                                          "in ACPI table header. "
                                          "The string may be up to 8 bytes in size");

    object_class_property_add(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "uint64_t",
                              x86_machine_get_bus_lock_ratelimit,
                              x86_machine_set_bus_lock_ratelimit, NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT,
        "Set the ratelimit for the bus locks acquired in VMs");

    object_class_property_add(oc, "sgx-epc", "SgxEPC",
                              machine_get_sgx_epc, machine_set_sgx_epc,
                              NULL, NULL);
    object_class_property_set_description(oc, "sgx-epc",
                                          "SGX EPC device");
}

static const TypeInfo x86_machine_info = {
    .name = TYPE_X86_MACHINE,
    .parent = TYPE_MACHINE,
    .abstract = true,
    .instance_size = sizeof(X86MachineState),
    .instance_init = x86_machine_initfn,
    .class_size = sizeof(X86MachineClass),
    .class_init = x86_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
         { TYPE_NMI },
         { }
    },
};

static void x86_machine_register_types(void)
{
    type_register_static(&x86_machine_info);
}

type_init(x86_machine_register_types)