1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include "qemu/osdep.h"
28#include "qapi/error.h"
29#include "qapi/visitor.h"
30#include "sysemu/sysemu.h"
31#include "sysemu/numa.h"
32#include "hw/hw.h"
33#include "qemu/log.h"
34#include "hw/fw-path-provider.h"
35#include "elf.h"
36#include "net/net.h"
37#include "sysemu/device_tree.h"
38#include "sysemu/block-backend.h"
39#include "sysemu/cpus.h"
40#include "sysemu/hw_accel.h"
41#include "kvm_ppc.h"
42#include "migration/misc.h"
43#include "migration/global_state.h"
44#include "migration/register.h"
45#include "mmu-hash64.h"
46#include "mmu-book3s-v3.h"
47#include "cpu-models.h"
48#include "qom/cpu.h"
49
50#include "hw/boards.h"
51#include "hw/ppc/ppc.h"
52#include "hw/loader.h"
53
54#include "hw/ppc/fdt.h"
55#include "hw/ppc/spapr.h"
56#include "hw/ppc/spapr_vio.h"
57#include "hw/pci-host/spapr.h"
58#include "hw/ppc/xics.h"
59#include "hw/pci/msi.h"
60
61#include "hw/pci/pci.h"
62#include "hw/scsi/scsi.h"
63#include "hw/virtio/virtio-scsi.h"
64#include "hw/virtio/vhost-scsi-common.h"
65
66#include "exec/address-spaces.h"
67#include "hw/usb.h"
68#include "qemu/config-file.h"
69#include "qemu/error-report.h"
70#include "trace.h"
71#include "hw/nmi.h"
72#include "hw/intc/intc.h"
73
74#include "hw/compat.h"
75#include "qemu/cutils.h"
76#include "hw/ppc/spapr_cpu_core.h"
77#include "qmp-commands.h"
78
79#include <libfdt.h>
80
81
82
83
84
85
86
87
88
89
90
91#define FDT_MAX_SIZE 0x100000
92#define RTAS_MAX_SIZE 0x10000
93#define RTAS_MAX_ADDR 0x80000000
94#define FW_MAX_SIZE 0x400000
95#define FW_FILE_NAME "slof.bin"
96#define FW_OVERHEAD 0x2800000
97#define KERNEL_LOAD_ADDR FW_MAX_SIZE
98
99#define MIN_RMA_SLOF 128UL
100
101#define PHANDLE_XICP 0x00001111
102
103
104
105
106
107static int spapr_vcpu_id(sPAPRMachineState *spapr, int cpu_index)
108{
109 return
110 (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
111}
112static bool spapr_is_thread0_in_vcore(sPAPRMachineState *spapr,
113 PowerPCCPU *cpu)
114{
115 return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
116}
117
118static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
119 const char *type_ics,
120 int nr_irqs, Error **errp)
121{
122 Error *local_err = NULL;
123 Object *obj;
124
125 obj = object_new(type_ics);
126 object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
127 object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
128 &error_abort);
129 object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
130 if (local_err) {
131 goto error;
132 }
133 object_property_set_bool(obj, true, "realized", &local_err);
134 if (local_err) {
135 goto error;
136 }
137
138 return ICS_SIMPLE(obj);
139
140error:
141 error_propagate(errp, local_err);
142 return NULL;
143}
144
/* .needed hook for the dummy "icp/server" section: always declines, so
 * the placeholder section is registered for instance-id bookkeeping but
 * never actually emitted in the migration stream.
 */
static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    return false;
}
153
/* Placeholder VMState description mirroring the wire layout of the
 * pre-2.10 "icp/server" section (4 + 1 + 1 unused bytes), presumably so
 * that streams from/to older machine types still parse; it is never
 * sent (see pre_2_10_vmstate_dummy_icp_needed).
 */
static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4),
        VMSTATE_UNUSED(1),
        VMSTATE_UNUSED(1),
        VMSTATE_END_OF_LIST()
    },
};
166
/* Register a dummy "icp/server" section with instance id @i, keeping
 * migration-stream section numbering compatible with pre-2.10 machine
 * types; the section body is never transmitted.
 */
static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}
172
/* Undo pre_2_10_vmstate_register_dummy_icp() for instance id @i. The
 * opaque pointer must match the one used at registration time.
 */
static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}
178
/* Highest interrupt-server count the XICS must cover: vCPU ids are
 * spaced by spapr->vsmt per core (see spapr_vcpu_id), so scale max_cpus
 * by vsmt/smp_threads, rounding up to a whole core.
 */
static int xics_max_server_number(sPAPRMachineState *spapr)
{
    return DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads);
}
183
184static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
185{
186 sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
187
188 if (kvm_enabled()) {
189 if (machine_kernel_irqchip_allowed(machine) &&
190 !xics_kvm_init(spapr, errp)) {
191 spapr->icp_type = TYPE_KVM_ICP;
192 spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp);
193 }
194 if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
195 error_prepend(errp, "kernel_irqchip requested but unavailable: ");
196 return;
197 }
198 }
199
200 if (!spapr->ics) {
201 xics_spapr_init(spapr);
202 spapr->icp_type = TYPE_ICP;
203 spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp);
204 if (!spapr->ics) {
205 return;
206 }
207 }
208}
209
/* Set the SMT-related properties on a CPU device-tree node at @offset:
 * "cpu-version" (only when the CPU runs in a compat mode),
 * "ibm,ppc-interrupt-server#s" (one server id per thread) and
 * "ibm,ppc-interrupt-gserver#s" ((server, 0) pairs per thread).
 * Returns 0 on success or a negative libfdt error code.
 */
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Thread i of this core is interrupt server (index + i); the gserver
     * list pairs each server with a 0 entry.
     */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);

        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}
242
243static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
244{
245 int index = spapr_get_vcpu_id(cpu);
246 uint32_t associativity[] = {cpu_to_be32(0x5),
247 cpu_to_be32(0x0),
248 cpu_to_be32(0x0),
249 cpu_to_be32(0x0),
250 cpu_to_be32(cpu->node_id),
251 cpu_to_be32(index)};
252
253
254 return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
255 sizeof(associativity));
256}
257
258
/* Emit the "ibm,pa-features" property for a CPU node.
 *
 * Each table below is a pa-features byte string: the first two bytes are
 * the attribute byte count and an index, followed by the attribute
 * bytes. The widest table matching the CPU's compat level wins (the
 * checks are cumulative, so a later match overrides an earlier one),
 * then individual bits are tweaked for the runtime configuration before
 * the property is written.
 */
static void spapr_populate_pa_features(sPAPRMachineState *spapr,
                                       PowerPCCPU *cpu,
                                       void *fdt, int offset,
                                       bool legacy_guest)
{
    CPUPPCState *env = &cpu->env;
    /* ISA 2.06 (POWER7-level) feature bytes */
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    /* ISA 2.07 (POWER8-level) feature bytes */
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    /* ISA 3.00 (POWER9-level) feature bytes */
    uint8_t pa_features_300[] = { 66, 0,

        /* bytes 0 - 5 */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,

        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x80, 0x00,

        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x80, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x80, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x80, 0x00,

        0x80, 0x00, 0x80, 0x00, 0x80, 0x00,

        /* bytes 60 - 65 */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00,
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }
    /* No table matched (CPU below ISA 2.06): emit no property at all */
    if (!pa_features) {
        return;
    }

    if (env->ci_large_pages) {
        /* Advertise support for cache-inhibited large pages (bit 0x20 of
         * attribute byte 3) only when the CPU model actually has it.
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        /* Transactional memory flag in attribute byte 24; only the 2.07
         * and 3.00 tables are long enough to carry it.
         */
        pa_features[24] |= 0x80;
    }
    if (legacy_guest && pa_size > 40) {
        /* Clear the radix-MMU bit in byte 42 for guests that negotiated
         * the legacy (pre-ISA-3.00) CAS format -- presumably to keep
         * such guests from attempting radix mode; TODO confirm the exact
         * bit semantics against LoPAPR.
         */
        pa_features[40 + 2] &= ~0x80;
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
338
/* Refresh the per-CPU nodes of an existing device tree (used when
 * building the CAS response): for every vcore-leading thread, (re)create
 * its /cpus/<model>@<id> node and rewrite the pft-size, NUMA, SMT and
 * pa-features properties. Returns 0 or a negative libfdt error code.
 */
static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
{
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = spapr_get_vcpu_id(cpu);
        int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));

        /* Only thread 0 of each virtual core gets a device-tree node */
        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);

        /* Find or create the /cpus container, then this CPU's node */
        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0) {
            cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
            if (cpus_offset < 0) {
                return cpus_offset;
            }
        }
        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        if (nb_numa_nodes > 1) {
            ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
            if (ret < 0) {
                return ret;
            }
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
        if (ret < 0) {
            return ret;
        }

        spapr_populate_pa_features(spapr, cpu, fdt, offset,
                                   spapr->cas_legacy_guest_workaround);
    }
    return ret;
}
396
397static hwaddr spapr_node0_size(MachineState *machine)
398{
399 if (nb_numa_nodes) {
400 int i;
401 for (i = 0; i < nb_numa_nodes; ++i) {
402 if (numa_info[i].node_mem) {
403 return MIN(pow2floor(numa_info[i].node_mem),
404 machine->ram_size);
405 }
406 }
407 }
408 return machine->ram_size;
409}
410
411static void add_str(GString *s, const gchar *s1)
412{
413 g_string_append_len(s, s1, strlen(s1) + 1);
414}
415
416static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
417 hwaddr size)
418{
419 uint32_t associativity[] = {
420 cpu_to_be32(0x4),
421 cpu_to_be32(0x0), cpu_to_be32(0x0),
422 cpu_to_be32(0x0), cpu_to_be32(nodeid)
423 };
424 char mem_name[32];
425 uint64_t mem_reg_property[2];
426 int off;
427
428 mem_reg_property[0] = cpu_to_be64(start);
429 mem_reg_property[1] = cpu_to_be64(size);
430
431 sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
432 off = fdt_add_subnode(fdt, 0, mem_name);
433 _FDT(off);
434 _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
435 _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
436 sizeof(mem_reg_property))));
437 _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
438 sizeof(associativity))));
439 return off;
440}
441
/* Emit memory@... nodes covering all of RAM, one set per NUMA node.
 * The first chunk is always the RMA (node 0 start), and every further
 * chunk is a power-of-two size naturally aligned to its start address,
 * as required for the "reg" layout used here. Always returns 0.
 */
static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = nb_numa_nodes;
    NodeInfo *nodes = numa_info;
    NodeInfo ramnode;

    /* No NUMA: synthesize a single node holding all of RAM */
    if (!nb_numa_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        /* Clamp the node's size so the total never exceeds RAM size */
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* The very first region is the RMA */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* Keep each chunk naturally aligned to mem_start */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
491
/* Fill in all properties of a single CPU device-tree node at @offset:
 * identification, cache geometry, frequencies, segment/page-size
 * capabilities, SMT/NUMA layout and hotplug DRC index. libfdt errors
 * abort via _FDT().
 */
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    /* Segment sizes advertised for "ibm,processor-segment-sizes" */
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    uint32_t vcpus_per_socket = smp_threads * smp_cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    sPAPRDRConnector *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    /* Advertise the DR connector index when this CPU is hot-pluggable */
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    /* Cache sizes are only emitted when the CPU class declares them */
    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    /* PURR register is advertised only when the SPR is implemented */
    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
    }

    if (env->mmu_model & POWERPC_MMU_1TSEG) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* "ibm,vmx" encodes the Altivec/VSX level: 2 when VSX is enabled,
     * otherwise 1 (VMX only).
     */
    if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
    } else {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
    }

    /* Decimal floating point, gated by the machine capability */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_populate_pa_features(spapr, cpu, fdt, offset, false);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (nb_numa_nodes > 1) {
        _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    /* Radix page-size encodings, taken from the CPU class table */
    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }
}
613
/* Create the /cpus container and one child node per virtual core
 * (thread 0 only), delegating per-CPU properties to
 * spapr_populate_cpu_dt().
 */
static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
{
    CPUState *cs;
    int cpus_offset;
    char *nodename;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /* Iterate in reverse CPU order -- presumably so that, given how new
     * subnodes are inserted, the nodes end up in ascending CPU order in
     * the finished tree; TODO confirm against libfdt insertion order.
     */
    CPU_FOREACH_REVERSE(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = spapr_get_vcpu_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        /* Only thread 0 of each virtual core gets a node */
        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }

}
648
649
650
651
652
653
/* Build the ibm,dynamic-reconfiguration-memory node describing
 * hot-pluggable memory as LMBs (logical memory blocks): the LMB size,
 * the per-LMB "ibm,dynamic-memory" list and the NUMA
 * "ibm,associativity-lookup-arrays". Returns 0 or a negative libfdt
 * error code.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;

    /* No hotpluggable memory configured: nothing to describe */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /* One shared scratch buffer, sized for the larger of the two
     * properties built below (dynamic-memory vs lookup-arrays).
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    /* ibm,dynamic-memory: entry count followed by one 6-word record per
     * LMB (address hi/lo, DRC index, reserved, NUMA node, flags).
     */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            sPAPRDRConnector *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0);
            dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /* LMBs below the hotplug region cover boot-time RAM: they
             * have no DRC and are marked reserved/invalid so the guest
             * never tries to unplug them.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0);
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays: node count, array length (4),
     * then one 4-word associativity array per NUMA node.
     */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4);
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
            (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}
766
/* Apply device-tree updates that depend on the options negotiated at
 * CAS time (@ov5_updates): the dynamic-reconfiguration memory node when
 * the guest accepted it, plus the "ibm,architecture-vec-5" property
 * under /chosen. Returns 0 or a negative error code.
 */
static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
                                sPAPROptionVector *ov5_updates)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            goto out;
        }
    }

    /* /chosen may be absent in a freshly built CAS response tree */
    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

out:
    return ret;
}
795
796static bool spapr_hotplugged_dev_before_cas(void)
797{
798 Object *drc_container, *obj;
799 ObjectProperty *prop;
800 ObjectPropertyIterator iter;
801
802 drc_container = container_get(object_get_root(), "/dr-connector");
803 object_property_iter_init(&iter, drc_container);
804 while ((prop = object_property_iter_next(&iter))) {
805 if (!strstart(prop->type, "link<", NULL)) {
806 continue;
807 }
808 obj = object_property_get_link(drc_container, prop->name, NULL);
809 if (spapr_drc_needed(obj)) {
810 return true;
811 }
812 }
813 return false;
814}
815
816int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
817 target_ulong addr, target_ulong size,
818 sPAPROptionVector *ov5_updates)
819{
820 void *fdt, *fdt_skel;
821 sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
822
823 if (spapr_hotplugged_dev_before_cas()) {
824 return 1;
825 }
826
827 if (size < sizeof(hdr) || size > FW_MAX_SIZE) {
828 error_report("SLOF provided an unexpected CAS buffer size "
829 TARGET_FMT_lu " (min: %zu, max: %u)",
830 size, sizeof(hdr), FW_MAX_SIZE);
831 exit(EXIT_FAILURE);
832 }
833
834 size -= sizeof(hdr);
835
836
837 fdt_skel = g_malloc0(size);
838 _FDT((fdt_create(fdt_skel, size)));
839 _FDT((fdt_begin_node(fdt_skel, "")));
840 _FDT((fdt_end_node(fdt_skel)));
841 _FDT((fdt_finish(fdt_skel)));
842 fdt = g_malloc0(size);
843 _FDT((fdt_open_into(fdt_skel, fdt, size)));
844 g_free(fdt_skel);
845
846
847 _FDT((spapr_fixup_cpu_dt(fdt, spapr)));
848
849 if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
850 return -1;
851 }
852
853
854 _FDT((fdt_pack(fdt)));
855
856 if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
857 trace_spapr_cas_failed(size);
858 return -1;
859 }
860
861 cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
862 cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
863 trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
864 g_free(fdt);
865
866 return 0;
867}
868
/* Build the /rtas node: hypertas function lists, NUMA reference points,
 * RTAS limits, MSI-X capability and LRDR capacity, then the RTAS token
 * properties via spapr_dt_rtas_tokens().
 */
static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    /* ibm,lrdr-capacity: max address (hi/lo), LMB size (hi/lo), max CPUs */
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas entries are NUL-separated strings (see add_str) */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    if (msi_nonbroken) {
        _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
    }

    /* Advertise extended OS termination support so guests call
     * ibm,os-term on panic instead of simply halting.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}
942
943
944
945
/* Emit "ibm,arch-vec-5-platform-support" under /chosen: pairs of
 * (option-vector-5 byte number, supported-values byte). val[3] -- the
 * value for byte 24 (MMU support) -- is chosen from the host's
 * radix/hash capabilities; the other bytes are left at 0x00/0x40 as
 * initialized below.
 */
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, 0x00, /* Xive mode, filled in below */
        24, 0x00, /* Hash/Radix, filled in below */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no */
        26, 0x40, /* Radix options: GTSE == yes */
    };

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /* Pre-ISA-3.00 CPU: hash only */
        val[3] = 0x00;
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* TCG: advertise both hash and radix */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}
976
/* Build the /chosen node: kernel command line, initrd location, boot
 * kernel/device/list, graphics parameters, stdout path and the OV5
 * platform-support property.
 */
static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        /* Location/size of the loaded kernel, as a 64-bit pair */
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        /* The boot list is newline-separated; SLOF expects spaces */
        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    /* Route the kernel console to the VIO console unless graphics exist */
    if (!spapr->has_graphics && stdout_path) {
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}
1035
/* Build the /hypervisor node advertising KVM, including the hypercall
 * instruction sequence when the host can patch hcalls (KVM PR).
 */
static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));

    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /* Only emit "hcall-instructions" when the kernel supports
         * fixing up hypercalls; otherwise the guest uses the default
         * sc1 mechanism.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}
1058
/* Construct the complete flattened device tree for the machine:
 * root-node identification, interrupt controller, memory, VIO and PCI
 * devices, CPUs, DR connectors, RTAS, /chosen, /hypervisor and memory
 * reservations. Returns a g_malloc'd FDT_MAX_SIZE buffer owned by the
 * caller; fatal errors exit the process.
 */
static void *spapr_build_fdt(sPAPRMachineState *spapr,
                             hwaddr rtas_addr,
                             hwaddr rtas_size)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    sPAPRPHBState *phb;
    char *buf;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /* Host identification, when KVM exposes it */
    if (kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    buf = qemu_uuid_unparse_strdup(&qemu_uuid);

    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt-controller (XICS) */
    spapr_dt_xics(xics_max_server_number(spapr), fdt, PHANDLE_XICP);

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice (VIO bus) */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* /cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor (only meaningful under KVM) */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Reserve the kernel and initrd regions in the memory map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* Re-apply the CAS-negotiated options to the freshly built tree */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    return fdt;
}
1184
1185static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
1186{
1187 return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
1188}
1189
/* TCG entry point for sc1 hypercalls: dispatch to spapr_hypercall()
 * unless the guest issued the call from problem state (MSR[PR]=1), in
 * which case H_PRIVILEGE is returned. Result goes in r3, arguments are
 * read from r3..rN.
 */
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}
1205
1206static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
1207{
1208 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1209
1210 return spapr->patb_entry;
1211}
1212
/* Helpers for the software hash page table: each HPTE is a pair of
 * 64-bit doublewords stored target-endian (hence the tswap64). HPTE()
 * yields a pointer to entry _i of _table; the other macros test or
 * update the VALID bit and QEMU's private migration-DIRTY bit in the
 * first doubleword.
 */
#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
1218
1219
1220
1221
1222static int get_htab_fd(sPAPRMachineState *spapr)
1223{
1224 Error *local_err = NULL;
1225
1226 if (spapr->htab_fd >= 0) {
1227 return spapr->htab_fd;
1228 }
1229
1230 spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
1231 if (spapr->htab_fd < 0) {
1232 error_report_err(local_err);
1233 }
1234
1235 return spapr->htab_fd;
1236}
1237
1238void close_htab_fd(sPAPRMachineState *spapr)
1239{
1240 if (spapr->htab_fd >= 0) {
1241 close(spapr->htab_fd);
1242 }
1243 spapr->htab_fd = -1;
1244}
1245
1246static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
1247{
1248 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1249
1250 return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
1251}
1252
1253static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
1254{
1255 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1256
1257 assert(kvm_enabled());
1258
1259 if (!spapr->htab) {
1260 return 0;
1261 }
1262
1263 return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
1264}
1265
1266static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
1267 hwaddr ptex, int n)
1268{
1269 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1270 hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
1271
1272 if (!spapr->htab) {
1273
1274
1275
1276 ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
1277 kvmppc_read_hptes(hptes, ptex, n);
1278 return hptes;
1279 }
1280
1281
1282
1283
1284
1285 return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
1286}
1287
1288static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
1289 const ppc_hash_pte64_t *hptes,
1290 hwaddr ptex, int n)
1291{
1292 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1293
1294 if (!spapr->htab) {
1295 g_free((void *)hptes);
1296 }
1297
1298
1299}
1300
1301static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
1302 uint64_t pte0, uint64_t pte1)
1303{
1304 sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
1305 hwaddr offset = ptex * HASH_PTE_SIZE_64;
1306
1307 if (!spapr->htab) {
1308 kvmppc_write_hpte(ptex, pte0, pte1);
1309 } else {
1310 stq_p(spapr->htab + offset, pte0);
1311 stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
1312 }
1313}
1314
/* Pick an HPT order for the given RAM size: log2 of the next power of
 * two of ramsize, minus 7 (i.e. an HPT of roughly ramsize / 128),
 * clamped to the range [18, 46]. */
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift = ctz64(pow2ceil(ramsize)) - 7;

    if (shift < 18) {
        shift = 18;
    } else if (shift > 46) {
        shift = 46;
    }

    return shift;
}
1327
1328void spapr_free_hpt(sPAPRMachineState *spapr)
1329{
1330 g_free(spapr->htab);
1331 spapr->htab = NULL;
1332 spapr->htab_shift = 0;
1333 close_htab_fd(spapr);
1334}
1335
/*
 * Discard any existing hash page table and allocate a fresh one of
 * order @shift (2^shift bytes), preferring a kernel-side table via KVM
 * and falling back to a userspace allocation.  Errors are reported via
 * @errp; note that the rc < 0 path deliberately does not return early.
 */
void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
                          Error **errp)
{
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);
    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno,
                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
                         shift);
        /* This is almost certainly fatal, but if the caller really
         * wants to carry on with shift == 0, it's welcome to try */
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
                       shift, rc);
        }

        /* htab == NULL means the table is kernel-managed */
        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed: allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        if (!spapr->htab) {
            error_setg_errno(errp, errno,
                             "Could not allocate HPT of order %d", shift);
            return;
        }

        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        /* Mark every entry dirty so the whole table gets migrated */
        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
    /* Setting up a hash table implies we're not running radix */
    spapr->patb_entry = 0;
}
1384
1385void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr)
1386{
1387 int hpt_shift;
1388
1389 if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
1390 || (spapr->cas_reboot
1391 && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) {
1392 hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1393 } else {
1394 uint64_t current_ram_size;
1395
1396 current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
1397 hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
1398 }
1399 spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);
1400
1401 if (spapr->vrma_adjust) {
1402 spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)),
1403 spapr->htab_shift);
1404 }
1405}
1406
1407static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
1408{
1409 bool matched = false;
1410
1411 if (object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
1412 matched = true;
1413 }
1414
1415 if (!matched) {
1416 error_report("Device %s is not supported by this machine yet.",
1417 qdev_fw_name(DEVICE(sbdev)));
1418 exit(1);
1419 }
1420}
1421
1422static int spapr_reset_drcs(Object *child, void *opaque)
1423{
1424 sPAPRDRConnector *drc =
1425 (sPAPRDRConnector *) object_dynamic_cast(child,
1426 TYPE_SPAPR_DR_CONNECTOR);
1427
1428 if (drc) {
1429 spapr_drc_reset(drc);
1430 }
1431
1432 return 0;
1433}
1434
/*
 * Machine reset handler: rebuild MMU state, re-run device resets,
 * regenerate the device tree and RTAS blob in guest memory, and set up
 * the boot CPU's entry state.
 */
static void ppc_spapr_reset(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    uint32_t rtas_limit;
    hwaddr rtas_addr, fdt_addr;
    void *fdt;
    int rc;

    /* Refuse to start if an unsupported dynamic sysbus device was given */
    foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);

    spapr_caps_reset(spapr);

    first_ppc_cpu = POWERPC_CPU(first_cpu);
    if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
        ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        /* KVM with radix support on a POWER9-compatible CPU: no HPT is
         * built; flag radix mode via the GR bit of the partition table
         * entry instead. */
        spapr->patb_entry = PATBE1_GR;
    } else {
        spapr_setup_hpt_and_vrma(spapr);
    }

    /* Unless this reset was triggered by CAS, drop the previously
     * negotiated options and renegotiate from scratch. */
    if (!spapr->cas_reboot) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_new();

        ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
    }

    qemu_devices_reset();

    /* Reset the DR connectors only after the devices themselves have
     * been reset. */
    object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);

    spapr_clear_pending_events(spapr);

    /* Layout at the top of the RMA: RTAS just below the
     * min(RMA, RTAS_MAX_ADDR) limit, the device tree right below it. */
    rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
    rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    fdt_addr = rtas_addr - FDT_MAX_SIZE;

    fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);

    spapr_load_rtas(spapr, fdt, rtas_addr);

    rc = fdt_pack(fdt);

    /* Should only fail if we've built a corrupted tree */
    assert(rc == 0);

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* Copy the packed tree into guest memory and release our copy */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    g_free(fdt);

    /* Entry state for the boot CPU: r3 = fdt address, r5 = 0 */
    first_ppc_cpu->env.gpr[3] = fdt_addr;
    first_ppc_cpu->env.gpr[5] = 0;
    first_cpu->halted = 0;
    first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;

    spapr->cas_reboot = false;
}
1519
1520static void spapr_create_nvram(sPAPRMachineState *spapr)
1521{
1522 DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
1523 DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
1524
1525 if (dinfo) {
1526 qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
1527 &error_fatal);
1528 }
1529
1530 qdev_init_nofail(dev);
1531
1532 spapr->nvram = (struct sPAPRNVRAM *)dev;
1533}
1534
/*
 * Instantiate and realize the PAPR RTC as a child of the machine
 * object, and expose its "date" property through a machine-level
 * "rtc-time" alias.
 */
static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
    object_property_add_child(OBJECT(spapr), "rtc", OBJECT(&spapr->rtc),
                              &error_fatal);
    object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
                             &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date", &error_fatal);
}
1545
1546
1547static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
1548{
1549 switch (vga_interface_type) {
1550 case VGA_NONE:
1551 return false;
1552 case VGA_DEVICE:
1553 return true;
1554 case VGA_STD:
1555 case VGA_VIRTIO:
1556 return pci_vga_init(pci_bus) != NULL;
1557 default:
1558 error_setg(errp,
1559 "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1560 return false;
1561 }
1562}
1563
/* VMState pre_load hook: only capability state needs preparation
 * before an incoming migration stream. */
static int spapr_pre_load(void *opaque)
{
    return spapr_caps_pre_load(opaque);
}
1575
/*
 * VMState post_load hook: fix up state that cannot be transferred
 * directly — capability checks, interrupt controller resend, legacy
 * RTC offset, and the in-kernel MMU mode.
 */
static int spapr_post_load(void *opaque, int version_id)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    int err = 0;

    err = spapr_caps_post_migration(spapr);
    if (err) {
        return err;
    }

    /* For a userspace (non-KVM) interrupt controller, resend pending
     * interrupts to each vCPU's presentation controller. */
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(ICP(cpu->intc));
        }
    }

    /* Pre-version-3 streams carried the RTC offset in the machine state
     * itself rather than in the RTC device; poke the incoming value
     * into the RTC device here. */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
    }

    /* A non-zero partition table entry means a POWER9-style MMU config
     * must be re-established in the host kernel. */
    if (kvm_enabled() && spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATBE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    return err;
}
1616
/* VMState pre_save hook: only capability state needs preparation
 * before an outgoing migration stream. */
static int spapr_pre_save(void *opaque)
{
    return spapr_caps_pre_save(opaque);
}
1628
/* VMState field test: true for migration streams older than version 3. */
static bool version_before_3(void *opaque, int version_id)
{
    return version_id <= 2;
}
1633
1634static bool spapr_pending_events_needed(void *opaque)
1635{
1636 sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
1637 return !QTAILQ_EMPTY(&spapr->pending_events);
1638}
1639
/* Migration format for one entry of the pending RTAS event log. */
static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, sPAPREventLogEntry),
        VMSTATE_UINT32(extended_length, sPAPREventLogEntry),
        /* extended_log buffer is sized by the extended_length field */
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, sPAPREventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};
1652
/* Optional subsection carrying the queue of not-yet-delivered events. */
static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, sPAPRMachineState, 1,
                         vmstate_spapr_event_entry, sPAPREventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};
1664
/*
 * Subsection predicate for the CAS-negotiated option vector: decide
 * whether the negotiated OV5 state must be sent to the destination.
 *
 * The options in ov5_mask (FORM1 affinity and dynamic-reconfiguration
 * memory) are treated as "legacy" — their effect is already encoded in
 * the device tree the guest booted with, so they don't need to travel
 * in the stream.  If the machine's ov5 contains nothing beyond that
 * mask, the subsection can be omitted, which keeps migration
 * compatibility with older machine versions that never sent it.
 */
static bool spapr_ov5_cas_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    sPAPROptionVector *ov5_mask = spapr_ovec_new();
    sPAPROptionVector *ov5_legacy = spapr_ovec_new();
    sPAPROptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Options whose effects are fully captured outside the stream */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);

    /* legacy = ov5 ∩ mask; the subsection is needed exactly when ov5
     * has bits that the mask does not cover */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}
1714
/* Optional subsection carrying the CAS-negotiated option vector. */
static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
                                 vmstate_spapr_ovec, sPAPROptionVector),
        VMSTATE_END_OF_LIST()
    },
};
1726
1727static bool spapr_patb_entry_needed(void *opaque)
1728{
1729 sPAPRMachineState *spapr = opaque;
1730
1731 return !!spapr->patb_entry;
1732}
1733
/* Optional subsection carrying the partition table entry. */
static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
        VMSTATE_END_OF_LIST()
    },
};
1744
/* Top-level migration description for the sPAPR machine state. */
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .pre_load = spapr_pre_load,
    .post_load = spapr_post_load,
    .pre_save = spapr_pre_save,
    .fields = (VMStateField[]) {
        /* 4 bytes of padding kept for stream compatibility with
         * pre-version-3 sources (field no longer used) */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset, only present in pre-version-3 streams; imported
         * into the RTC device by spapr_post_load() */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        &vmstate_spapr_cap_htm,
        &vmstate_spapr_cap_vsx,
        &vmstate_spapr_cap_dfp,
        &vmstate_spapr_cap_cfpc,
        &vmstate_spapr_cap_sbbc,
        &vmstate_spapr_cap_ibs,
        NULL
    }
};
1775
1776static int htab_save_setup(QEMUFile *f, void *opaque)
1777{
1778 sPAPRMachineState *spapr = opaque;
1779
1780
1781 if (!spapr->htab_shift) {
1782 qemu_put_be32(f, -1);
1783 } else {
1784 qemu_put_be32(f, spapr->htab_shift);
1785 }
1786
1787 if (spapr->htab) {
1788 spapr->htab_save_index = 0;
1789 spapr->htab_first_pass = true;
1790 } else {
1791 if (spapr->htab_shift) {
1792 assert(kvm_enabled());
1793 }
1794 }
1795
1796
1797 return 0;
1798}
1799
/*
 * Emit one chunk of the HPT stream: a header (start index, count of
 * valid entries, count of invalid entries) followed by the raw bytes
 * of the n_valid valid entries only.
 */
static void htab_save_chunk(QEMUFile *f, sPAPRMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}
1809
/* All-zero chunk header: terminates one iteration of the HPT stream
 * (matched by the end-of-stream check in htab_load()). */
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}
1816
/*
 * First pass over a userspace HPT: stream runs of valid entries (with
 * n_invalid == 0) until the time budget (max_ns, -1 = unlimited) or
 * the file's rate limit is hit.  Every examined entry is marked clean.
 * Progress is kept in spapr->htab_save_index across calls; reaching the
 * end of the table switches to the later-pass (dirty tracking) mode.
 */
static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    /* End of table reached: subsequent calls do dirty-only passes */
    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}
1864
/*
 * Later passes over a userspace HPT: stream only entries marked dirty
 * since the last pass.  Unlike the first pass, invalid dirty entries
 * are also reported (as n_invalid) so the destination can zap stale
 * slots.  max_ns < 0 means "final pass, no time or rate limit".
 * Returns 1 when a complete scan found nothing left to send, else 0.
 */
static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            /* Stop mid-scan once the time budget is exhausted */
            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        /* Wrap around and keep scanning from the start of the table */
        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    /* Wrap for the next call too, if we stopped exactly at the end */
    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}
1937
/* Time budget for one htab_save_iterate() call (5 ms) */
#define MAX_ITERATION_NS    5000000
/* Buffer size used when streaming a kernel-managed HPT via KVM */
#define MAX_KVM_BUF_SIZE    2048
1940
/*
 * SaveVMHandlers.save_live_iterate for the HPT: emit one bounded burst
 * of table data.  Returns 1 when there is (currently) nothing more to
 * send, 0 to be called again, or a negative error.
 */
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header: -1 means no HPT, nothing further to stream */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        /* Kernel-managed table: let KVM stream it through the htab fd */
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}
1977
/*
 * SaveVMHandlers.save_live_complete_precopy for the HPT: flush all
 * remaining table state with no time or rate limits (max_ns = -1).
 */
static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;

    /* Iteration header: -1 means no HPT, nothing further to stream */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        /* Kernel-managed table: drain it fully through the htab fd */
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        /* Finish the first pass if it never completed, then send all
         * remaining dirty entries */
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End of stream marker */
    htab_save_end_marker(f);

    return 0;
}
2017
/*
 * SaveVMHandlers.load_state for the HPT: consume one section of the
 * stream produced by the save side.  A section header of -1 means the
 * source had no HPT; a non-zero header is the table order and triggers
 * (re)allocation; a zero header is followed by chunks until an
 * all-zero chunk header terminates the section.
 */
static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPRMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    Error *local_err = NULL;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        /* Source has no HPT: make sure we don't either */
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        /* First section carries the htab order */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        /* Kernel-managed table: load chunks through a write-mode fd */
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of section marker */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Chunk would run past the end of the table */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            /* Userspace table: copy valid entries in, zero invalid ones */
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}
2107
/* SaveVMHandlers.save_cleanup: drop the cached kernel HPT fd, if any. */
static void htab_save_cleanup(void *opaque)
{
    close_htab_fd(opaque);
}
2114
/* Live-migration handler table for the hashed page table stream. */
static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};
2122
/*
 * qemu_register_boot_set() callback: record a new boot device order.
 * NOTE(review): the previous boot_order string is not freed here — it
 * may point to static storage assigned at startup, so confirm ownership
 * before adding a g_free().
 */
static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    MachineState *machine = MACHINE(opaque);
    machine->boot_order = g_strdup(boot_device);
}
2129
2130static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
2131{
2132 MachineState *machine = MACHINE(spapr);
2133 uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
2134 uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
2135 int i;
2136
2137 for (i = 0; i < nr_lmbs; i++) {
2138 uint64_t addr;
2139
2140 addr = i * lmb_size + spapr->hotplug_memory.base;
2141 spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
2142 addr / lmb_size);
2143 }
2144}
2145
2146
2147
2148
2149
2150
2151static void spapr_validate_node_memory(MachineState *machine, Error **errp)
2152{
2153 int i;
2154
2155 if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2156 error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
2157 " is not aligned to %llu MiB",
2158 machine->ram_size,
2159 SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
2160 return;
2161 }
2162
2163 if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
2164 error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
2165 " is not aligned to %llu MiB",
2166 machine->ram_size,
2167 SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
2168 return;
2169 }
2170
2171 for (i = 0; i < nb_numa_nodes; i++) {
2172 if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
2173 error_setg(errp,
2174 "Node %d memory size 0x%" PRIx64
2175 " is not aligned to %llu MiB",
2176 i, numa_info[i].node_mem,
2177 SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
2178 return;
2179 }
2180 }
2181}
2182
2183
2184static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
2185{
2186 int index = id / smp_threads;
2187
2188 if (index >= ms->possible_cpus->len) {
2189 return NULL;
2190 }
2191 if (idx) {
2192 *idx = index;
2193 }
2194 return &ms->possible_cpus->cpus[index];
2195}
2196
/*
 * Create the machine's CPU cores: validate the SMP topology, create a
 * DR connector per hotpluggable core, and realize the cores that are
 * present at boot.
 */
static void spapr_init_cpus(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *type = spapr_get_cpu_core_type(machine->cpu_type);
    const CPUArchIdList *possible_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    if (!type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        /* Hotplug works in units of whole cores, so cpu counts must be
         * multiples of the threads-per-core count */
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    if (smc->pre_2_10_has_unused_icps) {
        int i;

        /* Older machine versions allocated ICP objects for every
         * possible server number; register dummies so their migration
         * stream stays compatible */
        for (i = 0; i < xics_max_server_number(spapr); i++) {
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   spapr_vcpu_id(spapr, core_id));
        }

        if (i < boot_cores_nr) {
            Object *core = object_new(type);
            int nr_threads = smp_threads;

            /* The last core may have fewer threads when smp_cpus is not
             * a multiple of smp_threads */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, nr_threads, "nr-threads",
                                    &error_fatal);
            object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
                                    &error_fatal);
            object_property_set_bool(core, true, "realized", &error_fatal);
        }
    }
}
2268
/*
 * Validate the SMP threads configuration and choose/apply the VSMT
 * (virtual SMT) mode: user-specified via spapr->vsmt, or defaulted to
 * MAX(8, smp_threads).  Under KVM, also tries to program the kernel's
 * SMT mode to match.
 */
static void spapr_set_vsmt_mode(sPAPRMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;

    if (!kvm_enabled() && (smp_threads > 1)) {
        error_setg(&local_err, "TCG cannot support more than 1 thread/core "
                   "on a pseries machine");
        goto out;
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(&local_err, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        goto out;
    }

    /* Determine the VSMT mode to use */
    if (vsmt_user) {
        /* User-specified mode must cover at least threads/core */
        if (spapr->vsmt < smp_threads) {
            error_setg(&local_err, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            goto out;
        }

    } else {
        /* Default: at least 8, or threads/core if larger */
        spapr->vsmt = MAX(8, smp_threads);
    }

    /* Under KVM, try to bring the kernel's SMT mode in line */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            /* The kernel refused to change its SMT mode */
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            /* We can live with the mismatch when the kernel mode covers
             * threads/core and divides the chosen VSMT evenly — then it
             * is only a warning */
            if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
                warn_report_err(local_err);
                local_err = NULL;
                goto out;
            } else {
                if (!vsmt_user) {
                    error_append_hint(&local_err,
                                      "On PPC, a VM with %d threads/core"
                                      " on a host with %d threads/core"
                                      " requires the use of VSMT mode %d.\n",
                                      smp_threads, kvm_smt, spapr->vsmt);
                }
                kvmppc_hint_smt_possible(&local_err);
                goto out;
            }
        }
    }

out:
    error_propagate(errp, local_err);
}
2340
2341
2342static void ppc_spapr_init(MachineState *machine)
2343{
2344 sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
2345 sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
2346 const char *kernel_filename = machine->kernel_filename;
2347 const char *initrd_filename = machine->initrd_filename;
2348 PCIHostState *phb;
2349 int i;
2350 MemoryRegion *sysmem = get_system_memory();
2351 MemoryRegion *ram = g_new(MemoryRegion, 1);
2352 MemoryRegion *rma_region;
2353 void *rma = NULL;
2354 hwaddr rma_alloc_size;
2355 hwaddr node0_size = spapr_node0_size(machine);
2356 long load_limit, fw_size;
2357 char *filename;
2358 Error *resize_hpt_err = NULL;
2359 PowerPCCPU *first_ppc_cpu;
2360
2361 msi_nonbroken = true;
2362
2363 QLIST_INIT(&spapr->phbs);
2364 QTAILQ_INIT(&spapr->pending_dimm_unplugs);
2365
2366
2367 kvmppc_check_papr_resize_hpt(&resize_hpt_err);
2368 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
2369
2370
2371
2372
2373
2374
2375 if (resize_hpt_err) {
2376 spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2377 error_free(resize_hpt_err);
2378 resize_hpt_err = NULL;
2379 } else {
2380 spapr->resize_hpt = smc->resize_hpt_default;
2381 }
2382 }
2383
2384 assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);
2385
2386 if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
2387
2388
2389
2390 error_report_err(resize_hpt_err);
2391 exit(1);
2392 }
2393
2394
2395 rma_alloc_size = kvmppc_alloc_rma(&rma);
2396
2397 if (rma_alloc_size == -1) {
2398 error_report("Unable to create RMA");
2399 exit(1);
2400 }
2401
2402 if (rma_alloc_size && (rma_alloc_size < node0_size)) {
2403 spapr->rma_size = rma_alloc_size;
2404 } else {
2405 spapr->rma_size = node0_size;
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416 if (kvm_enabled()) {
2417 spapr->vrma_adjust = 1;
2418 spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
2419 }
2420
2421
2422
2423
2424
2425
2426 spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
2427 }
2428
2429 if (spapr->rma_size > node0_size) {
2430 error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
2431 spapr->rma_size);
2432 exit(1);
2433 }
2434
2435
2436 load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
2437
2438
2439 xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);
2440
2441
2442
2443 spapr->ov5 = spapr_ovec_new();
2444 spapr->ov5_cas = spapr_ovec_new();
2445
2446 if (smc->dr_lmb_enabled) {
2447 spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
2448 spapr_validate_node_memory(machine, &error_fatal);
2449 }
2450
2451 spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
2452
2453
2454 if (spapr->use_hotplug_event_source) {
2455 spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
2456 }
2457
2458
2459 if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
2460 spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
2461 }
2462
2463
2464 spapr_set_vsmt_mode(spapr, &error_fatal);
2465
2466 spapr_init_cpus(spapr);
2467
2468 first_ppc_cpu = POWERPC_CPU(first_cpu);
2469 if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2470 ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
2471 spapr->max_compat_pvr)) {
2472
2473 spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
2474 }
2475
2476
2477 if (kvm_enabled()) {
2478
2479 kvmppc_enable_logical_ci_hcalls();
2480 kvmppc_enable_set_mode_hcall();
2481
2482
2483 kvmppc_enable_clear_ref_mod_hcalls();
2484 }
2485
2486
2487 memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
2488 machine->ram_size);
2489 memory_region_add_subregion(sysmem, 0, ram);
2490
2491 if (rma_alloc_size && rma) {
2492 rma_region = g_new(MemoryRegion, 1);
2493 memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
2494 rma_alloc_size, rma);
2495 vmstate_register_ram_global(rma_region);
2496 memory_region_add_subregion(sysmem, 0, rma_region);
2497 }
2498
2499
2500 if (machine->ram_size < machine->maxram_size) {
2501 ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
2502
2503
2504
2505
2506
2507 int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
2508 SPAPR_MAX_RAM_SLOTS;
2509
2510 if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
2511 max_memslots = SPAPR_MAX_RAM_SLOTS;
2512 }
2513 if (machine->ram_slots > max_memslots) {
2514 error_report("Specified number of memory slots %"
2515 PRIu64" exceeds max supported %d",
2516 machine->ram_slots, max_memslots);
2517 exit(1);
2518 }
2519
2520 spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
2521 SPAPR_HOTPLUG_MEM_ALIGN);
2522 memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
2523 "hotplug-memory", hotplug_mem_size);
2524 memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
2525 &spapr->hotplug_memory.mr);
2526 }
2527
2528 if (smc->dr_lmb_enabled) {
2529 spapr_create_lmb_dr_connectors(spapr);
2530 }
2531
2532 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
2533 if (!filename) {
2534 error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
2535 exit(1);
2536 }
2537 spapr->rtas_size = get_image_size(filename);
2538 if (spapr->rtas_size < 0) {
2539 error_report("Could not get size of LPAR rtas '%s'", filename);
2540 exit(1);
2541 }
2542 spapr->rtas_blob = g_malloc(spapr->rtas_size);
2543 if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
2544 error_report("Could not load LPAR rtas '%s'", filename);
2545 exit(1);
2546 }
2547 if (spapr->rtas_size > RTAS_MAX_SIZE) {
2548 error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
2549 (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
2550 exit(1);
2551 }
2552 g_free(filename);
2553
2554
2555 spapr_events_init(spapr);
2556
2557
2558 spapr_rtc_create(spapr);
2559
2560
2561 spapr->vio_bus = spapr_vio_bus_init();
2562
2563 for (i = 0; i < MAX_SERIAL_PORTS; i++) {
2564 if (serial_hds[i]) {
2565 spapr_vty_create(spapr->vio_bus, serial_hds[i]);
2566 }
2567 }
2568
2569
2570 spapr_create_nvram(spapr);
2571
2572
2573 spapr_pci_rtas_init();
2574
2575 phb = spapr_create_phb(spapr, 0);
2576
2577 for (i = 0; i < nb_nics; i++) {
2578 NICInfo *nd = &nd_table[i];
2579
2580 if (!nd->model) {
2581 nd->model = g_strdup("ibmveth");
2582 }
2583
2584 if (strcmp(nd->model, "ibmveth") == 0) {
2585 spapr_vlan_create(spapr->vio_bus, nd);
2586 } else {
2587 pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
2588 }
2589 }
2590
2591 for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
2592 spapr_vscsi_create(spapr->vio_bus);
2593 }
2594
2595
2596 if (spapr_vga_init(phb->bus, &error_fatal)) {
2597 spapr->has_graphics = true;
2598 machine->usb |= defaults_enabled() && !machine->usb_disabled;
2599 }
2600
2601 if (machine->usb) {
2602 if (smc->use_ohci_by_default) {
2603 pci_create_simple(phb->bus, -1, "pci-ohci");
2604 } else {
2605 pci_create_simple(phb->bus, -1, "nec-usb-xhci");
2606 }
2607
2608 if (spapr->has_graphics) {
2609 USBBus *usb_bus = usb_bus_find(-1);
2610
2611 usb_create_simple(usb_bus, "usb-kbd");
2612 usb_create_simple(usb_bus, "usb-mouse");
2613 }
2614 }
2615
2616 if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
2617 error_report(
2618 "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
2619 MIN_RMA_SLOF);
2620 exit(1);
2621 }
2622
2623 if (kernel_filename) {
2624 uint64_t lowaddr = 0;
2625
2626 spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
2627 NULL, NULL, &lowaddr, NULL, 1,
2628 PPC_ELF_MACHINE, 0, 0);
2629 if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
2630 spapr->kernel_size = load_elf(kernel_filename,
2631 translate_kernel_address, NULL, NULL,
2632 &lowaddr, NULL, 0, PPC_ELF_MACHINE,
2633 0, 0);
2634 spapr->kernel_le = spapr->kernel_size > 0;
2635 }
2636 if (spapr->kernel_size < 0) {
2637 error_report("error loading %s: %s", kernel_filename,
2638 load_elf_strerror(spapr->kernel_size));
2639 exit(1);
2640 }
2641
2642
2643 if (initrd_filename) {
2644
2645
2646
2647 spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
2648 + 0x1ffff) & ~0xffff;
2649 spapr->initrd_size = load_image_targphys(initrd_filename,
2650 spapr->initrd_base,
2651 load_limit
2652 - spapr->initrd_base);
2653 if (spapr->initrd_size < 0) {
2654 error_report("could not load initial ram disk '%s'",
2655 initrd_filename);
2656 exit(1);
2657 }
2658 }
2659 }
2660
2661 if (bios_name == NULL) {
2662 bios_name = FW_FILE_NAME;
2663 }
2664 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
2665 if (!filename) {
2666 error_report("Could not find LPAR firmware '%s'", bios_name);
2667 exit(1);
2668 }
2669 fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
2670 if (fw_size <= 0) {
2671 error_report("Could not load LPAR firmware '%s'", filename);
2672 exit(1);
2673 }
2674 g_free(filename);
2675
2676
2677
2678
2679 vmstate_register(NULL, 0, &vmstate_spapr, spapr);
2680 register_savevm_live(NULL, "spapr/htab", -1, 1,
2681 &savevm_htab_handlers, spapr);
2682
2683 qemu_register_boot_set(spapr_boot_set, spapr);
2684
2685 if (kvm_enabled()) {
2686
2687 qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
2688 &spapr->tb);
2689
2690 kvmppc_spapr_enable_inkernel_multitce();
2691 }
2692}
2693
/*
 * Translate the machine's "kvm-type" option into the numeric VM type
 * handed to the KVM ioctl: 0 = let the kernel pick, 1 = HV, 2 = PR.
 * Any other string is a fatal configuration error.
 */
static int spapr_kvm_type(const char *vm_type)
{
    if (vm_type == NULL) {
        return 0;
    }

    if (strcmp(vm_type, "HV") == 0) {
        return 1;
    }

    if (strcmp(vm_type, "PR") == 0) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}
2711
2712
2713
2714
2715
/*
 * FWPathProvider callback: build the firmware (OpenFirmware-style) device
 * path component for @dev, or return NULL to fall back to the generic path.
 * Each branch encodes the device's unit address the way SLOF expects it.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
/* Downcast helper: NULL when @obj is not an instance of @name. */
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        /* SCSI disk/CD: the encoding depends on the controller it sits on. */
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * spapr-vscsi unit address: bit 15 set, target id in bits 8-14,
             * LUN in the low byte, then shifted into the top 16 bits of a
             * 64-bit SRP-style unit address.
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * virtio-scsi LUN addressing: flat-space marker 0x1000000 with
             * the target in bits 16-23 and the LUN in the low 16 bits; the
             * 0x4000 flag selects the extended (>= 256) LUN format.
             * The id occupies the upper half of the 64-bit unit address.
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * USB mass storage: same layout as virtio-scsi but the "target"
             * field carries the USB port number parsed from the port path.
             * NOTE(review): atoi() only uses the leading numeric component
             * of a multi-level port path — presumably sufficient here.
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes USB host-attached SCSI storage as a "storage" node with a
     * "disk" child, so emit that shape instead of the generic usb-host path.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_host_dev_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* PCI host bridges are addressed by their BUID. */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above, using the vhost-scsi target/lun. */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses a "pci" node name for bridges, addressed by slot. */
        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
    }

    return NULL;
}
2798
2799static char *spapr_get_kvm_type(Object *obj, Error **errp)
2800{
2801 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2802
2803 return g_strdup(spapr->kvm_type);
2804}
2805
2806static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
2807{
2808 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2809
2810 g_free(spapr->kvm_type);
2811 spapr->kvm_type = g_strdup(value);
2812}
2813
2814static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
2815{
2816 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2817
2818 return spapr->use_hotplug_event_source;
2819}
2820
2821static void spapr_set_modern_hotplug_events(Object *obj, bool value,
2822 Error **errp)
2823{
2824 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2825
2826 spapr->use_hotplug_event_source = value;
2827}
2828
2829static char *spapr_get_resize_hpt(Object *obj, Error **errp)
2830{
2831 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2832
2833 switch (spapr->resize_hpt) {
2834 case SPAPR_RESIZE_HPT_DEFAULT:
2835 return g_strdup("default");
2836 case SPAPR_RESIZE_HPT_DISABLED:
2837 return g_strdup("disabled");
2838 case SPAPR_RESIZE_HPT_ENABLED:
2839 return g_strdup("enabled");
2840 case SPAPR_RESIZE_HPT_REQUIRED:
2841 return g_strdup("required");
2842 }
2843 g_assert_not_reached();
2844}
2845
2846static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
2847{
2848 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2849
2850 if (strcmp(value, "default") == 0) {
2851 spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
2852 } else if (strcmp(value, "disabled") == 0) {
2853 spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
2854 } else if (strcmp(value, "enabled") == 0) {
2855 spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
2856 } else if (strcmp(value, "required") == 0) {
2857 spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
2858 } else {
2859 error_setg(errp, "Bad value for \"resize-hpt\" property");
2860 }
2861}
2862
2863static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
2864 void *opaque, Error **errp)
2865{
2866 visit_type_uint32(v, name, (uint32_t *)opaque, errp);
2867}
2868
2869static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
2870 void *opaque, Error **errp)
2871{
2872 visit_type_uint32(v, name, (uint32_t *)opaque, errp);
2873}
2874
/*
 * Instance init for the sPAPR machine: set early defaults and register
 * the machine's QOM properties (kvm-type, modern-hotplug-events,
 * max-cpu-compat, resize-hpt, vsmt) with their descriptions.
 */
static void spapr_machine_initfn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    /* No HTAB file descriptor open yet; modern hotplug events by default. */
    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);
    object_property_add_bool(obj, "modern-hotplug-events",
                            spapr_get_modern_hotplug_events,
                            spapr_set_modern_hotplug_events,
                            NULL);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)",
                                    NULL);

    /* "max-cpu-compat" is implemented by shared PPC compat code. */
    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode",
                            &error_fatal);

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)",
                                    NULL);
    /* "vsmt" accessors operate directly on &spapr->vsmt via opaque. */
    object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
                        spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode", &error_abort);
}
2911
2912static void spapr_machine_finalizefn(Object *obj)
2913{
2914 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
2915
2916 g_free(spapr->kvm_type);
2917}
2918
/*
 * Per-CPU work item: sync register state from the accelerator, then
 * inject a system reset exception on @cs. @arg is unused.
 */
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    cpu_synchronize_state(cs);
    ppc_cpu_do_system_reset(cs);
}
2924
/*
 * NMI monitor handler: queue a system reset on every vCPU.
 * Note the @cpu_index argument is ignored — the reset is broadcast
 * to all CPUs, asynchronously, via the per-CPU work queue.
 */
static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}
2933
/*
 * Attach the LMB (logical memory block) DR connectors covering
 * [addr_start, addr_start + size) to the DIMM device @dev, building a
 * per-LMB device tree fragment for each, and — for a true hotplug —
 * notify the guest of the added blocks.  On attach failure, previously
 * attached LMBs of this DIMM are detached again before reporting @errp.
 */
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           uint32_t node, bool dedicated_hp_event_source,
                           Error **errp)
{
    sPAPRDRConnector *drc;
    uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
    int i, fdt_offset, fdt_size;
    void *fdt;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);
    Error *local_err = NULL;

    for (i = 0; i < nr_lmbs; i++) {
        /* LMB DRC ids are the block index within the address space. */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        /* Fresh fdt fragment describing this one memory block. */
        fdt = create_device_tree(&fdt_size);
        fdt_offset = spapr_populate_memory_node(fdt, node, addr,
                                                SPAPR_MEMORY_BLOCK_SIZE);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            /* Roll back: detach every LMB attached so far for this DIMM.
             * NOTE(review): only the fdt of the failed iteration is freed
             * here; fdts handed to spapr_drc_attach are presumably owned
             * by the DRC from then on — confirm against DRC code.
             */
            while (addr > addr_start) {
                addr -= SPAPR_MEMORY_BLOCK_SIZE;
                drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                      addr / SPAPR_MEMORY_BLOCK_SIZE);
                spapr_drc_detach(drc);
            }
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }
        if (!hotplugged) {
            /* Cold-plugged at machine start: put the DRC in reset state. */
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    /* For a runtime hotplug, tell the guest about the new blocks, either
     * via the dedicated hotplug event source or legacy EPOW events. */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}
2988
/*
 * Hotplug handler for PC-DIMM devices: map the DIMM's memory region into
 * the hotplug memory space, then attach its LMB DR connectors.  Uses
 * goto-based cleanup so a failure after pc_dimm_memory_plug() unmaps the
 * region again before propagating the error.
 */
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t align, size, addr;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    align = memory_region_get_alignment(mr);
    size = memory_region_size(mr);

    /* Reserves an address inside ms->hotplug_memory and maps @mr there. */
    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    /* The address assigned by pc_dimm_memory_plug() above. */
    addr = object_property_get_uint(OBJECT(dimm),
                                    PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        goto out_unplug;
    }

    /* Dedicated hotplug events only if the guest negotiated OV5_HP_EVT. */
    spapr_add_lmbs(dev, addr, size, node,
                   spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
                   &local_err);
    if (local_err) {
        goto out_unplug;
    }

    return;

out_unplug:
    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
    error_propagate(errp, local_err);
}
3031
/*
 * Pre-plug validation for PC-DIMM devices: the DIMM size must be a
 * multiple of the sPAPR memory block (LMB) size, and a file-backed
 * memory backend must use a page size acceptable to KVM.
 */
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t size;
    char *mem_dev;

    mr = ddc->get_memory_region(dimm, errp);
    if (!mr) {
        return;
    }
    size = memory_region_size(mr);

    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    /* May be NULL when the DIMM has no explicit memdev backend. */
    mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
        error_setg(errp, "Memory backend has bad page size. "
                   "Use 'memory-backend-file' with correct mem-path.");
        goto out;
    }

out:
    g_free(mem_dev);
}
3063
/*
 * Tracks an in-flight DIMM unplug: nr_lmbs counts the LMBs still to be
 * released by the guest before the DIMM object itself can be removed.
 * Linked into sPAPRMachineState::pending_dimm_unplugs.
 */
struct sPAPRDIMMState {
    PCDIMMDevice *dimm;                  /* the DIMM being unplugged */
    uint32_t nr_lmbs;                    /* LMBs not yet released */
    QTAILQ_ENTRY(sPAPRDIMMState) next;   /* pending_dimm_unplugs linkage */
};
3069
3070static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
3071 PCDIMMDevice *dimm)
3072{
3073 sPAPRDIMMState *dimm_state = NULL;
3074
3075 QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
3076 if (dimm_state->dimm == dimm) {
3077 break;
3078 }
3079 }
3080 return dimm_state;
3081}
3082
3083static sPAPRDIMMState *spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
3084 uint32_t nr_lmbs,
3085 PCDIMMDevice *dimm)
3086{
3087 sPAPRDIMMState *ds = NULL;
3088
3089
3090
3091
3092
3093
3094
3095 ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
3096 if (!ds) {
3097 ds = g_malloc0(sizeof(sPAPRDIMMState));
3098 ds->nr_lmbs = nr_lmbs;
3099 ds->dimm = dimm;
3100 QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
3101 }
3102 return ds;
3103}
3104
/* Unlink and free a completed pending-unplug record. */
static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
                                              sPAPRDIMMState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}
3111
3112static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
3113 PCDIMMDevice *dimm)
3114{
3115 sPAPRDRConnector *drc;
3116 PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
3117 MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
3118 uint64_t size = memory_region_size(mr);
3119 uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
3120 uint32_t avail_lmbs = 0;
3121 uint64_t addr_start, addr;
3122 int i;
3123
3124 addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
3125 &error_abort);
3126
3127 addr = addr_start;
3128 for (i = 0; i < nr_lmbs; i++) {
3129 drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
3130 addr / SPAPR_MEMORY_BLOCK_SIZE);
3131 g_assert(drc);
3132 if (drc->dev) {
3133 avail_lmbs++;
3134 }
3135 addr += SPAPR_MEMORY_BLOCK_SIZE;
3136 }
3137
3138 return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
3139}
3140
3141
/*
 * DRC release callback for one LMB of a DIMM being unplugged.  Decrements
 * the DIMM's outstanding-LMB count; when the last LMB is released, the
 * DIMM's memory is unmapped and the device object destroyed.
 */
void spapr_lmb_release(DeviceState *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* No record (e.g. the unplug was initiated before migration):
     * reconstruct it from the current DRC attachment state. */
    if (ds == NULL) {
        ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
        g_assert(ds);
        /* A release implies at least this LMB was still outstanding. */
        g_assert(ds->nr_lmbs);
    }

    if (--ds->nr_lmbs) {
        return;
    }

    /* Last LMB released: now the DIMM can actually go away.  Unmapping
     * the region and unparenting the device completes the unplug. */
    pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
    object_unparent(OBJECT(dev));
    spapr_pending_dimm_unplugs_remove(spapr, ds);
}
3171
/*
 * Unplug-request handler for PC-DIMM devices: record the pending unplug,
 * mark every LMB DRC of the DIMM for detach, and send the guest a single
 * indexed remove request covering all blocks.  The actual teardown
 * happens later, LMB by LMB, via spapr_lmb_release().
 */
static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    Error *local_err = NULL;
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;
    sPAPRDRConnector *drc;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    size = memory_region_size(mr);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &local_err);
    if (local_err) {
        goto out;
    }

    /* An existing record means a previous unplug request for this DIMM
     * is still being processed by the guest; don't restart it. */
    if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
        error_setg(&local_err,
                   "Memory unplug already in progress for device %s",
                   dev->id);
        goto out;
    }

    spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);

    /* Queue a detach on every LMB DRC covered by the DIMM. */
    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        spapr_drc_detach(drc);
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    /* Notify the guest once, indexed by the DIMM's first LMB DRC. */
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                          addr_start / SPAPR_MEMORY_BLOCK_SIZE);
    spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                              nr_lmbs, spapr_drc_index(drc));
out:
    error_propagate(errp, local_err);
}
3230
/*
 * Build a standalone device tree fragment describing hotplugged CPU @cs,
 * rooted at a "<fw_name>@<vcpu_id>" node.  Returns the malloc'd fdt blob
 * (ownership passes to the caller/DRC) and the node offset via @fdt_offset.
 */
static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
                                           sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    int id = spapr_get_vcpu_id(cpu);
    void *fdt;
    int offset, fdt_size;
    char *nodename;

    fdt = create_device_tree(&fdt_size);
    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
    offset = fdt_add_subnode(fdt, 0, nodename);

    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    g_free(nodename);

    *fdt_offset = offset;
    return fdt;
}
3251
3252
/*
 * DRC release callback for a CPU core: finalize the unplug by clearing
 * the core slot and destroying the device.  On pre-2.10 machine types,
 * re-register dummy ICP migration state for the threads going away so
 * the migration stream layout stays compatible.
 */
void spapr_core_release(DeviceState *dev)
{
    MachineState *ms = MACHINE(qdev_get_hotplug_handler(dev));
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
    CPUCore *cc = CPU_CORE(dev);
    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
        sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
        size_t size = object_type_get_instance_size(scc->cpu_type);
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            /* sc->threads is a flat array of cpu instances of 'size' each. */
            CPUState *cs = CPU(sc->threads + i * size);

            pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
        }
    }

    assert(core_slot);
    core_slot->cpu = NULL;
    object_unparent(OBJECT(dev));
}
3277
/*
 * Unplug-request handler for CPU cores: validate the core exists and is
 * not the boot core, then detach its DRC and send the guest an indexed
 * remove request.  The core object is destroyed later, when the guest
 * releases the DRC (spapr_core_release).
 */
static
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    int index;
    sPAPRDRConnector *drc;
    CPUCore *cc = CPU_CORE(dev);

    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    /* Slot 0 holds the boot core; it must stay. */
    if (index == 0) {
        error_setg(errp, "Boot CPU core may not be unplugged");
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));
    g_assert(drc);

    spapr_drc_detach(drc);

    spapr_hotplug_req_remove_by_index(drc);
}
3305
/*
 * Plug handler for CPU cores: attach the core's DRC (with a freshly built
 * device tree fragment), notify the guest for runtime hotplug, record the
 * core in its slot, and — on pre-2.10 machine types — drop the dummy ICP
 * migration entries the new threads now replace.
 */
static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    CPUState *cs = CPU(core->threads);
    sPAPRDRConnector *drc;
    Error *local_err = NULL;
    CPUArchId *core_slot;
    int index;
    bool hotplugged = spapr_drc_hotplugged(dev);

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));

    /* A missing DRC is only legal on machines without CPU hotplug. */
    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        void *fdt;
        int fdt_offset;

        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            /* Attach failed: the fdt was not adopted by the DRC. */
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }

        if (hotplugged) {
            /* Runtime hotplug: tell the guest a core appeared at this
             * DRC index. */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            /* Cold plug: just put the DRC into its reset state. */
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = OBJECT(dev);

    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
        size_t size = object_type_get_instance_size(scc->cpu_type);
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
            void *obj = sc->threads + i * size;

            cs = CPU(obj);
            pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
        }
    }
}
3372
/*
 * Pre-plug validation for CPU cores: the machine must support CPU hotplug
 * (for runtime plugs), the core type must match the machine CPU type, the
 * core-id must be thread-aligned and map to an empty slot, and the thread
 * count must equal smp_threads on hotplug-capable machines.
 */
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    Error *local_err = NULL;
    CPUCore *cc = CPU_CORE(dev);
    const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(&local_err, "CPU hotplug not supported for this machine");
        goto out;
    }

    /* The core must be of the machine's CPU core type. */
    if (strcmp(base_core_type, type)) {
        error_setg(&local_err, "CPU core type should be %s", base_core_type);
        goto out;
    }

    /* Core ids index by thread, so they must be multiples of smp_threads. */
    if (cc->core_id % smp_threads) {
        error_setg(&local_err, "invalid core id %d", cc->core_id);
        goto out;
    }

    /* Only enforced when the machine supports hotplug; otherwise the
     * thread count was already constrained at machine init. */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(&local_err, "invalid nr-threads %d, must be %d",
                   cc->nr_threads, smp_threads);
        goto out;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(&local_err, "core id %d out of range", cc->core_id);
        goto out;
    }

    if (core_slot->cpu) {
        error_setg(&local_err, "core %d already populated", cc->core_id);
        goto out;
    }

    numa_cpu_pre_plug(core_slot, dev, &local_err);

out:
    error_propagate(errp, local_err);
}
3428
3429static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
3430 DeviceState *dev, Error **errp)
3431{
3432 MachineState *ms = MACHINE(hotplug_dev);
3433 sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
3434
3435 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3436 int node;
3437
3438 if (!smc->dr_lmb_enabled) {
3439 error_setg(errp, "Memory hotplug not supported for this machine");
3440 return;
3441 }
3442 node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
3443 if (*errp) {
3444 return;
3445 }
3446 if (node < 0 || node >= MAX_NODES) {
3447 error_setg(errp, "Invaild node %d", node);
3448 return;
3449 }
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467 if (nb_numa_nodes && !numa_info[node].node_mem) {
3468 error_setg(errp, "Can't hotplug memory to memory-less node %d",
3469 node);
3470 return;
3471 }
3472
3473 spapr_memory_plug(hotplug_dev, dev, node, errp);
3474 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3475 spapr_core_plug(hotplug_dev, dev, errp);
3476 }
3477}
3478
/*
 * Machine-level unplug-request dispatcher for PC-DIMMs and CPU cores.
 * Memory unplug additionally requires the guest to have negotiated the
 * modern (dedicated) hotplug event source via OV5.
 */
static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
                                                DeviceState *dev, Error **errp)
{
    sPAPRMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(sms);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
            spapr_memory_unplug_request(hotplug_dev, dev, errp);
        } else {
            /* Guests relying on legacy EPOW events cannot be told about
             * memory removal reliably, so refuse the request outright. */
            error_setg(errp, "Memory hot unplug not supported for this guest");
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        if (!mc->has_hotpluggable_cpus) {
            error_setg(errp, "CPU hot unplug not supported on this machine");
            return;
        }
        spapr_core_unplug_request(hotplug_dev, dev, errp);
    }
}
3505
3506static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
3507 DeviceState *dev, Error **errp)
3508{
3509 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
3510 spapr_memory_pre_plug(hotplug_dev, dev, errp);
3511 } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3512 spapr_core_pre_plug(hotplug_dev, dev, errp);
3513 }
3514}
3515
3516static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
3517 DeviceState *dev)
3518{
3519 if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
3520 object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
3521 return HOTPLUG_HANDLER(machine);
3522 }
3523 return NULL;
3524}
3525
/*
 * Map a cpu_index to the instance properties of its core slot.
 * possible_cpu_arch_ids() is called first to make sure the slot table
 * exists before it is searched.
 */
static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* Ensure machine->possible_cpus is populated (lazily created). */
    mc->possible_cpu_arch_ids(machine);
    /* Get CPU core id for the given cpu_index. */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}
3539
/*
 * Default NUMA node for core slot @idx: cores are distributed round-robin
 * across the configured nodes.
 * NOTE(review): divides by nb_numa_nodes — presumably only called when
 * NUMA is configured (nb_numa_nodes > 0); confirm against machine core.
 */
static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / smp_cores % nb_numa_nodes;
}
3544
/*
 * Build (once, lazily) the table of possible CPU core slots.  Each entry
 * represents one core; its arch_id/core_id is the index of the core's
 * first thread.  Machines without CPU hotplug only expose the boot cores.
 */
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    int spapr_max_cores = max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        /* Already built: the geometry cannot change at runtime. */
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}
3572
/*
 * Assign the fixed address-space resources for PCI host bridge @index:
 * its BUID, LIOBNs for each DMA window, and the IO / 32-bit MMIO /
 * 64-bit MMIO window addresses.  The layout is purely a function of
 * @index, so every machine of this type places PHBs identically
 * (important for migration).
 *
 * NOTE(review): both the 32-bit and 64-bit MMIO windows are placed at
 * (index + 1) * window-size above SPAPR_PCI_BASE — presumably the
 * zeroth 64-bit-window-sized slot is reserved for the IO and 32-bit
 * windows of all PHBs; confirm against hw/pci-host/spapr.h.
 */
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /* Base BUID; PHB @index gets base_buid + index. */
    const uint64_t base_buid = 0x800000020000000ULL;
/* Number of PHBs that fit between SPAPR_PCI_BASE and SPAPR_PCI_LIMIT,
 * minus one slot of overhead for the smaller windows. */
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
                        SPAPR_PCI_MEM64_WIN_SIZE - 1)
    int i;

    /* Sanity check the window sizes nest and tile as the math assumes. */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);

    /* All the per-PHB IO windows must fit inside one 32-bit window slot,
     * and all the 32-bit windows inside one 64-bit window slot. */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
}
3624
3625static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
3626{
3627 sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
3628
3629 return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
3630}
3631
3632static void spapr_ics_resend(XICSFabric *dev)
3633{
3634 sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
3635
3636 ics_resend(spapr->ics);
3637}
3638
3639static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
3640{
3641 PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
3642
3643 return cpu ? ICP(cpu->intc) : NULL;
3644}
3645
3646static void spapr_pic_print_info(InterruptStatsProvider *obj,
3647 Monitor *mon)
3648{
3649 sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
3650 CPUState *cs;
3651
3652 CPU_FOREACH(cs) {
3653 PowerPCCPU *cpu = POWERPC_CPU(cs);
3654
3655 icp_pic_print_info(ICP(cpu->intc), mon);
3656 }
3657
3658 ics_pic_print_info(spapr->ics, mon);
3659}
3660
/* The XICS server number / KVM vcpu id assigned to @cpu. */
int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}
3665
/*
 * Compute and assign the vcpu id for @cpu from its cpu_index, spacing
 * cores by the machine's vsmt stride.  Fails when the resulting id is
 * out of range for KVM, hinting at the largest workable CPU count.
 */
void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);

    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * smp_threads / spapr->vsmt);
        return;
    }

    cpu->vcpu_id = vcpu_id;
}
3683
3684PowerPCCPU *spapr_find_cpu(int vcpu_id)
3685{
3686 CPUState *cs;
3687
3688 CPU_FOREACH(cs) {
3689 PowerPCCPU *cpu = POWERPC_CPU(cs);
3690
3691 if (spapr_get_vcpu_id(cpu) == vcpu_id) {
3692 return cpu;
3693 }
3694 }
3695
3696 return NULL;
3697}
3698
/*
 * Class init for the abstract sPAPR machine type: wires up the machine
 * defaults and the method tables of every QOM interface this machine
 * implements (FWPathProvider, NMI, HotplugHandler, PPCVirtualHypervisor,
 * XICSFabric, InterruptStatsProvider).  These values describe the LATEST
 * machine behaviour; older versioned machine types override individual
 * fields in their *_class_options() hooks.
 */
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";

    /*
     * Core machine callbacks and defaults.
     * NOTE(review): max_cpus = 1024 looks like a conservative cap —
     * confirm against host/KVM vCPU limits.
     */
    mc->init = ppc_spapr_init;
    mc->reset = ppc_spapr_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * M_BYTE;
    mc->kvm_type = spapr_kvm_type;
    mc->has_dynamic_sysbus = true;
    mc->pci_allow_0_address = true;
    /* device hotplug/unplug handling */
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;

    smc->dr_lmb_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    /* interface method tables */
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->store_hpte = spapr_store_hpte;
    vhc->get_patbe = spapr_get_patbe;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;

    /* align NUMA nodes to 1 << 28 = 256 MiB boundaries */
    mc->numa_mem_align_shift = 28;

    /* default sPAPR capability settings for the latest machine type */
    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    spapr_caps_add_properties(smc, &error_abort);
}
3767
/*
 * Abstract base type for all sPAPR machines.  Concrete versioned
 * "pseries-X.Y" machine types derive from this via DEFINE_SPAPR_MACHINE.
 * The interface list advertises the QOM interfaces whose method tables
 * are populated in spapr_machine_class_init().
 */
static const TypeInfo spapr_machine_info = {
    .name = TYPE_SPAPR_MACHINE,
    .parent = TYPE_MACHINE,
    .abstract = true,
    .instance_size = sizeof(sPAPRMachineState),
    .instance_init = spapr_machine_initfn,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size = sizeof(sPAPRMachineClass),
    .class_init = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { }
    },
};
3787
/*
 * Generate a versioned "pseries-<verstr>" machine type.  Expands to a
 * class_init that applies spapr_machine_<suffix>_class_options() (and,
 * when @latest is true, makes this type the default and gives it the
 * "pseries" alias), an instance_init applying the matching instance
 * options, the TypeInfo deriving from TYPE_SPAPR_MACHINE, and the
 * type_init registration boilerplate.
 */
#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
                                                    void *data)      \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        spapr_machine_##suffix##_class_options(mc);                  \
        if (latest) {                                                \
            mc->alias = "pseries";                                   \
            mc->is_default = 1;                                      \
        }                                                            \
    }                                                                \
    static void spapr_machine_##suffix##_instance_init(Object *obj)  \
    {                                                                \
        MachineState *machine = MACHINE(obj);                        \
        spapr_machine_##suffix##_instance_options(machine);          \
    }                                                                \
    static const TypeInfo spapr_machine_##suffix##_info = {          \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = spapr_machine_##suffix##_class_init,           \
        .instance_init = spapr_machine_##suffix##_instance_init,     \
    };                                                               \
    static void spapr_machine_register_##suffix(void)                \
    {                                                                \
        type_register(&spapr_machine_##suffix##_info);               \
    }                                                                \
    type_init(spapr_machine_register_##suffix)
3815
3816
3817
3818
3819static void spapr_machine_2_12_instance_options(MachineState *machine)
3820{
3821}
3822
3823static void spapr_machine_2_12_class_options(MachineClass *mc)
3824{
3825
3826}
3827
3828DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
3829
3830
3831
3832
3833#define SPAPR_COMPAT_2_11 \
3834 HW_COMPAT_2_11
3835
3836static void spapr_machine_2_11_instance_options(MachineState *machine)
3837{
3838 spapr_machine_2_12_instance_options(machine);
3839}
3840
3841static void spapr_machine_2_11_class_options(MachineClass *mc)
3842{
3843 sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
3844
3845 spapr_machine_2_12_class_options(mc);
3846 smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
3847 SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_11);
3848}
3849
3850DEFINE_SPAPR_MACHINE(2_11, "2.11", true);
3851
3852
3853
3854
/*
 * pseries-2.10
 */
#define SPAPR_COMPAT_2_10 \
    HW_COMPAT_2_10

static void spapr_machine_2_10_instance_options(MachineState *machine)
{
    spapr_machine_2_11_instance_options(machine);
}

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    spapr_machine_2_11_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_10);
}

DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
3870
3871
3872
3873
/*
 * pseries-2.9
 */
#define SPAPR_COMPAT_2_9 \
    HW_COMPAT_2_9 \
    { \
        .driver = TYPE_POWERPC_CPU, \
        .property = "pre-2.10-migration", \
        .value = "on", \
    }, \

static void spapr_machine_2_9_instance_options(MachineState *machine)
{
    spapr_machine_2_10_instance_options(machine);
}

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_10_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
    /* 2.9 and older assign NUMA RAM with the legacy algorithm */
    mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
    smc->pre_2_10_has_unused_icps = true;
    /* HPT resizing is not offered to guests on 2.9 and older */
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
3899
3900
3901
3902
/*
 * pseries-2.8
 */
#define SPAPR_COMPAT_2_8 \
    HW_COMPAT_2_8 \
    { \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
        .property = "pcie-extended-configuration-space", \
        .value = "off", \
    },

static void spapr_machine_2_8_instance_options(MachineState *machine)
{
    spapr_machine_2_9_instance_options(machine);
}

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    spapr_machine_2_9_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8);
    /* 2.8 and older align NUMA nodes to 1 << 23 = 8 MiB only */
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
3924
3925
3926
3927
/*
 * pseries-2.7: legacy PHB window sizes, no 64-bit MMIO window, and
 * pre-2.8 migration streams for CPUs and PHBs.
 */
#define SPAPR_COMPAT_2_7 \
    HW_COMPAT_2_7 \
    { \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
        .property = "mem_win_size", \
        .value = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),\
    }, \
    { \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
        .property = "mem64_win_size", \
        .value = "0", \
    }, \
    { \
        .driver = TYPE_POWERPC_CPU, \
        .property = "pre-2.8-migration", \
        .value = "on", \
    }, \
    { \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
        .property = "pre-2.8-migration", \
        .value = "on", \
    },
3950
3951static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
3952 uint64_t *buid, hwaddr *pio,
3953 hwaddr *mmio32, hwaddr *mmio64,
3954 unsigned n_dma, uint32_t *liobns, Error **errp)
3955{
3956
3957 const uint64_t base_buid = 0x800000020000000ULL;
3958 const hwaddr phb_spacing = 0x1000000000ULL;
3959 const hwaddr mmio_offset = 0xa0000000;
3960 const hwaddr pio_offset = 0x80000000;
3961 const uint32_t max_index = 255;
3962 const hwaddr phb0_alignment = 0x10000000000ULL;
3963
3964 uint64_t ram_top = MACHINE(spapr)->ram_size;
3965 hwaddr phb0_base, phb_base;
3966 int i;
3967
3968
3969 if (MACHINE(spapr)->maxram_size > ram_top) {
3970
3971
3972
3973 ram_top = spapr->hotplug_memory.base +
3974 memory_region_size(&spapr->hotplug_memory.mr);
3975 }
3976
3977 phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
3978
3979 if (index > max_index) {
3980 error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
3981 max_index);
3982 return;
3983 }
3984
3985 *buid = base_buid + index;
3986 for (i = 0; i < n_dma; ++i) {
3987 liobns[i] = SPAPR_PCI_LIOBN(index, i);
3988 }
3989
3990 phb_base = phb0_base + index * phb_spacing;
3991 *pio = phb_base + pio_offset;
3992 *mmio32 = phb_base + mmio_offset;
3993
3994
3995
3996
3997
3998}
3999
static void spapr_machine_2_7_instance_options(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    spapr_machine_2_8_instance_options(machine);
    /* 2.7 and older do not use the dedicated hotplug event source */
    spapr->use_hotplug_event_source = false;
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_8_class_options(mc);
    /* 2.7 defaulted to a POWER7 CPU rather than POWER8 */
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
    /* use the legacy fixed PHB address layout (phb_placement_2_7) */
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
4019
4020
4021
4022
/*
 * pseries-2.6: no dynamic DMA windows, no CPU hotplug.
 */
#define SPAPR_COMPAT_2_6 \
    HW_COMPAT_2_6 \
    { \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
        .property = "ddw",\
        .value = stringify(off),\
    },

static void spapr_machine_2_6_instance_options(MachineState *machine)
{
    spapr_machine_2_7_instance_options(machine);
}

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    spapr_machine_2_7_class_options(mc);
    mc->has_hotpluggable_cpus = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}

DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
4044
4045
4046
4047
/*
 * pseries-2.5
 */
#define SPAPR_COMPAT_2_5 \
    HW_COMPAT_2_5 \
    { \
        .driver = "spapr-vlan", \
        .property = "use-rx-buffer-pools", \
        .value = "off", \
    },

static void spapr_machine_2_5_instance_options(MachineState *machine)
{
    spapr_machine_2_6_instance_options(machine);
}

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_6_class_options(mc);
    /* 2.5 and older default to an OHCI rather than XHCI USB controller */
    smc->use_ohci_by_default = true;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
}

DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
4071
4072
4073
4074
/*
 * pseries-2.4
 */
#define SPAPR_COMPAT_2_4 \
    HW_COMPAT_2_4

static void spapr_machine_2_4_instance_options(MachineState *machine)
{
    spapr_machine_2_5_instance_options(machine);
}

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    /* no dynamic-reconfiguration logical memory blocks before 2.5 */
    smc->dr_lmb_enabled = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
4093
4094
4095
4096
/*
 * pseries-2.3
 */
#define SPAPR_COMPAT_2_3 \
    HW_COMPAT_2_3 \
    {\
        .driver = "spapr-pci-host-bridge",\
        .property = "dynamic-reconfiguration",\
        .value = "off",\
    },

static void spapr_machine_2_3_instance_options(MachineState *machine)
{
    spapr_machine_2_4_instance_options(machine);
}

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    spapr_machine_2_4_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
}
DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
4116
4117
4118
4119
4120
/*
 * pseries-2.2
 */
#define SPAPR_COMPAT_2_2 \
    HW_COMPAT_2_2 \
    {\
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
        .property = "mem_win_size",\
        .value = "0x20000000",\
    },

static void spapr_machine_2_2_instance_options(MachineState *machine)
{
    spapr_machine_2_3_instance_options(machine);
    /* 2.2 migration streams carry no vmdesc section */
    machine->suppress_vmdesc = true;
}

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    spapr_machine_2_3_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
}
DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
4141
4142
4143
4144
/*
 * pseries-2.1
 */
#define SPAPR_COMPAT_2_1 \
    HW_COMPAT_2_1

static void spapr_machine_2_1_instance_options(MachineState *machine)
{
    spapr_machine_2_2_instance_options(machine);
}

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
4159
/*
 * Register the abstract sPAPR machine base type; the concrete
 * versioned machine types register themselves via the type_init()
 * emitted by each DEFINE_SPAPR_MACHINE() expansion above.
 */
static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)
4166