#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define KVM_CMA_CHUNK_ORDER	18
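
/*
 * On POWER7 and later CPUs (CPU_FTR_ARCH_206) the hash page table only
 * needs to be aligned to a power-of-2 boundary; older CPUs need it
 * aligned to its full size (see kvm_alloc_hpt() below).
 */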
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT)	/* 256kB */
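
/*
 * By default, reserve 5% of memory for the KVM contiguous memory area
 * (used for hash page tables and real mode areas).
 */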
static unsigned long kvm_cma_resv_ratio = 5;
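
/*
 * Default RMA (real mode area) size is 128MB.  The RMA must be physically
 * contiguous and of a size the hardware supports (see lpcr_rmls() below).
 */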
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;
EXPORT_SYMBOL_GPL(kvm_rma_pages);

static struct cma *kvm_cma;
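
/*
 * Work out the RMLS (real mode limit selector) field value for a given
 * RMA size, or return -1 if the size is not supported.
 */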
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;
		return -1;
	case 64ul << 20:	/* 64MB */
		return 3;
	case 128ul << 20:	/* 128MB */
		return 7;
	case 256ul << 20:	/* 256MB */
		return 4;
	case 1ul << 30:		/* 1GB */
		return 2;
	case 16ul << 30:	/* 16GB */
		return 1;
	case 256ul << 30:	/* 256GB */
		return 0;
	default:
		return -1;
	}
}

static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);

	/* Check that the requested size is one supported by the hardware. */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
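
/*
 * Allocate an RMA of kvm_rma_pages pages from the KVM CMA area.  Returns
 * NULL on failure; the area is reference-counted and freed via
 * kvm_release_rma().
 */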
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require the HPT to be aligned to a multiple of its size. */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
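
/**
 * kvm_cma_reserve() - reserve area for KVM hash page tables and RMAs
 *
 * Reserves memory from the early (memblock) allocator.  Should be called
 * by arch-specific setup code once memblock is initialized, before pages
 * are handed out by the early allocator.
 */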
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/* We only need the CMA reservation when running in HV mode. */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	/* Count the total number of memory pages across all memblock regions. */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned to a multiple of its
		 * size, so use the largest size we might request as alignment.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
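
/*
 * When running HV-mode KVM we need to block certain operations while any
 * KVM VMs exist in the system, so we keep a count of active VMs.  One such
 * operation is onlining of secondary CPUs, so hv_vm_count updates are made
 * under get/put_online_cpus().
 */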
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
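
/*
 * Return 1 if the hcall numbered 'cmd' has a real-mode implementation,
 * i.e. a non-zero entry in hcall_real_table (hcall numbers are multiples
 * of 4, hence the division below), and 0 otherwise.
 */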
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);