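/*
 * Book3S 32-bit MMU host support for PR KVM: shadows the guest's segment
 * registers and hash page table by installing translations directly into
 * the host HTAB, so guest memory accesses resolve in host hardware.
 */
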
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

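/* Define DEBUG_MMU and/or DEBUG_SR to enable the tracing macros below. */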
#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

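/* Location and hash mask of the host HTAB, taken from SDR1 at init time. */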
static ulong htab;
static u32 htabmask;

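/*
 * Drop a shadow PTE: clear its slot in the host HTAB and flush the
 * stale translation from the TLB.
 */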
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove the entry from the host HTAB */
	pteg = (u32 *)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

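/*
 * Fold the guest VSID down to SID_MAP_BITS bits by XORing together all
 * of its SID_MAP_BITS-wide slices, giving an index into the vcpu's
 * gvsid -> hvsid map without having to loop over it.
 */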
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

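/*
 * Look up the host VSID backing a guest VSID.  An entry may live in its
 * forward-hashed or backward-hashed bucket (see create_sid_map()), so
 * check both before giving up.
 */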
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	/* Try the backward-hashed bucket as well */
	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	/* Page index within the 256MB segment */
	page = (eaddr & ~ESID_MASK) >> 12;

	/*
	 * Each PTEG is 64 bytes, hence the shift by 6.  The secondary
	 * hash is the one's complement of the primary hash.
	 */
	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}

extern char etext[];

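/*
 * Install a shadow PTE for a guest mapping: resolve the guest physical
 * address to a host page, pick a free slot in the matching host PTEG
 * (evicting an entry once both hash groups are full) and register the
 * mapping in the hpte cache for later invalidation.
 */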
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	kvm_pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;
	bool writable;

	/* Get the host physical address for the gpa */
	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* Find the host VSID for this guest segment, mapping it if needed */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
	if (rr == 16) {
		/* All eight slots were valid; flip to the other hash group
		 * and start evicting */
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* Not evicting yet, so skip over slots that are still valid */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Word 0: valid bit, VSID, hash-function bit, abbreviated page index */
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	/* Word 1: host real page number plus coherence/referenced/changed */
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	/* Grant write access only if the guest may write and the host
	 * page is writable */
	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	local_irq_disable();

	if (pteg[rr]) {
		/* Invalidate the old entry before replacing it */
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	/* Write the second word first, so the valid bit in word 0 only
	 * appears once the RPN word is in place */
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our shadow PTE code about the new page */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
	return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

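/*
 * Allocate a host VSID for a guest VSID that has no mapping yet.  When
 * the per-vcpu VSID pool runs dry, all shadow PTEs and segments are
 * flushed and the pool is recycled from the start.
 */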
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Colliding guest VSIDs would otherwise keep stealing the same
	 * bucket from each other, so alternate between the forward and
	 * backward bucket.
	 */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we take the other bucket next time */
	backwards_map = !backwards_map;

	/* Out of host VSIDs: flush all shadow state and recycle the pool */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

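/* Load the shadow segment register for the segment containing eaddr. */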
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* No guest translation exists; invalidate the entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

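/* Invalidate all shadow segment registers; they are refilled on demand. */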
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%zu)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

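/*
 * Tear down per-vcpu MMU state: free the hpte cache and release the
 * host MMU contexts backing the VSID pool.
 */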
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

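/* Derive pool VSIDs from a host MMU context id, using the same scheme
 * as the host's hash32 context code. */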
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

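/*
 * Set up the per-vcpu MMU state: allocate host MMU contexts to fill the
 * VSID pool and locate the host HTAB via SDR1.
 */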
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember the VSIDs this context id provides */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the host HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(vcpu3s->context_id[j]);
	}

	return -1;
}