#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

/* Host HTAB location and hash mask, read from SDR1 at init time */
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove the entry from the host HTAB */
	pteg = (u32*)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

/* We keep a hash table of guest VSID to host VSID mappings, so lookups
 * don't have to walk the whole map. Fold the 64-bit guest VSID down to
 * SID_MAP_BITS bits by XORing its SID_MAP_BITS-wide slices together. */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* A mapping may live in either the forward or the backward hash
	 * slot; check both */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	/* Primary hash is (VSID ^ page index); the secondary PTEG uses its
	 * one's complement. Each PTEG is 64 bytes, hence the << 6 */
	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32*)pteg;
}
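
/*
 * Illustration (values are hypothetical, not taken from this file): with
 * the minimal 64KB HTAB, SDR1's HTABMASK field is 0, so htabmask becomes
 * 0xFFC0 and the masked hash selects one of 1024 64-byte PTEGs. For
 * vsid 0x2345 and page index 0x0042, the primary hash is
 * (0x2345 ^ 0x0042) << 6 = 0x8C1C0, which the mask folds down to PTEG
 * offset 0xC1C0 within the HTAB.
 */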

extern char etext[];

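/*
 * Map a single guest page into the host HTAB: pin the backing host page,
 * translate the guest effective address through the shadow VSID map, find
 * a free slot in the primary or secondary PTEG (evicting round-robin once
 * both are full), write the HPTE, and remember it in the shadow PTE cache
 * so kvmppc_mmu_invalidate_pte() can remove it again later.
 */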
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
				 orig_pte->raddr >> PAGE_SHIFT);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
	if (rr == 16) {
		/* Both PTEGs are full: switch and start evicting */
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Word 0: abbreviated page index, VSID, hash function selector,
	 * valid bit */
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	/* Word 1: real page number, memory coherence, referenced/changed */
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	local_irq_disable();

	/* Invalidate the slot first, then write word 1 before word 0, so
	 * the MMU never sees a valid but half-initialized entry */
	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our Shadow PTE code about the new page */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);

	dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%llx (0x%llx) -> %llx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
	return r;
}

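/*
 * Create a new guest-VSID to host-VSID mapping. Alternating between the
 * forward and the backward hash slot lets two guest VSIDs that collide on
 * the hash coexist; once the host VSID pool is exhausted, all mappings and
 * shadow state are flushed and allocation starts over.
 */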
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	 * map them differently */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &vcpu_book3s->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

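/*
 * Set up the shadow segment register for the segment containing eaddr,
 * resolving (and if necessary creating) the guest-VSID mapping first.
 */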
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
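
/*
 * Each host MMU context yields 16 VSIDs, spaced 0x111 apart. For example
 * (hypothetical context id): CTX_TO_VSID(1, 0) = 897 * 16 = 0x3810 and
 * CTX_TO_VSID(1, 1) = 0x3810 + 0x111 = 0x3921.
 */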

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember the VSIDs this context provides */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the HTAB is */
	asm ("mfsdr1 %0" : "=r"(sdr1));
	/* HTABMASK (9 bits) extends the hash mask; HTABORG is the base */
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(vcpu3s->context_id[j]);
	}

	return -1;
}