/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split.  Called only in real
	 * mode with MSR_EE = 0, hence no need for irq_save/restore.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Update the dirty bitmap of a memslot */
void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
{
	unsigned long npages;

	if (!psize || !memslot->dirty_bitmap)
		return;
	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
	gfn -= memslot->base_gfn;
	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
}
EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);
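
/*
 * Mark the page mapped by the given HPTE as dirty in its memslot's
 * dirty bitmap; callers use this when the HPTE's change (C) bit is set.
 */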
static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
			unsigned long hpte_v, unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long psize;

	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
	gfn = hpte_rpn(hpte_gr, psize);
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslot && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, psize);
}

/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr,
				      struct kvm_memory_slot **memslotp,
				      unsigned long *gfnp)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslotp)
		*memslotp = memslot;
	if (gfnp)
		*gfnp = gfn;
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;
	struct kvm_memory_slot *memslot;
	unsigned long gfn;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_dirty_map(memslot, gfn,
					kvmppc_actual_pgsz(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = kvmppc_actual_pgsz(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);

	/*
	 * If there is a page table change after this lookup, we will
	 * retry via mmu_notifier_retry.
	 */
	if (!realmode)
		local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0. Otherwise
	 * we disable irqs above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host page
		 * size, if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize - 1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping, check the WIMG bits */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
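
/*
 * Real-mode handler for the H_ENTER hypercall; the resulting HPTE
 * index is returned to the guest in GPR 4.
 */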
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true,
				 &vcpu->arch.regs.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
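
/*
 * An emulated MMIO HPTE is marked absent with both key bits set,
 * a combination not used for ordinary absent (paged-out) HPTEs.
 */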
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}
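
/*
 * Invalidate a batch of TLB entries for this guest: broadcast with
 * tlbie if global, otherwise local to this core with tlbiel.
 */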
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		}

		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
			/*
			 * Need the extra ptesync to make sure we don't
			 * re-order the tlbie
			 */
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
		}

		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}
}
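
/*
 * Real-mode body of the H_REMOVE hypercall: invalidate an HPTE and
 * return its (old-format) V and R values in hpret[0] and hpret[1].
 */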
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.regs.gpr[4]);
}
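
/*
 * H_BULK_REMOVE: process up to 4 remove requests passed in GPRs 4-11
 * (two registers per request), batching the TLB invalidations.
 */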
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.regs.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* dummy request */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R | HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R | HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}
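
/*
 * H_PROTECT: update the pp0/pp/N/key protection bits of an HPTE,
 * invalidating and rewriting the entry if it is currently valid.
 */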
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}
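
/*
 * H_READ: return the contents of one HPTE (or four with H_READ_4)
 * in GPRs 4 and up, substituting the guest view of the second dword.
 */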
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.regs.gpr[4 + i * 2] = v;
		vcpu->arch.regs.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
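
/*
 * H_CLEAR_REF: clear the reference (R) bit of an HPTE, returning the
 * old guest view of the second dword and noting R in the rmap entry.
 */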
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
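
/*
 * H_CLEAR_MOD: clear the change (C) bit of an HPTE, returning the old
 * guest view of the second dword and updating the dirty map if C was set.
 */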
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			kvmppc_set_dirty_from_hpte(kvm, v, gr);
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
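
/* Clear the valid bit of an HPTE and invalidate the corresponding TLB entry */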
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
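
/*
 * Clear the reference (R) bit of an HPTE in place and invalidate the
 * corresponding TLB entry, without making the HPTE absent.
 */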
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
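
/* Base page shift for each SLB_VSID_LP encoding */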
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M */
};
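
/*
 * Look up a cached translation for an emulated MMIO access: the entry
 * must match the effective address, SLB value and MMIO update count.
 */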
static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}
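
/* Take the next slot in the per-vcpu MMIO cache, round-robin */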
static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}

/*
 * When called from virtual mode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK can trigger a
 * deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    kvmppc_hpte_base_page_shift(v, r) == pshift)
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for the virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;	/* send the fault up to host kernel mode */
}