// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: migration of guest pages between the normal
 * memory managed by the hypervisor (HV) and the secure memory managed by
 * the ultravisor (UV) on ultravisor-enabled POWER platforms.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available on the platform is hotplugged; a page from this device memory
 * is used to represent and track each guest page that becomes secure.
 */

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <linux/of.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
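
/*
 * Each GFN of a secure guest is tracked in the per-memslot pfns[] array of
 * struct kvmppc_uvmem_slot. The low bits hold the device PFN when the GFN
 * is resident in secure (device) memory, and the top bits encode the GFN
 * state:
 *
 *   KVMPPC_GFN_UVMEM_PFN - secure GFN backed by a device (uvmem) PFN
 *   KVMPPC_GFN_MEM_PFN   - secure GFN backed by a normal memory PFN
 *   KVMPPC_GFN_SHARED    - GFN shared between HV and UV
 *   no flag set          - normal GFN, not yet transitioned
 */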
#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap has been allocated, an ultravisor is
	 * present and the secure-memory tracking structures are set up.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}
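
/*
 * Free the per-memslot GFN-state array set up by kvmppc_uvmem_slot_init().
 * Called when the memslot is removed or when SVM setup is rolled back.
 */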
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
			    unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as a secure GFN backed by a device (uvmem) PFN */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
		unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as a secure GFN backed by a normal memory PFN */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as shared between HV and UV */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* clear all state for the GFN, returning it to the normal state */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure GFN backed by a device PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else
				return false;
		}
	}
	return false;
}
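
/*
 * Starting from *gfn, search the memslot for the next GFN that has not yet
 * transitioned to a secure or shared state. Returns true and updates *gfn
 * if such a GFN is found, false otherwise.
 */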
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
		struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;

	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register all memslots with the ultravisor */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	if (ret) {
		/* roll back the memslots that were created before the failure */
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
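
/*
 * Move a page from secure (device) memory back to normal memory: allocate
 * a destination page, ask the ultravisor to copy the contents out via
 * UV_PAGE_OUT (unless skip_page_out is set), and migrate the mapping.
 *
 * Caller must hold kvm->arch.uvmem_lock.
 */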
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When the HV touches a secure page, for which we issue
	 *   UV_PAGE_OUT to copy the contents back.
	 * - When a secure page must be dropped without saving its contents
	 *   (skip_page_out set), e.g. while converting it to a shared page
	 *   or while tearing the guest down.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}
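
/*
 * Drop the device pages that this memslot holds for the secure guest.
 * When skip_page_out is true the page contents are not copied back to
 * normal memory (used when the guest is being reset or terminated).
 * The GFN state is cleared so the pages can transition again later.
 */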
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = vma_lookup(kvm->mm, addr);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Remove the shared flag if any */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx;
	struct kvm_memory_slot *memslot;

	/*
	 * Abort is only meaningful after H_SVM_INIT_START and before
	 * H_SVM_INIT_DONE has completed.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}
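
/*
 * Get a free device PFN from the pool.
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN); the
 * device PFN is used to track the secure page on the HV side.
 *
 * Called with kvm->arch.uvmem_lock held.
 */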
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	get_page(dpage);
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}
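
/*
 * Allocate a PFN from the private device memory pool and, if @pagein is
 * true, copy the page from normal memory to secure memory via UV_PAGE_IN.
 */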
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					 gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}
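
/*
 * Migrate every GFN of the memslot that has not yet transitioned to a
 * secure or shared state into secure (device) memory.
 */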
static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the cpu if needed */
		cond_resched();
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any remaining normal pages to secure memory */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * Already-transitioned pages remain transitioned.
			 * It is the caller's responsibility to terminate
			 * the VM, which will undo all of its state. Until
			 * then the VM is in an erroneous state and its
			 * KVMPPC_SECURE_INIT_DONE flag stays unset.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
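
/*
 * Handle H_SVM_PAGE_IN with H_PAGE_IN_SHARED: make the page shared between
 * HV and UV. If the page is currently resident in secure (device) memory,
 * it is first faulted back to normal memory (without paging out its
 * contents), then its normal PFN is handed to the ultravisor via
 * UV_PAGE_IN and the GFN is marked shared.
 */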
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * Do not drop the GFN. It is a valid GFN that is
		 * transitioning to a shared GFN.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false;
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
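
/*
 * H_SVM_PAGE_IN: move a page from normal memory to secure memory, or share
 * it with the hypervisor when H_PAGE_IN_SHARED is set in @flags.
 */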
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
			       true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
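
/*
 * Fault handler callback invoked when the HV touches a page that has been
 * moved to secure memory: ask the ultravisor to give the page back by
 * paging it out to normal memory.
 */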
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}
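
/*
 * Release the device PFN back to the pool.
 *
 * Called when the backing device page is freed, i.e. when the GFN either
 * transitions back to normal memory or is removed altogether.
 */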
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};
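
/*
 * H_SVM_PAGE_OUT: move a page from secure memory to normal memory.
 */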
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes which supersede the
	 * secure-memory-ranges property. If any are found, there is no
	 * need to read the deprecated property.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such a platform.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	kfree(kvmppc_uvmem_bitmap);
}