linux/arch/s390/mm/gmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2016, 2018
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               David Hildenbrand <david@redhat.com>
 *               Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
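
/*
 * Usage sketch (illustrative, not built as part of this file): an owner
 * such as a hypervisor creates the gmap against its own mm and tears it
 * down with gmap_remove(); the limit below is just an example value.
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);
 */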

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
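
/*
 * Refcounting sketch: code that keeps a gmap pointer beyond its caller's
 * lifetime takes its own reference and drops it when done; gmap_free()
 * only runs after the last gmap_put(). "ctx" is a placeholder for such
 * a long-lived holder.
 *
 *	ctx->gmap = gmap_get(gmap);
 *	...
 *	gmap_put(ctx->gmap);
 */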

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
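
/*
 * Sketch of the expected enable/disable pairing around guest execution
 * ("vcpu_gmap" is a placeholder for the per-vcpu gmap pointer):
 *
 *	gmap_enable(vcpu_gmap);
 *	... run the guest (SIE) ...
 *	gmap_disable(vcpu_gmap);
 *
 * Between the two calls gmap_get_enabled() returns vcpu_gmap on this cpu,
 * since the pointer lives in the per-cpu lowcore.
 */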

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
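
/*
 * Worked example for the arithmetic above (s390: 2048 segment entries of
 * 8 bytes each, PMD_SIZE = 1 MB): an @entry whose address ends in 0x038
 * is slot 7 of its 16 KB table, so offset = 7 * 1 MB; page->index of the
 * table's first page holds the guest address mapped by slot 0, and the
 * sum is the guest address translated by @entry.
 */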

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
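
/*
 * Usage sketch: mirror a segment-aligned chunk of the parent address
 * space into the guest and drop it again ("g", "host_start" and
 * "guest_start" are placeholders; all three values must be PMD_SIZE
 * aligned, otherwise -EINVAL is returned):
 *
 *	if (gmap_map_segment(g, host_start, guest_start, 16UL << 20))
 *		goto fail;
 *	...
 *	gmap_unmap_segment(g, guest_start, 16UL << 20);
 */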

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
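
/*
 * Sketch ("g" and "gaddr" are placeholders): translation only consults
 * the guest_to_host radix tree, so -EFAULT merely means no segment is
 * currently linked at gaddr, not that it can never be mapped:
 *
 *	unsigned long vmaddr;
 *
 *	vmaddr = gmap_translate(g, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return -EFAULT;
 */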

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					| _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					_SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
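
/*
 * Sketch: resolving a guest write fault, e.g. from an interception
 * handler ("g" and "gaddr" are placeholders). A non-zero result is
 * -EFAULT or -ENOMEM as documented above:
 *
 *	int rc;
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc)
 *		return rc;
 */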

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
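
/*
 * Sketch: release the host memory backing one guest segment, e.g. for a
 * guest page-release hint; hugetlbfs-backed ranges are skipped by design
 * ("g" and "gaddr" are placeholders):
 *
 *	gmap_discard(g, gaddr, gaddr + PMD_SIZE);
 */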

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
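
/*
 * Consumer sketch ("my_" names are placeholders): the callback runs
 * whenever a protected range is invalidated, see gmap_call_notifier()
 * below:
 *
 *	static void my_notifier_call(struct gmap *gmap,
 *				     unsigned long start, unsigned long end)
 *	{
 *		... react to the invalidated guest range ...
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */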

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
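
/*
 * Sketch: a level-1 walk yields the segment table entry for a guest
 * address; NULL means the tables stop short of that level, the gmap is
 * a removed shadow, or gaddr lies outside the asce range:
 *
 *	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);
 *
 *	if (ste && !(*ste & _SEGMENT_ENTRY_INVALID))
 *		... the segment entry is valid ...
 */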

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with gmap->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with gmap->mm->mmap_sem in read.
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with gmap->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
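
/*
 * Sketch: write-protect one guest page and arm the notifier; the
 * registered callbacks fire once the protected pte is invalidated
 * ("g" and "gaddr" are placeholders):
 *
 *	int rc;
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 */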

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
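
/*
 * Sketch: peek at one word of a guest page table without marking the
 * page referenced; retries after fixups happen internally ("g" and
 * "gaddr" are placeholders):
 *
 *	unsigned long val;
 *
 *	if (!gmap_read_table(g, gaddr, &val))
 *		... use val ...
 */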

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
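	/*
	 * IDTE, opcode 0xb98e: operand 1 carries the table origin plus the
	 * designation-type bits, operand 2 the virtual address selecting
	 * the entry to invalidate; the affected TLB entries are purged on
	 * all CPUs (the R3 and M4 fields are left zero here).
	 */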
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
1274
1275/**
1276 * gmap_unshadow_page - remove a page from a shadow page table
1277 * @sg: pointer to the shadow guest address space structure
1278 * @raddr: rmap address in the shadow guest address space
1279 *
1280 * Called with the sg->guest_table_lock
1281 */
1282static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
1283{
1284        unsigned long *table;
1285
1286        BUG_ON(!gmap_is_shadow(sg));
1287        table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
1288        if (!table || *table & _PAGE_INVALID)
1289                return;
1290        gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
1291        ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
1292}
1293
1294/**
1295 * __gmap_unshadow_pgt - remove all entries from a shadow page table
1296 * @sg: pointer to the shadow guest address space structure
1297 * @raddr: rmap address in the shadow guest address space
1298 * @pgt: pointer to the start of a shadow page table
1299 *
1300 * Called with the sg->guest_table_lock
1301 */
1302static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
1303                                unsigned long *pgt)
1304{
1305        int i;
1306
1307        BUG_ON(!gmap_is_shadow(sg));
1308        for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
1309                pgt[i] = _PAGE_INVALID;
1310}
1311
1312/**
1313 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1314 * @sg: pointer to the shadow guest address space structure
1315 * @raddr: address in the shadow guest address space
1316 *
1317 * Called with the sg->guest_table_lock
1318 */
1319static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
1320{
1321        unsigned long sto, *ste, *pgt;
1322        struct page *page;
1323
1324        BUG_ON(!gmap_is_shadow(sg));
1325        ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
1326        if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
1327                return;
1328        gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
1329        sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
1330        gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
1331        pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
1332        *ste = _SEGMENT_ENTRY_EMPTY;
1333        __gmap_unshadow_pgt(sg, raddr, pgt);
1334        /* Free page table */
1335        page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1336        list_del(&page->lru);
1337        page_table_free_pgste(page);
1338}
1339
1340/**
1341 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1342 * @sg: pointer to the shadow guest address space structure
1343 * @raddr: rmap address in the shadow guest address space
1344 * @sgt: pointer to the start of a shadow segment table
1345 *
1346 * Called with the sg->guest_table_lock
1347 */
1348static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1349                                unsigned long *sgt)
1350{
1351        unsigned long *pgt;
1352        struct page *page;
1353        int i;
1354
1355        BUG_ON(!gmap_is_shadow(sg));
1356        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
1357                if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
1358                        continue;
1359                pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
1360                sgt[i] = _SEGMENT_ENTRY_EMPTY;
1361                __gmap_unshadow_pgt(sg, raddr, pgt);
1362                /* Free page table */
1363                page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1364                list_del(&page->lru);
1365                page_table_free_pgste(page);
1366        }
1367}
1368
1369/**
1370 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1371 * @sg: pointer to the shadow guest address space structure
1372 * @raddr: rmap address in the shadow guest address space
1373 *
1374 * Called with the shadow->guest_table_lock
1375 */
1376static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1377{
1378        unsigned long r3o, *r3e, *sgt;
1379        struct page *page;
1380
1381        BUG_ON(!gmap_is_shadow(sg));
1382        r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
1383        if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
1384                return;
1385        gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
1386        r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
1387        gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
1388        sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
1389        *r3e = _REGION3_ENTRY_EMPTY;
1390        __gmap_unshadow_sgt(sg, raddr, sgt);
1391        /* Free segment table */
1392        page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1393        list_del(&page->lru);
1394        __free_pages(page, CRST_ALLOC_ORDER);
1395}
1396
1397/**
1398 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1399 * @sg: pointer to the shadow guest address space structure
1400 * @raddr: address in the shadow guest address space
1401 * @r3t: pointer to the start of a shadow region-3 table
1402 *
1403 * Called with the sg->guest_table_lock
1404 */
1405static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1406                                unsigned long *r3t)
1407{
1408        unsigned long *sgt;
1409        struct page *page;
1410        int i;
1411
1412        BUG_ON(!gmap_is_shadow(sg));
1413        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
1414                if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
1415                        continue;
1416                sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
1417                r3t[i] = _REGION3_ENTRY_EMPTY;
1418                __gmap_unshadow_sgt(sg, raddr, sgt);
1419                /* Free segment table */
1420                page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1421                list_del(&page->lru);
1422                __free_pages(page, CRST_ALLOC_ORDER);
1423        }
1424}
1425
1426/**
1427 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1428 * @sg: pointer to the shadow guest address space structure
1429 * @raddr: rmap address in the shadow guest address space
1430 *
1431 * Called with the sg->guest_table_lock
1432 */
1433static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1434{
1435        unsigned long r2o, *r2e, *r3t;
1436        struct page *page;
1437
1438        BUG_ON(!gmap_is_shadow(sg));
1439        r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
1440        if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
1441                return;
1442        gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
1443        r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
1444        gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
1445        r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
1446        *r2e = _REGION2_ENTRY_EMPTY;
1447        __gmap_unshadow_r3t(sg, raddr, r3t);
1448        /* Free region 3 table */
1449        page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1450        list_del(&page->lru);
1451        __free_pages(page, CRST_ALLOC_ORDER);
1452}
1453
1454/**
1455 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1456 * @sg: pointer to the shadow guest address space structure
1457 * @raddr: rmap address in the shadow guest address space
1458 * @r2t: pointer to the start of a shadow region-2 table
1459 *
1460 * Called with the sg->guest_table_lock
1461 */
1462static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1463                                unsigned long *r2t)
1464{
1465        unsigned long *r3t;
1466        struct page *page;
1467        int i;
1468
1469        BUG_ON(!gmap_is_shadow(sg));
1470        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
1471                if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
1472                        continue;
1473                r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
1474                r2t[i] = _REGION2_ENTRY_EMPTY;
1475                __gmap_unshadow_r3t(sg, raddr, r3t);
1476                /* Free region 3 table */
1477                page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1478                list_del(&page->lru);
1479                __free_pages(page, CRST_ALLOC_ORDER);
1480        }
1481}
1482
1483/**
1484 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1485 * @sg: pointer to the shadow guest address space structure
1486 * @raddr: rmap address in the shadow guest address space
1487 *
1488 * Called with the sg->guest_table_lock
1489 */
1490static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1491{
1492        unsigned long r1o, *r1e, *r2t;
1493        struct page *page;
1494
1495        BUG_ON(!gmap_is_shadow(sg));
1496        r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
1497        if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
1498                return;
1499        gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
1500        r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
1501        gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
1502        r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
1503        *r1e = _REGION1_ENTRY_EMPTY;
1504        __gmap_unshadow_r2t(sg, raddr, r2t);
1505        /* Free region 2 table */
1506        page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1507        list_del(&page->lru);
1508        __free_pages(page, CRST_ALLOC_ORDER);
1509}
1510
1511/**
1512 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1513 * @sg: pointer to the shadow guest address space structure
1514 * @raddr: rmap address in the shadow guest address space
1515 * @r1t: pointer to the start of a shadow region-1 table
1516 *
1517 * Called with the sg->guest_table_lock
1518 */
1519static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1520                                unsigned long *r1t)
1521{
1522        unsigned long asce, *r2t;
1523        struct page *page;
1524        int i;
1525
1526        BUG_ON(!gmap_is_shadow(sg));
1527        asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
1528        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
1529                if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
1530                        continue;
1531                r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
1532                __gmap_unshadow_r2t(sg, raddr, r2t);
1533                /* Clear entry and flush translation r1t -> r2t */
1534                gmap_idte_one(asce, raddr);
1535                r1t[i] = _REGION1_ENTRY_EMPTY;
1536                /* Free region 2 table */
1537                page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1538                list_del(&page->lru);
1539                __free_pages(page, CRST_ALLOC_ORDER);
1540        }
1541}
1542
1543/**
1544 * gmap_unshadow - remove a shadow page table completely
1545 * @sg: pointer to the shadow guest address space structure
1546 *
1547 * Called with sg->guest_table_lock
1548 */
1549static void gmap_unshadow(struct gmap *sg)
1550{
1551        unsigned long *table;
1552
1553        BUG_ON(!gmap_is_shadow(sg));
1554        if (sg->removed)
1555                return;
1556        sg->removed = 1;
1557        gmap_call_notifier(sg, 0, -1UL);
1558        gmap_flush_tlb(sg);
1559        table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1560        switch (sg->asce & _ASCE_TYPE_MASK) {
1561        case _ASCE_TYPE_REGION1:
1562                __gmap_unshadow_r1t(sg, 0, table);
1563                break;
1564        case _ASCE_TYPE_REGION2:
1565                __gmap_unshadow_r2t(sg, 0, table);
1566                break;
1567        case _ASCE_TYPE_REGION3:
1568                __gmap_unshadow_r3t(sg, 0, table);
1569                break;
1570        case _ASCE_TYPE_SEGMENT:
1571                __gmap_unshadow_sgt(sg, 0, table);
1572                break;
1573        }
1574}
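
    /*
     * Note on the level convention used throughout (derived from the
     * walk calls in this file): gmap_table_walk() takes the walk depth
     * counted upward from the pte level, so
     *
     *         gmap_table_walk(sg, raddr, 4)   returns the region-1 entry,
     *         gmap_table_walk(sg, raddr, 3)   returns the region-2 entry,
     *         gmap_table_walk(sg, raddr, 2)   returns the region-3 entry,
     *         gmap_table_walk(sg, raddr, 1)   returns the segment entry,
     *         gmap_table_walk(sg, raddr, 0)   returns the pte.
     *
     * gmap_unshadow() above simply starts the teardown at whatever table
     * type the shadow ASCE designates.
     */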
1575
1576/**
1577 * gmap_find_shadow - find a specific asce in the list of shadow tables
1578 * @parent: pointer to the parent gmap
1579 * @asce: ASCE for which the shadow table is created
1580 * @edat_level: edat level to be used for the shadow translation
1581 *
1582 * Returns the pointer to a gmap if a shadow table with the given asce is
1583 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1584 * otherwise NULL
1585 */
1586static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1587                                     int edat_level)
1588{
1589        struct gmap *sg;
1590
1591        list_for_each_entry(sg, &parent->children, list) {
1592                if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1593                    sg->removed)
1594                        continue;
1595                if (!sg->initialized)
1596                        return ERR_PTR(-EAGAIN);
1597                refcount_inc(&sg->ref_count);
1598                return sg;
1599        }
1600        return NULL;
1601}
1602
1603/**
1604 * gmap_shadow_valid - check if a shadow guest address space matches the
1605 *                     given properties and is still valid
1606 * @sg: pointer to the shadow guest address space structure
1607 * @asce: ASCE for which the shadow table is requested
1608 * @edat_level: edat level to be used for the shadow translation
1609 *
1610 * Returns 1 if the gmap shadow is still valid and matches the given
1611 * properties, the caller can continue using it. Returns 0 otherwise, the
1612 * caller has to request a new shadow gmap in this case.
1613 *
1614 */
1615int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1616{
1617        if (sg->removed)
1618                return 0;
1619        return sg->orig_asce == asce && sg->edat_level == edat_level;
1620}
1621EXPORT_SYMBOL_GPL(gmap_shadow_valid);
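
    /*
     * Illustrative sketch, not part of the original file: a VSIE-style
     * caller that caches a shadow gmap would revalidate it against the
     * current guest ASCE before reuse and request a fresh shadow on a
     * mismatch.  example_get_shadow() and its cached_sg argument are
     * hypothetical.
     */
    static struct gmap *example_get_shadow(struct gmap *cached_sg,
                                           struct gmap *parent,
                                           unsigned long asce, int edat_level)
    {
            if (cached_sg && gmap_shadow_valid(cached_sg, asce, edat_level))
                    return cached_sg;       /* still matches, keep using it */
            if (cached_sg)
                    gmap_put(cached_sg);    /* drop the stale shadow */
            return gmap_shadow(parent, asce, edat_level);   /* gmap or ERR_PTR */
    }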
1622
1623/**
1624 * gmap_shadow - create/find a shadow guest address space
1625 * @parent: pointer to the parent gmap
1626 * @asce: ASCE for which the shadow table is created
1627 * @edat_level: edat level to be used for the shadow translation
1628 *
1629 * The pages of the top level page table referred to by the asce parameter
1630 * will be set to read-only and marked in the PGSTEs of the kvm process.
1631 * The shadow table will be removed automatically on any change to the
1632 * PTE mapping for the source table.
1633 *
1634 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1635 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1636 * parent gmap table could not be protected.
1637 */
1638struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1639                         int edat_level)
1640{
1641        struct gmap *sg, *new;
1642        unsigned long limit;
1643        int rc;
1644
1645        BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
1646        BUG_ON(gmap_is_shadow(parent));
1647        spin_lock(&parent->shadow_lock);
1648        sg = gmap_find_shadow(parent, asce, edat_level);
1649        spin_unlock(&parent->shadow_lock);
1650        if (sg)
1651                return sg;
1652        /* Create a new shadow gmap */
1653        limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1654        if (asce & _ASCE_REAL_SPACE)
1655                limit = -1UL;
1656        new = gmap_alloc(limit);
1657        if (!new)
1658                return ERR_PTR(-ENOMEM);
1659        new->mm = parent->mm;
1660        new->parent = gmap_get(parent);
1661        new->orig_asce = asce;
1662        new->edat_level = edat_level;
1663        new->initialized = false;
1664        spin_lock(&parent->shadow_lock);
1665        /* Recheck if another CPU created the same shadow */
1666        sg = gmap_find_shadow(parent, asce, edat_level);
1667        if (sg) {
1668                spin_unlock(&parent->shadow_lock);
1669                gmap_free(new);
1670                return sg;
1671        }
1672        if (asce & _ASCE_REAL_SPACE) {
1673                /* only allow one real-space gmap shadow */
1674                list_for_each_entry(sg, &parent->children, list) {
1675                        if (sg->orig_asce & _ASCE_REAL_SPACE) {
1676                                spin_lock(&sg->guest_table_lock);
1677                                gmap_unshadow(sg);
1678                                spin_unlock(&sg->guest_table_lock);
1679                                list_del(&sg->list);
1680                                gmap_put(sg);
1681                                break;
1682                        }
1683                }
1684        }
1685        refcount_set(&new->ref_count, 2);
1686        list_add(&new->list, &parent->children);
1687        if (asce & _ASCE_REAL_SPACE) {
1688                /* nothing to protect, return right away */
1689                new->initialized = true;
1690                spin_unlock(&parent->shadow_lock);
1691                return new;
1692        }
1693        spin_unlock(&parent->shadow_lock);
1694        /* protect after insertion, so it will get properly invalidated */
1695        down_read(&parent->mm->mmap_sem);
1696        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1697                                ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
1698                                PROT_READ, GMAP_NOTIFY_SHADOW);
1699        up_read(&parent->mm->mmap_sem);
1700        spin_lock(&parent->shadow_lock);
1701        new->initialized = true;
1702        if (rc) {
1703                list_del(&new->list);
1704                gmap_free(new);
1705                new = ERR_PTR(rc);
1706        }
1707        spin_unlock(&parent->shadow_lock);
1708        return new;
1709}
1710EXPORT_SYMBOL_GPL(gmap_shadow);
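
    /*
     * Illustrative sketch, not part of the original file: gmap_shadow()
     * returns ERR_PTR(-EAGAIN) both while another CPU is still
     * initializing the same shadow and after a lost protection race, so
     * callers simply retry.  example_make_shadow() and its bare retry
     * loop are hypothetical; real callers restart their complete fault
     * handling instead.
     */
    static struct gmap *example_make_shadow(struct gmap *parent,
                                            unsigned long asce, int edat_level)
    {
            struct gmap *sg;

            do {
                    sg = gmap_shadow(parent, asce, edat_level);
                    cond_resched();
            } while (sg == ERR_PTR(-EAGAIN));
            return sg;      /* shadow gmap, or ERR_PTR(-ENOMEM/-EFAULT) */
    }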
1711
1712/**
1713 * gmap_shadow_r2t - create an empty shadow region 2 table
1714 * @sg: pointer to the shadow guest address space structure
1715 * @saddr: faulting address in the shadow gmap
1716 * @r2t: parent gmap address of the region 2 table to get shadowed
1717 * @fake: r2t references contiguous guest memory block, not a r2t
1718 *
1719 * The r2t parameter specifies the address of the source table. The
1720 * four pages of the source table are made read-only in the parent gmap
1721 * address space. A write to the source table area @r2t will automatically
1722 * remove the shadow r2 table and all of its descendants.
1723 *
1724 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1725 * shadow table structure is incomplete, -ENOMEM if out of memory and
1726 * -EFAULT if an address in the parent gmap could not be resolved.
1727 *
1728 * Called with sg->mm->mmap_sem in read.
1729 */
1730int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1731                    int fake)
1732{
1733        unsigned long raddr, origin, offset, len;
1734        unsigned long *s_r2t, *table;
1735        struct page *page;
1736        int rc;
1737
1738        BUG_ON(!gmap_is_shadow(sg));
1739        /* Allocate a shadow region second table */
1740        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
1741        if (!page)
1742                return -ENOMEM;
1743        page->index = r2t & _REGION_ENTRY_ORIGIN;
1744        if (fake)
1745                page->index |= GMAP_SHADOW_FAKE_TABLE;
1746        s_r2t = (unsigned long *) page_to_phys(page);
1747        /* Install shadow region second table */
1748        spin_lock(&sg->guest_table_lock);
1749        table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1750        if (!table) {
1751                rc = -EAGAIN;           /* Race with unshadow */
1752                goto out_free;
1753        }
1754        if (!(*table & _REGION_ENTRY_INVALID)) {
1755                rc = 0;                 /* Already established */
1756                goto out_free;
1757        } else if (*table & _REGION_ENTRY_ORIGIN) {
1758                rc = -EAGAIN;           /* Race with shadow */
1759                goto out_free;
1760        }
1761        crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
1762        /* mark as invalid as long as the parent table is not protected */
1763        *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1764                 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
1765        if (sg->edat_level >= 1)
1766                *table |= (r2t & _REGION_ENTRY_PROTECT);
1767        list_add(&page->lru, &sg->crst_list);
1768        if (fake) {
1769                /* nothing to protect for fake tables */
1770                *table &= ~_REGION_ENTRY_INVALID;
1771                spin_unlock(&sg->guest_table_lock);
1772                return 0;
1773        }
1774        spin_unlock(&sg->guest_table_lock);
1775        /* Make r2t read-only in parent gmap page table */
1776        raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
1777        origin = r2t & _REGION_ENTRY_ORIGIN;
1778        offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1779        len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1780        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1781        spin_lock(&sg->guest_table_lock);
1782        if (!rc) {
1783                table = gmap_table_walk(sg, saddr, 4);
1784                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1785                              (unsigned long) s_r2t)
1786                        rc = -EAGAIN;           /* Race with unshadow */
1787                else
1788                        *table &= ~_REGION_ENTRY_INVALID;
1789        } else {
1790                gmap_unshadow_r2t(sg, raddr);
1791        }
1792        spin_unlock(&sg->guest_table_lock);
1793        return rc;
1794out_free:
1795        spin_unlock(&sg->guest_table_lock);
1796        __free_pages(page, CRST_ALLOC_ORDER);
1797        return rc;
1798}
1799EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
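
    /*
     * Worked example for the origin/offset/len arithmetic above (values
     * assumed): the architected table-offset (TF) and table-length (TL)
     * fields of a region-table entry select which of the four 4 KiB
     * pages of the next-level table actually exist, so
     *
     *         TF = 0, TL = 3:  offset = 0,    len = 4 * 4096 - 0    = 16384
     *         TF = 2, TL = 3:  offset = 8192, len = 4 * 4096 - 8192 =  8192
     *
     * i.e. gmap_protect_rmap() makes exactly the existing pages of the
     * source table read-only, not always the full 16 KiB.
     */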
1800
1801/**
1802 * gmap_shadow_r3t - create a shadow region 3 table
1803 * @sg: pointer to the shadow guest address space structure
1804 * @saddr: faulting address in the shadow gmap
1805 * @r3t: parent gmap address of the region 3 table to get shadowed
1806 * @fake: r3t references contiguous guest memory block, not a r3t
1807 *
1808 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1809 * shadow table structure is incomplete, -ENOMEM if out of memory and
1810 * -EFAULT if an address in the parent gmap could not be resolved.
1811 *
1812 * Called with sg->mm->mmap_sem in read.
1813 */
1814int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1815                    int fake)
1816{
1817        unsigned long raddr, origin, offset, len;
1818        unsigned long *s_r3t, *table;
1819        struct page *page;
1820        int rc;
1821
1822        BUG_ON(!gmap_is_shadow(sg));
1823        /* Allocate a shadow region third table */
1824        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
1825        if (!page)
1826                return -ENOMEM;
1827        page->index = r3t & _REGION_ENTRY_ORIGIN;
1828        if (fake)
1829                page->index |= GMAP_SHADOW_FAKE_TABLE;
1830        s_r3t = (unsigned long *) page_to_phys(page);
1831        /* Install shadow region third table */
1832        spin_lock(&sg->guest_table_lock);
1833        table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1834        if (!table) {
1835                rc = -EAGAIN;           /* Race with unshadow */
1836                goto out_free;
1837        }
1838        if (!(*table & _REGION_ENTRY_INVALID)) {
1839                rc = 0;                 /* Already established */
1840                goto out_free;
1841        } else if (*table & _REGION_ENTRY_ORIGIN) {
1842                rc = -EAGAIN;           /* Race with shadow */
                    goto out_free;
1843        }
1844        crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
1845        /* mark as invalid as long as the parent table is not protected */
1846        *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1847                 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
1848        if (sg->edat_level >= 1)
1849                *table |= (r3t & _REGION_ENTRY_PROTECT);
1850        list_add(&page->lru, &sg->crst_list);
1851        if (fake) {
1852                /* nothing to protect for fake tables */
1853                *table &= ~_REGION_ENTRY_INVALID;
1854                spin_unlock(&sg->guest_table_lock);
1855                return 0;
1856        }
1857        spin_unlock(&sg->guest_table_lock);
1858        /* Make r3t read-only in parent gmap page table */
1859        raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
1860        origin = r3t & _REGION_ENTRY_ORIGIN;
1861        offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1862        len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1863        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1864        spin_lock(&sg->guest_table_lock);
1865        if (!rc) {
1866                table = gmap_table_walk(sg, saddr, 3);
1867                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1868                              (unsigned long) s_r3t)
1869                        rc = -EAGAIN;           /* Race with unshadow */
1870                else
1871                        *table &= ~_REGION_ENTRY_INVALID;
1872        } else {
1873                gmap_unshadow_r3t(sg, raddr);
1874        }
1875        spin_unlock(&sg->guest_table_lock);
1876        return rc;
1877out_free:
1878        spin_unlock(&sg->guest_table_lock);
1879        __free_pages(page, CRST_ALLOC_ORDER);
1880        return rc;
1881}
1882EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1883
1884/**
1885 * gmap_shadow_sgt - create a shadow segment table
1886 * @sg: pointer to the shadow guest address space structure
1887 * @saddr: faulting address in the shadow gmap
1888 * @sgt: parent gmap address of the segment table to get shadowed
1889 * @fake: sgt references contiguous guest memory block, not a sgt
1890 *
1891 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1892 * shadow table structure is incomplete, -ENOMEM if out of memory and
1893 * -EFAULT if an address in the parent gmap could not be resolved.
1894 *
1895 * Called with sg->mm->mmap_sem in read.
1896 */
1897int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1898                    int fake)
1899{
1900        unsigned long raddr, origin, offset, len;
1901        unsigned long *s_sgt, *table;
1902        struct page *page;
1903        int rc;
1904
1905        BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
1906        /* Allocate a shadow segment table */
1907        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
1908        if (!page)
1909                return -ENOMEM;
1910        page->index = sgt & _REGION_ENTRY_ORIGIN;
1911        if (fake)
1912                page->index |= GMAP_SHADOW_FAKE_TABLE;
1913        s_sgt = (unsigned long *) page_to_phys(page);
1914        /* Install shadow segment table */
1915        spin_lock(&sg->guest_table_lock);
1916        table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1917        if (!table) {
1918                rc = -EAGAIN;           /* Race with unshadow */
1919                goto out_free;
1920        }
1921        if (!(*table & _REGION_ENTRY_INVALID)) {
1922                rc = 0;                 /* Already established */
1923                goto out_free;
1924        } else if (*table & _REGION_ENTRY_ORIGIN) {
1925                rc = -EAGAIN;           /* Race with shadow */
1926                goto out_free;
1927        }
1928        crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
1929        /* mark as invalid as long as the parent table is not protected */
1930        *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1931                 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
1932        if (sg->edat_level >= 1)
1933                *table |= sgt & _REGION_ENTRY_PROTECT;
1934        list_add(&page->lru, &sg->crst_list);
1935        if (fake) {
1936                /* nothing to protect for fake tables */
1937                *table &= ~_REGION_ENTRY_INVALID;
1938                spin_unlock(&sg->guest_table_lock);
1939                return 0;
1940        }
1941        spin_unlock(&sg->guest_table_lock);
1942        /* Make sgt read-only in parent gmap page table */
1943        raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
1944        origin = sgt & _REGION_ENTRY_ORIGIN;
1945        offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1946        len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1947        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1948        spin_lock(&sg->guest_table_lock);
1949        if (!rc) {
1950                table = gmap_table_walk(sg, saddr, 2);
1951                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1952                              (unsigned long) s_sgt)
1953                        rc = -EAGAIN;           /* Race with unshadow */
1954                else
1955                        *table &= ~_REGION_ENTRY_INVALID;
1956        } else {
1957                gmap_unshadow_sgt(sg, raddr);
1958        }
1959        spin_unlock(&sg->guest_table_lock);
1960        return rc;
1961out_free:
1962        spin_unlock(&sg->guest_table_lock);
1963        __free_pages(page, CRST_ALLOC_ORDER);
1964        return rc;
1965}
1966EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
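
    /*
     * Illustrative sketch, not part of the original file (the exact bit
     * handling is an assumption): with EDAT-2 the guest may back a whole
     * 2 GB region with one large region-3 entry, in which case no real
     * segment table exists and a "fake" sgt is shadowed over the
     * contiguous memory block instead.  example_shadow_sgt() is
     * hypothetical.
     */
    static int example_shadow_sgt(struct gmap *sg, unsigned long saddr,
                                  unsigned long r3_entry)
    {
            if (r3_entry & _REGION3_ENTRY_LARGE)    /* huge region: fake sgt */
                    return gmap_shadow_sgt(sg, saddr, r3_entry & _REGION3_MASK, 1);
            return gmap_shadow_sgt(sg, saddr, r3_entry, 0);
    }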
1967
1968/**
1969 * gmap_shadow_pgt_lookup - find a shadow page table
1970 * @sg: pointer to the shadow guest address space structure
1971 * @saddr: the address in the shadow guest address space
1972 * @pgt: parent gmap address of the page table to get shadowed
1973 * @dat_protection: if the pgtable is marked as protected by dat
1974 * @fake: pgt references contiguous guest memory block, not a pgtable
1975 *
1976 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1977 * table was not found.
1978 *
1979 * Called with sg->mm->mmap_sem in read.
1980 */
1981int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1982                           unsigned long *pgt, int *dat_protection,
1983                           int *fake)
1984{
1985        unsigned long *table;
1986        struct page *page;
1987        int rc;
1988
1989        BUG_ON(!gmap_is_shadow(sg));
1990        spin_lock(&sg->guest_table_lock);
1991        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1992        if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1993                /* Shadow page tables are full pages (pte+pgste) */
1994                page = pfn_to_page(*table >> PAGE_SHIFT);
1995                *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
1996                *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1997                *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
1998                rc = 0;
1999        } else {
2000                rc = -EAGAIN;
2001        }
2002        spin_unlock(&sg->guest_table_lock);
2003        return rc;
2005}
2006EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
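
    /*
     * Illustrative sketch, not part of the original file: a shadow fault
     * handler tries the lookup first and only walks the parent guest's
     * tables to instantiate the missing page table on -EAGAIN, before
     * shadowing the final pte.  example_resolve_fault() and
     * example_walk_parent_pgt() are hypothetical.
     */
    static int example_resolve_fault(struct gmap *sg, unsigned long saddr,
                                     pte_t pte)
    {
            unsigned long pgt;
            int dat_protection, fake;
            int rc;

            rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
            if (rc == -EAGAIN) {
                    /* hypothetical guest table walk yielding pgt and fake */
                    rc = example_walk_parent_pgt(sg, saddr, &pgt, &fake);
                    if (!rc)
                            rc = gmap_shadow_pgt(sg, saddr, pgt, fake);
            }
            /* dat_protection would feed into the final pte; elided here */
            if (!rc)
                    rc = gmap_shadow_page(sg, saddr, pte);
            return rc;
    }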
2007
2008/**
2009 * gmap_shadow_pgt - instantiate a shadow page table
2010 * @sg: pointer to the shadow guest address space structure
2011 * @saddr: faulting address in the shadow gmap
2012 * @pgt: parent gmap address of the page table to get shadowed
2013 * @fake: pgt references contiguous guest memory block, not a pgtable
2014 *
2015 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2016 * shadow table structure is incomplete, -ENOMEM if out of memory and
2017 * -EFAULT if an address in the parent gmap could not be resolved.
2018 *
2019 * Called with sg->mm->mmap_sem in read.
2020 */
2021int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2022                    int fake)
2023{
2024        unsigned long raddr, origin;
2025        unsigned long *s_pgt, *table;
2026        struct page *page;
2027        int rc;
2028
2029        BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
2030        /* Allocate a shadow page table */
2031        page = page_table_alloc_pgste(sg->mm);
2032        if (!page)
2033                return -ENOMEM;
2034        page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
2035        if (fake)
2036                page->index |= GMAP_SHADOW_FAKE_TABLE;
2037        s_pgt = (unsigned long *) page_to_phys(page);
2038        /* Install shadow page table */
2039        spin_lock(&sg->guest_table_lock);
2040        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2041        if (!table) {
2042                rc = -EAGAIN;           /* Race with unshadow */
2043                goto out_free;
2044        }
2045        if (!(*table & _SEGMENT_ENTRY_INVALID)) {
2046                rc = 0;                 /* Already established */
2047                goto out_free;
2048        } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
2049                rc = -EAGAIN;           /* Race with shadow */
2050                goto out_free;
2051        }
2052        /* mark as invalid as long as the parent table is not protected */
2053        *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
2054                 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
2055        list_add(&page->lru, &sg->pt_list);
2056        if (fake) {
2057                /* nothing to protect for fake tables */
2058                *table &= ~_SEGMENT_ENTRY_INVALID;
2059                spin_unlock(&sg->guest_table_lock);
2060                return 0;
2061        }
2062        spin_unlock(&sg->guest_table_lock);
2063        /* Make pgt read-only in parent gmap page table (not the pgste) */
2064        raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
2065        origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
2066        rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
2067        spin_lock(&sg->guest_table_lock);
2068        if (!rc) {
2069                table = gmap_table_walk(sg, saddr, 1);
2070                if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
2071                              (unsigned long) s_pgt)
2072                        rc = -EAGAIN;           /* Race with unshadow */
2073                else
2074                        *table &= ~_SEGMENT_ENTRY_INVALID;
2075        } else {
2076                gmap_unshadow_pgt(sg, raddr);
2077        }
2078        spin_unlock(&sg->guest_table_lock);
2079        return rc;
2080out_free:
2081        spin_unlock(&sg->guest_table_lock);
2082        page_table_free_pgste(page);
2083        return rc;
2085}
2086EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
2087
2088/**
2089 * gmap_shadow_page - create a shadow page mapping
2090 * @sg: pointer to the shadow guest address space structure
2091 * @saddr: faulting address in the shadow gmap
2092 * @pte: pte in parent gmap address space to get shadowed
2093 *
2094 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2095 * shadow table structure is incomplete, -ENOMEM if out of memory and
2096 * -EFAULT if an address in the parent gmap could not be resolved.
2097 *
2098 * Called with sg->mm->mmap_sem in read.
2099 */
2100int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
2101{
2102        struct gmap *parent;
2103        struct gmap_rmap *rmap;
2104        unsigned long vmaddr, paddr;
2105        spinlock_t *ptl;
2106        pte_t *sptep, *tptep;
2107        int prot;
2108        int rc;
2109
2110        BUG_ON(!gmap_is_shadow(sg));
2111        parent = sg->parent;
2112        prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
2113
2114        rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
2115        if (!rmap)
2116                return -ENOMEM;
2117        rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
2118
2119        while (1) {
2120                paddr = pte_val(pte) & PAGE_MASK;
2121                vmaddr = __gmap_translate(parent, paddr);
2122                if (IS_ERR_VALUE(vmaddr)) {
2123                        rc = vmaddr;
2124                        break;
2125                }
2126                rc = radix_tree_preload(GFP_KERNEL);
2127                if (rc)
2128                        break;
2129                rc = -EAGAIN;
2130                sptep = gmap_pte_op_walk(parent, paddr, &ptl);
2131                if (sptep) {
2132                        spin_lock(&sg->guest_table_lock);
2133                        /* Get page table pointer */
2134                        tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
2135                        if (!tptep) {
2136                                spin_unlock(&sg->guest_table_lock);
2137                                gmap_pte_op_end(ptl);
2138                                radix_tree_preload_end();
2139                                break;
2140                        }
2141                        rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
2142                        if (rc > 0) {
2143                                /* Success and a new mapping */
2144                                gmap_insert_rmap(sg, vmaddr, rmap);
2145                                rmap = NULL;
2146                                rc = 0;
2147                        }
2148                        gmap_pte_op_end(ptl);
2149                        spin_unlock(&sg->guest_table_lock);
2150                }
2151                radix_tree_preload_end();
2152                if (!rc)
2153                        break;
2154                rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
2155                if (rc)
2156                        break;
2157        }
2158        kfree(rmap);
2159        return rc;
2160}
2161EXPORT_SYMBOL_GPL(gmap_shadow_page);
2162
2163/**
2164 * gmap_shadow_notify - handle notifications for shadow gmap
     * @sg: pointer to the shadow guest address space structure
     * @vmaddr: virtual address in the host process address space
     * @gaddr: affected guest address in the parent gmap
2165 *
2166 * Called with sg->parent->shadow_lock.
2167 */
2168static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2169                               unsigned long gaddr)
2170{
2171        struct gmap_rmap *rmap, *rnext, *head;
2172        unsigned long start, end, bits, raddr;
2173
2174        BUG_ON(!gmap_is_shadow(sg));
2175
2176        spin_lock(&sg->guest_table_lock);
2177        if (sg->removed) {
2178                spin_unlock(&sg->guest_table_lock);
2179                return;
2180        }
2181        /* Check for top level table */
2182        start = sg->orig_asce & _ASCE_ORIGIN;
2183        end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
2184        if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2185            gaddr < end) {
2186                /* The complete shadow table has to go */
2187                gmap_unshadow(sg);
2188                spin_unlock(&sg->guest_table_lock);
2189                list_del(&sg->list);
2190                gmap_put(sg);
2191                return;
2192        }
2193        /* Remove the page table tree for one specific entry */
2194        head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
2195        gmap_for_each_rmap_safe(rmap, rnext, head) {
2196                bits = rmap->raddr & _SHADOW_RMAP_MASK;
2197                raddr = rmap->raddr ^ bits;
2198                switch (bits) {
2199                case _SHADOW_RMAP_REGION1:
2200                        gmap_unshadow_r2t(sg, raddr);
2201                        break;
2202                case _SHADOW_RMAP_REGION2:
2203                        gmap_unshadow_r3t(sg, raddr);
2204                        break;
2205                case _SHADOW_RMAP_REGION3:
2206                        gmap_unshadow_sgt(sg, raddr);
2207                        break;
2208                case _SHADOW_RMAP_SEGMENT:
2209                        gmap_unshadow_pgt(sg, raddr);
2210                        break;
2211                case _SHADOW_RMAP_PGTABLE:
2212                        gmap_unshadow_page(sg, raddr);
2213                        break;
2214                }
2215                kfree(rmap);
2216        }
2217        spin_unlock(&sg->guest_table_lock);
2218}
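
    /*
     * Worked example of the rmap encoding handled above (sketch): the
     * level tag lives in the low bits of rmap->raddr.  A page-table rmap
     * for the shadow address 0x20000 is stored by gmap_shadow_page()
     * above as
     *
     *         rmap->raddr = (0x20000 & PAGE_MASK) | _SHADOW_RMAP_PGTABLE
     *
     * and decoded here as
     *
     *         bits  = rmap->raddr & _SHADOW_RMAP_MASK;
     *         raddr = rmap->raddr ^ bits;
     *
     * so raddr is 0x20000 again and the switch statement picks
     * gmap_unshadow_page().
     */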
2219
2220/**
2221 * ptep_notify - call all invalidation callbacks for a specific pte.
2222 * @mm: pointer to the process mm_struct
2223 * @vmaddr: virtual address in the process address space
2224 * @pte: pointer to the page table entry
2225 * @bits: bits from the pgste that caused the notify call
2226 *
2227 * This function is assumed to be called with the page table lock held
2228 * for the pte to notify.
2229 */
2230void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2231                 pte_t *pte, unsigned long bits)
2232{
2233        unsigned long offset, gaddr = 0;
2234        unsigned long *table;
2235        struct gmap *gmap, *sg, *next;
2236
2237        offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
2238        offset = offset * (PAGE_SIZE / sizeof(pte_t));
2239        rcu_read_lock();
2240        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2241                spin_lock(&gmap->guest_table_lock);
2242                table = radix_tree_lookup(&gmap->host_to_guest,
2243                                          vmaddr >> PMD_SHIFT);
2244                if (table)
2245                        gaddr = __gmap_segment_gaddr(table) + offset;
2246                spin_unlock(&gmap->guest_table_lock);
2247                if (!table)
2248                        continue;
2249
2250                if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2251                        spin_lock(&gmap->shadow_lock);
2252                        list_for_each_entry_safe(sg, next,
2253                                                 &gmap->children, list)
2254                                gmap_shadow_notify(sg, vmaddr, gaddr);
2255                        spin_unlock(&gmap->shadow_lock);
2256                }
2257                if (bits & PGSTE_IN_BIT)
2258                        gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2259        }
2260        rcu_read_unlock();
2261}
2262EXPORT_SYMBOL_GPL(ptep_notify);
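
    /*
     * Worked example for the offset arithmetic above (sketch): a pte
     * pointer always points into a 256-entry page table, so masking with
     * 255 * sizeof(pte_t) extracts the byte offset of the entry within
     * that table.  For entry index 5:
     *
     *         offset = 5 * sizeof(pte_t)    = 40
     *         offset = 40 * (PAGE_SIZE / 8) = 20480 = 5 * PAGE_SIZE
     *
     * which converts the pte index into the guest-address offset added
     * to the segment base from __gmap_segment_gaddr().
     */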
2263
2264static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2265                             unsigned long gaddr)
2266{
2267        pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
2268        gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2269}
2270
2271/**
2272 * gmap_pmdp_xchg - exchange a gmap pmd with another
2273 * @gmap: pointer to the guest address space structure
2274 * @pmdp: pointer to the pmd entry
2275 * @new: replacement entry
2276 * @gaddr: the affected guest address
2277 *
2278 * This function is assumed to be called with the guest_table_lock
2279 * held.
2280 */
2281static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2282                           unsigned long gaddr)
2283{
2284        gaddr &= HPAGE_MASK;
2285        pmdp_notify_gmap(gmap, pmdp, gaddr);
2286        pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
2287        if (MACHINE_HAS_TLB_GUEST)
2288                __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2289                            IDTE_GLOBAL);
2290        else if (MACHINE_HAS_IDTE)
2291                __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
2292        else
2293                __pmdp_csp(pmdp);
2294        *pmdp = new;
2295}
2296
2297static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
2298                            int purge)
2299{
2300        pmd_t *pmdp;
2301        struct gmap *gmap;
2302        unsigned long gaddr;
2303
2304        rcu_read_lock();
2305        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2306                spin_lock(&gmap->guest_table_lock);
2307                pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2308                                                  vmaddr >> PMD_SHIFT);
2309                if (pmdp) {
2310                        gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
2311                        pmdp_notify_gmap(gmap, pmdp, gaddr);
2312                        WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2313                                                   _SEGMENT_ENTRY_GMAP_UC));
2314                        if (purge)
2315                                __pmdp_csp(pmdp);
2316                        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
2317                }
2318                spin_unlock(&gmap->guest_table_lock);
2319        }
2320        rcu_read_unlock();
2321}
2322
2323/**
2324 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2325 *                        flushing
2326 * @mm: pointer to the process mm_struct
2327 * @vmaddr: virtual address in the process address space
2328 */
2329void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
2330{
2331        gmap_pmdp_clear(mm, vmaddr, 0);
2332}
2333EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
2334
2335/**
2336 * gmap_pmdp_csp - csp all affected guest pmd entries
2337 * @mm: pointer to the process mm_struct
2338 * @vmaddr: virtual address in the process address space
2339 */
2340void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
2341{
2342        gmap_pmdp_clear(mm, vmaddr, 1);
2343}
2344EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
2345
2346/**
2347 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
2348 * @mm: pointer to the process mm_struct
2349 * @vmaddr: virtual address in the process address space
2350 */
2351void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
2352{
2353        unsigned long *entry, gaddr;
2354        struct gmap *gmap;
2355        pmd_t *pmdp;
2356
2357        rcu_read_lock();
2358        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2359                spin_lock(&gmap->guest_table_lock);
2360                entry = radix_tree_delete(&gmap->host_to_guest,
2361                                          vmaddr >> PMD_SHIFT);
2362                if (entry) {
2363                        pmdp = (pmd_t *)entry;
2364                        gaddr = __gmap_segment_gaddr(entry);
2365                        pmdp_notify_gmap(gmap, pmdp, gaddr);
2366                        WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2367                                           _SEGMENT_ENTRY_GMAP_UC));
2368                        if (MACHINE_HAS_TLB_GUEST)
2369                                __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2370                                            gmap->asce, IDTE_LOCAL);
2371                        else if (MACHINE_HAS_IDTE)
2372                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
2373                        *entry = _SEGMENT_ENTRY_EMPTY;
2374                }
2375                spin_unlock(&gmap->guest_table_lock);
2376        }
2377        rcu_read_unlock();
2378}
2379EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
2380
2381/**
2382 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
2383 * @mm: pointer to the process mm_struct
2384 * @vmaddr: virtual address in the process address space
2385 */
2386void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
2387{
2388        unsigned long *entry, gaddr;
2389        struct gmap *gmap;
2390        pmd_t *pmdp;
2391
2392        rcu_read_lock();
2393        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2394                spin_lock(&gmap->guest_table_lock);
2395                entry = radix_tree_delete(&gmap->host_to_guest,
2396                                          vmaddr >> PMD_SHIFT);
2397                if (entry) {
2398                        pmdp = (pmd_t *)entry;
2399                        gaddr = __gmap_segment_gaddr(entry);
2400                        pmdp_notify_gmap(gmap, pmdp, gaddr);
2401                        WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2402                                           _SEGMENT_ENTRY_GMAP_UC));
2403                        if (MACHINE_HAS_TLB_GUEST)
2404                                __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2405                                            gmap->asce, IDTE_GLOBAL);
2406                        else if (MACHINE_HAS_IDTE)
2407                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
2408                        else
2409                                __pmdp_csp(pmdp);
2410                        *entry = _SEGMENT_ENTRY_EMPTY;
2411                }
2412                spin_unlock(&gmap->guest_table_lock);
2413        }
2414        rcu_read_unlock();
2415}
2416EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
2417
2418/**
2419 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2420 * @gmap: pointer to guest address space
2421 * @pmdp: pointer to the pmd to be tested
2422 * @gaddr: virtual address in the guest address space
2423 *
2424 * This function is assumed to be called with the guest_table_lock
2425 * held.
2426 */
2427static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2428                                          unsigned long gaddr)
2429{
2430        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2431                return false;
2432
2433        /* Memory that is already protected and did not change is clean */
2434        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
2435            !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
2436                return false;
2437
2438        /* Clear UC indication and reset protection */
2439        pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
2440        gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2441        return true;
2442}
2443
2444/**
2445 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2446 * @gmap: pointer to guest address space
2447 * @bitmap: dirty bitmap for this pmd
2448 * @gaddr: virtual address in the guest address space
2449 * @vmaddr: virtual address in the host address space
2450 *
2451 * This function is assumed to be called with the guest_table_lock
2452 * held.
2453 */
2454void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2455                             unsigned long gaddr, unsigned long vmaddr)
2456{
2457        int i;
2458        pmd_t *pmdp;
2459        pte_t *ptep;
2460        spinlock_t *ptl;
2461
2462        pmdp = gmap_pmd_op_walk(gmap, gaddr);
2463        if (!pmdp)
2464                return;
2465
2466        if (pmd_large(*pmdp)) {
2467                if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2468                        bitmap_fill(bitmap, _PAGE_ENTRIES);
2469        } else {
2470                for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
2471                        ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2472                        if (!ptep)
2473                                continue;
2474                        if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2475                                set_bit(i, bitmap);
2476                        spin_unlock(ptl);
2477                }
2478        }
2479        gmap_pmd_op_end(gmap, pmdp);
2480}
2481EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
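
    /*
     * Illustrative sketch, not part of the original file: a dirty-log
     * harvester calls gmap_sync_dirty_log_pmd() once per segment and
     * turns the set bits back into guest page addresses.
     * example_mark_dirty() is hypothetical.
     */
    static void example_collect_dirty(struct gmap *gmap, unsigned long gaddr,
                                      unsigned long vmaddr)
    {
            unsigned long bitmap[4] = { 0 };
            unsigned long i;

            gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
            for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
                    example_mark_dirty(gaddr + i * PAGE_SIZE);      /* hypothetical */
    }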
2482
2483static inline void thp_split_mm(struct mm_struct *mm)
2484{
2485#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2486        struct vm_area_struct *vma;
2487        unsigned long addr;
2488
2489        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2490                for (addr = vma->vm_start;
2491                     addr < vma->vm_end;
2492                     addr += PAGE_SIZE)
2493                        follow_page(vma, addr, FOLL_SPLIT);
2494                vma->vm_flags &= ~VM_HUGEPAGE;
2495                vma->vm_flags |= VM_NOHUGEPAGE;
2496        }
2497        mm->def_flags |= VM_NOHUGEPAGE;
2498#endif
2499}
2500
2501/*
2502 * Remove all empty zero pages from the mapping for lazy refaulting
2503 * - This must be called after mm->context.has_pgste is set, to avoid
2504 *   future creation of zero pages
2505 * - This must be called after THP was enabled
2506 */
2507static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2508                           unsigned long end, struct mm_walk *walk)
2509{
2510        unsigned long addr;
2511
2512        for (addr = start; addr != end; addr += PAGE_SIZE) {
2513                pte_t *ptep;
2514                spinlock_t *ptl;
2515
2516                ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2517                if (is_zero_pfn(pte_pfn(*ptep)))
2518                        ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2519                pte_unmap_unlock(ptep, ptl);
2520        }
2521        return 0;
2522}
2523
2524static const struct mm_walk_ops zap_zero_walk_ops = {
2525        .pmd_entry      = __zap_zero_pages,
2526};
2527
2528/*
2529 * switch on pgstes for its userspace process (for kvm)
2530 */
2531int s390_enable_sie(void)
2532{
2533        struct mm_struct *mm = current->mm;
2534
2535        /* Do we have pgstes? If yes, we are done */
2536        if (mm_has_pgste(mm))
2537                return 0;
2538        /* Fail if the page tables are 2K */
2539        if (!mm_alloc_pgste(mm))
2540                return -EINVAL;
2541        down_write(&mm->mmap_sem);
2542        mm->context.has_pgste = 1;
2543        /* split thp mappings and disable thp for future mappings */
2544        thp_split_mm(mm);
2545        walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
2546        up_write(&mm->mmap_sem);
2547        return 0;
2548}
2549EXPORT_SYMBOL_GPL(s390_enable_sie);
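
    /*
     * Illustrative sketch, not part of the original file: KVM-style users
     * enable pgstes for their mm before creating the guest address space.
     * example_vm_init() and the 4 TB limit are hypothetical.
     */
    static struct gmap *example_vm_init(void)
    {
            /* fails if this mm was set up with 2K page tables */
            if (s390_enable_sie())
                    return NULL;
            return gmap_create(current->mm, (1UL << 42) - 1);
    }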
2550
2551/*
2552 * Enable storage key handling from now on and initialize the storage
2553 * keys with the default key.
2554 */
2555static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
2556                                  unsigned long next, struct mm_walk *walk)
2557{
2558        /* Clear storage key */
2559        ptep_zap_key(walk->mm, addr, pte);
2560        return 0;
2561}
2562
2563static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
2564                                      unsigned long hmask, unsigned long next,
2565                                      struct mm_walk *walk)
2566{
2567        pmd_t *pmd = (pmd_t *)pte;
2568        unsigned long start, end;
2569        struct page *page = pmd_page(*pmd);
2570
2571        /*
2572         * The write check makes sure we do not set a key on shared
2573         * memory. This is needed as the walker does not differentiate
2574         * between actual guest memory and the process executable or
2575         * shared libraries.
2576         */
2577        if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
2578            !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
2579                return 0;
2580
2581        start = pmd_val(*pmd) & HPAGE_MASK;
2582        end = start + HPAGE_SIZE - 1;
2583        __storage_key_init_range(start, end);
2584        set_bit(PG_arch_1, &page->flags);
2585        return 0;
2586}
2587
2588static const struct mm_walk_ops enable_skey_walk_ops = {
2589        .hugetlb_entry          = __s390_enable_skey_hugetlb,
2590        .pte_entry              = __s390_enable_skey_pte,
2591};
2592
2593int s390_enable_skey(void)
2594{
2595        struct mm_struct *mm = current->mm;
2596        struct vm_area_struct *vma;
2597        int rc = 0;
2598
2599        down_write(&mm->mmap_sem);
2600        if (mm_uses_skeys(mm))
2601                goto out_up;
2602
2603        mm->context.uses_skeys = 1;
2604        for (vma = mm->mmap; vma; vma = vma->vm_next) {
2605                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
2606                                MADV_UNMERGEABLE, &vma->vm_flags)) {
2607                        mm->context.uses_skeys = 0;
2608                        rc = -ENOMEM;
2609                        goto out_up;
2610                }
2611        }
2612        mm->def_flags &= ~VM_MERGEABLE;
2613
2614        walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
2615
2616out_up:
2617        up_write(&mm->mmap_sem);
2618        return rc;
2619}
2620EXPORT_SYMBOL_GPL(s390_enable_skey);
2621
2622/*
2623 * Reset CMMA state, make all pages stable again.
2624 */
2625static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2626                             unsigned long next, struct mm_walk *walk)
2627{
2628        ptep_zap_unused(walk->mm, addr, pte, 1);
2629        return 0;
2630}
2631
2632static const struct mm_walk_ops reset_cmma_walk_ops = {
2633        .pte_entry              = __s390_reset_cmma,
2634};
2635
2636void s390_reset_cmma(struct mm_struct *mm)
2637{
2638        down_write(&mm->mmap_sem);
2639        walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
2640        up_write(&mm->mmap_sem);
2641}
2642EXPORT_SYMBOL_GPL(s390_reset_cmma);
2643