linux/arch/powerpc/mm/hugetlbpage-hash64.c
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                                  unsigned long pa, unsigned long rflags,
                                  unsigned long vflags, int psize, int ssize);

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize)
{
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa, sz;
        long slot;

        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

        /* Compute the virtual page number used to index the hash table */
        vpn = hpt_vpn(ea, vsid, ssize);

        /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

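        /* Return convention matches __hash_page_*: 0 means the access was
         * handled (or should simply be retried), 1 means a full page fault
         * is needed, -1 means the hash insert failed at the hypervisor. */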
        do {
                old_pte = pte_val(*ptep);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(access & ~old_pte))
                        return 1;
                /* Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pte |= _PAGE_DIRTY;
        } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                          old_pte, new_pte));

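        /* HPTE PP bits: 0b10 grants read/write access, 0b11 read-only, so
         * a PTE lacking _PAGE_RW maps to the read-only encoding. */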
        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no-execute, so we
                 * don't need to worry about that case */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(vpn, shift, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
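                /* _PAGE_F_GIX caches the 3-bit slot index within the HPTE
                 * group at PTE bits 12-14, hence the shift by 12. */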
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

                if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
                                         mmu_psize, ssize, local) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }

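        /* Case 1: no HPTE yet, or the one we cached was evicted and the
         * update above failed, so insert a fresh entry. */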
        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, shift, ssize);

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

                /* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
                /* Add in WIMG bits */
                rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));
                /* Always enable memory coherence */
                rflags |= HPTE_R_M;

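                /* hpte_insert_repeating() tries the primary and then the
                 * secondary hash group, evicting an existing entry and
                 * retrying if both groups are full. */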
                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);

                /*
                 * Hypervisor failure. Restore old pte and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           mmu_psize, mmu_psize, old_pte);
                        return -1;
                }

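                /* Cache the slot (secondary-group bit plus 3-bit group
                 * index) in the PTE so a later update or invalidate can
                 * find the HPTE without searching. */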
                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here: we still hold the PTE lock
         * (_PAGE_BUSY), so nobody else can modify the PTE under us, and
         * this plain store also releases the lock.
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
        return 0;
}