linux/arch/powerpc/mm/hugepage-hash64.c
/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

/*
 * PPC64 THP support for hash-based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>

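/*
 * __hash_page_thp - establish or update the hash PTE backing a
 * transparent huge page, called from the low-level hash fault path.
 *
 * Returns 0 if the HPTE was installed or the access should simply be
 * retried (PMD busy or mid-split), 1 if a full page fault is needed
 * (permission mismatch), and -1 on hypervisor insert failure.
 */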
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
		    unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * Atomically mark the Linux huge-page PMD busy and dirty.
	 */
	do {
		old_pmd = pmd_val(*pmdp);
		/* If the PMD is busy, retry the access */
		if (unlikely(old_pmd & _PAGE_BUSY))
			return 0;
		/* If the PMD is in the middle of a THP split, retry the access */
		if (unlikely(old_pmd & _PAGE_SPLITTING))
			return 0;
		/* If the PMD permissions don't match, take a page fault */
		if (unlikely(access & ~old_pmd))
			return 1;
		/*
		 * Try to lock the PMD, add ACCESSED, and add DIRTY if it
		 * was a write access.
		 */
		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pmd |= _PAGE_DIRTY;
	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
					  old_pmd, new_pmd));
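	/*
	 * From here on _PAGE_BUSY acts as a per-PMD lock: concurrent hash
	 * faults on this PMD bail out and retry above, so the HPTE slot
	 * array below can be updated without further atomics.
	 */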
	/*
	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page.
	 */
	rflags = new_pmd & _PAGE_USER;
	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
					(new_pmd & _PAGE_DIRTY)))
		rflags |= 0x1;
	/*
	 * _PAGE_EXEC -> HPTE_R_N: the hardware no-execute bit has the
	 * inverted sense of the Linux _PAGE_EXEC bit.
	 */
	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);

#if 0
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU that supports hugepages lacks no-execute, so we
		 * don't need to worry about that case.
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
	}
#endif
	/*
	 * Find the slot index details for this ea, using base page size.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
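	/*
	 * A 16M huge page holds at most 16M / 4K = 4096 base-page-size
	 * subpages, so the index must stay below that bound.
	 */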
	BUG_ON(index >= 4096);

	vpn = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);
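	/*
	 * The slot array records, for each subpage of the huge page,
	 * whether it has a valid HPTE and in which hash group and slot
	 * that HPTE lives, so it can be updated or invalidated later.
	 */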

	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hidx = hpte_hash_index(hpte_slot_array, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
					   psize, lpsize, ssize, local);
		/*
		 * The update failed (the HPTE may have been evicted);
		 * try to insert a new entry below.
		 */
		if (ret == -1) {
			/*
			 * The large PTE is marked busy, so we can be sure
			 * nobody else is looking at hpte_slot_array; hence
			 * we can safely update it here.
			 */
			valid = 0;
			new_pmd &= ~_PAGE_HPTEFLAGS;
			hpte_slot_array[index] = 0;
		} else
			/* clear the busy bits and set the hash pte bits */
			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
	}

	if (!valid) {
		unsigned long hpte_group;

		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
repeat:
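		/*
		 * Each hash bucket is a group of HPTES_PER_GROUP (8)
		 * entries; mask off the low bits to get the group base.
		 */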
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* clear the busy bits and set the hash pte bits */
		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/*
		 * Add in the WIMG (write-through, cache-inhibited,
		 * memory-coherent, guarded) bits.
		 */
		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  psize, lpsize, ssize);
		/*
		 * The primary group is full, try the secondary.
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
						  rflags, HPTE_V_SECONDARY,
						  psize, lpsize, ssize);
			if (slot == -1) {
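				/*
				 * Both groups are full: evict an entry
				 * from the primary or secondary group
				 * (timebase low bit as cheap randomness)
				 * and retry the insert.
				 */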
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure: restore the old PMD and return -1,
		 * as the __hash_page_* functions do.
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * The large PTE is marked busy, so we can be sure nobody
		 * else is looking at hpte_slot_array; hence we can safely
		 * update it here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * No need for ldarx/stdcx here: we still hold _PAGE_BUSY, so no
	 * one else can update the PMD under us; a plain store that also
	 * clears the busy bit is sufficient.
	 */
	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
	return 0;
}
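
/*
 * Caller's-eye sketch (an assumption for illustration, not part of this
 * file): the generic hash fault path is expected to dispatch here when
 * it finds a transparent hugepage mapping, roughly:
 *
 *	if (pmd_trans_huge(*pmdp))
 *		rc = __hash_page_thp(ea, access, vsid, pmdp, trap,
 *				     local, ssize, psize);
 */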