linux/arch/powerpc/mm/hash64_4k.c
/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>

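/*
 * __hash_page_4K - insert or update a hash page table entry (HPTE)
 * for a 4K page when the MMU uses a 4K base page size.
 *
 * Returns 0 if the PTE was busy or once an HPTE has been
 * installed/updated (the caller just retries the access), 1 if the
 * access permissions don't match (the caller takes a page fault),
 * and -1 if the hypervisor refused the insert.
 */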
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                   pte_t *ptep, unsigned long trap, unsigned long flags,
                   int ssize, int subpg_prot)
{
        unsigned long hpte_group;
        unsigned long rflags, pa;
        unsigned long old_pte, new_pte;
        unsigned long vpn, hash, slot;
        unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

        /*
         * atomically mark the linux PTE busy and dirty
         */
        do {
                pte_t pte = READ_ONCE(*ptep);

                old_pte = pte_val(pte);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & H_PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(!check_pte_access(access, old_pte)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access.
                 */
                new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_WRITE)
                        new_pte |= _PAGE_DIRTY;
        } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
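
        /*
         * From here on the PTE is locked: H_PAGE_BUSY is set, so a
         * concurrent hash fault on the same PTE bails out with 0 above
         * and retries, leaving us free to update the hash table.
         */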

        /*
         * Convert the linux PTE protection bits into the PP/key bits
         * (rflags) expected by the hash table.
         */
        rflags = htab_convert_pte_flags(new_pte);
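        /*
         * If the CPU enforces no-execute but its icache is not
         * coherent, handle the icache lazily: hash_page_do_lazy_icache()
         * either flushes the page's icache or strips execute permission
         * from rflags, depending on the faulting trap.
         */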
        if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

        vpn = hpt_vpn(ea, vsid, ssize);
        if (unlikely(old_pte & H_PAGE_HASHPTE)) {
                /*
                 * There might be an HPTE for this PTE: reconstruct its
                 * slot from the H_PAGE_F_SECOND/H_PAGE_F_GIX hint bits
                 * and try to update it in place. On failure, clear the
                 * hash flags so a fresh HPTE is inserted below.
                 */
                hash = hpt_hash(vpn, shift, ssize);
                if (old_pte & H_PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;

                if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
                                               MMU_PAGE_4K, ssize, flags) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }
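        /*
         * No valid HPTE exists (or the stale one could not be updated):
         * insert a new entry, trying the primary hash group first.
         */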
        if (likely(!(old_pte & H_PAGE_HASHPTE))) {

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
                hash = hpt_hash(vpn, shift, ssize);

repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

                /* Insert into the hash table, primary slot */
                slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
                                                MMU_PAGE_4K, MMU_PAGE_4K, ssize);
                /*
                 * Primary is full, try the secondary
                 */
                if (unlikely(slot == -1)) {
                        hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
                        slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
                                                        rflags,
                                                        HPTE_V_SECONDARY,
                                                        MMU_PAGE_4K,
                                                        MMU_PAGE_4K, ssize);
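                        /*
                         * Both groups are full: pick the primary or
                         * secondary group pseudo-randomly (low bit of
                         * the timebase), evict an entry from it and
                         * retry the insert.
                         */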
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP) & ~0x7UL;
                                mmu_hash_ops.hpte_remove(hpte_group);
                                /*
                                 * FIXME!! Should we retry the insert in the
                                 * group from which we just removed an entry?
                                 */
                                goto repeat;
                        }
                }
                /*
                 * Hypervisor failure. Restore the old PTE and return -1,
                 * as the other __hash_page_* variants do.
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
                        return -1;
                }
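                /*
                 * Remember where the HPTE went: cache the slot in the
                 * PTE's H_PAGE_F_SECOND/H_PAGE_F_GIX hint bits so the
                 * update path above can find it next time.
                 */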
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
                new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
                        (H_PAGE_F_SECOND | H_PAGE_F_GIX);
        }
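        /*
         * Publish the new PTE and clear H_PAGE_BUSY, releasing the lock
         * taken at the top of the function.
         */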
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
        return 0;
}