linux/arch/powerpc/include/asm/pte-walk.h
#ifndef _ASM_POWERPC_PTE_WALK_H
#define _ASM_POWERPC_PTE_WALK_H

#include <linux/sched.h>

/* Don't use this directly; go through the wrappers below instead. */
extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			       bool *is_thp, unsigned *hshift);

static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
				    bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) &&						\
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}
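
/*
 * Example (illustrative sketch, not part of the kernel API): callers
 * disable interrupts around the walk so a concurrent THP split or page
 * table teardown cannot free the tables mid-lookup. The helper name and
 * locals below are hypothetical.
 */
static inline unsigned long example_read_pte_val(struct mm_struct *mm,
						 unsigned long ea)
{
	unsigned long flags, val = 0;
	unsigned int hshift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);		/* the walk is only safe with irqs off */
	ptep = find_linux_pte(mm->pgd, ea, &is_thp, &hshift);
	if (ptep)
		val = pte_val(*ptep);	/* snapshot the PTE while it is stable */
	local_irq_restore(flags);

	return val;
}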

/* Walk the kernel (init_mm) page tables; init_mm has no THP, so is_thp is NULL. */
static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
{
	pgd_t *pgdir = init_mm.pgd;
	return __find_linux_pte(pgdir, ea, NULL, hshift);
}

/*
 * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
 * physical address, without taking locks. This can be used in real mode.
 */
static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
{
	pte_t *ptep;
	phys_addr_t pa;
	unsigned int hugepage_shift;

	/*
	 * init_mm does not free page tables, and does not do THP. It may
	 * have huge pages from huge vmalloc / ioremap, etc.
	 */
	ptep = find_init_mm_pte(addr, &hugepage_shift);
	if (WARN_ON(!ptep))
		return 0;

	pa = PFN_PHYS(pte_pfn(*ptep));

	if (!hugepage_shift)
		hugepage_shift = PAGE_SHIFT;

	pa |= addr & ((1ul << hugepage_shift) - 1);

	return pa;
}
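
/*
 * Example (illustrative sketch, not part of the kernel API): resolving
 * the physical address behind a vmalloc'ed buffer, e.g. before passing
 * it to real-mode code that cannot take faults. The function name is
 * hypothetical.
 */
static inline phys_addr_t example_vmalloc_to_phys(void *vaddr)
{
	/*
	 * No locking needed: init_mm never frees its page tables, so the
	 * walk inside ppc_find_vmap_phys() cannot race with a teardown.
	 */
	return ppc_find_vmap_phys((unsigned long)vaddr);
}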

/*
 * This is what we should always use. Any other lockless page table lookup needs
 * careful audit against THP split.
 */
static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
					 bool *is_thp, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!arch_irqs_disabled(), "%s called with irqs enabled\n", __func__);
	VM_WARN(pgdir != current->mm->pgd,
		"%s lockless page table lookup called on wrong mm\n", __func__);
	pte = __find_linux_pte(pgdir, ea, is_thp, hshift);

#if defined(CONFIG_DEBUG_VM) &&						\
	!(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
	/*
	 * We should not find a huge page if these configs are not enabled.
	 */
	if (hshift)
		WARN_ON(*hshift);
#endif
	return pte;
}
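
/*
 * Example (illustrative sketch, not part of the kernel API): testing
 * whether a user effective address in the current mm is mapped by a
 * huge page. The helper name and locals are hypothetical; is_thp may
 * be NULL when the caller does not care about THP.
 */
static inline bool example_ea_is_hugepage(unsigned long ea)
{
	unsigned long flags;
	unsigned int hshift = 0;
	bool huge = false;
	pte_t *ptep;

	local_irq_save(flags);		/* required by find_current_mm_pte() */
	ptep = find_current_mm_pte(current->mm->pgd, ea, NULL, &hshift);
	if (ptep && hshift)
		huge = true;
	local_irq_restore(flags);

	return huge;
}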

#endif /* _ASM_POWERPC_PTE_WALK_H */