linux/include/asm-generic/pgtable.h
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Sets only the access flags (dirty, accessed, and writable).
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We return
 * whether the PTE actually changed, which in turn instructs the caller
 * to do things like update_mmu_cache.  This used to be done in the
 * caller, but sparc needs minor faults to force that call on sun4c, so
 * we changed this macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
        int __changed = !pte_same(*(__ptep), __entry);                    \
        if (__changed) {                                                  \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
                flush_tlb_page(__vma, __address);                         \
        }                                                                 \
        __changed;                                                        \
})
#endif
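
/*
 * Illustrative sketch, not part of the original header: how a fault-handling
 * path might use ptep_set_access_flags().  The vma/address/ptep/orig_pte
 * values are assumed to come from the caller, with the pte lock held.
 */
static inline int example_touch_pte(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep,
                                    pte_t orig_pte, int write)
{
        pte_t entry = pte_mkyoung(orig_pte);

        if (write)
                entry = pte_mkdirty(entry);

        /*
         * Non-zero only if the pte really changed; per the comment above,
         * the caller would then do things like update_mmu_cache().
         */
        return ptep_set_access_flags(vma, address, ptep, entry, write);
}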

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)             \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        int r = 1;                                                      \
        if (!pte_young(__pte))                                          \
                r = 0;                                                  \
        else                                                            \
                set_pte_at((__vma)->vm_mm, (__address),                 \
                           (__ptep), pte_mkold(__pte));                 \
        r;                                                              \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)                \
({                                                                      \
        int __young;                                                    \
        __young = ptep_test_and_clear_young(__vma, __address, __ptep);  \
        if (__young)                                                    \
                flush_tlb_page(__vma, __address);                       \
        __young;                                                        \
})
#endif
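
/*
 * Illustrative sketch, not part of the original header: reference-bit aging
 * of a single mapping, as a reclaim/rmap walk might do it.  vma/address/ptep
 * are assumed to come from the caller, with the pte lock held.
 */
static inline int example_test_and_age_pte(struct vm_area_struct *vma,
                                           unsigned long address, pte_t *ptep)
{
        /*
         * Clears the accessed bit, and flushes the stale TLB entry only
         * when the bit was actually set.
         */
        return ptep_clear_flush_young(vma, address, ptep);
}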

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)                     \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        pte_clear((__mm), (__address), (__ptep));                       \
        __pte;                                                          \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)        \
({                                                                      \
        pte_t __pte;                                                    \
        __pte = ptep_get_and_clear((__mm), (__address), (__ptep));      \
        __pte;                                                          \
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or while the address space is being torn down.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)     \
do {                                                                    \
        pte_clear((__mm), (__address), (__ptep));                       \
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)                      \
({                                                                      \
        pte_t __pte;                                                    \
        __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);  \
        flush_tlb_page(__vma, __address);                               \
        __pte;                                                          \
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)           (0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)          do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)            pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)            (1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)       pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)  (prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 in any
 * of these macros.
 */

#define pgd_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;  \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;      \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)                                         \
({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;      \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries
 * and any p?d_bad entries, reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (pgd_none(*pgd))
                return 1;
        if (unlikely(pgd_bad(*pgd))) {
                pgd_clear_bad(pgd);
                return 1;
        }
        return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
        if (pud_none(*pud))
                return 1;
        if (unlikely(pud_bad(*pud))) {
                pud_clear_bad(pud);
                return 1;
        }
        return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 1;
        if (unlikely(pmd_bad(*pmd))) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}
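
/*
 * Illustrative sketch, not part of the original header: the canonical
 * page-table walking loop built from the p?d_addr_end() and
 * p?d_none_or_clear_bad() helpers above.  The per-pte work is elided.
 */
static inline void example_walk_pmd_range(pud_t *pud, unsigned long addr,
                                          unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
                /* Next pmd boundary, or 'end' if that comes first. */
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;       /* empty or bad entry: skip it */
                /* ... visit the ptes covering [addr, next) here ... */
        } while (pmd++, addr = next, addr != end);
}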

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep)
{
        /*
         * Get the current pte state, but zero it out to make it
         * non-present, preventing the hardware from asynchronously
         * updating it.
         */
        return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte)
{
        /*
         * The pte is non-present, so there's no hardware state to
         * preserve.
         */
        set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep)
{
        return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        __ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
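
/*
 * Illustrative sketch, not part of the original header: a protection change
 * expressed as the read-modify-write transaction described above.  Must run
 * under the pte lock; 'newprot' is the caller-supplied target protection.
 */
static inline void example_change_pte_prot(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep,
                                           pgprot_t newprot)
{
        pte_t ptent;

        ptent = ptep_modify_prot_start(mm, addr, ptep); /* pte now non-present */
        ptent = pte_modify(ptent, newprot);             /* recompute protections */
        ptep_modify_prot_commit(mm, addr, ptep, ptent); /* may be batched */
}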
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()      do {} while (0)
#define arch_leave_lazy_mmu_mode()      do {} while (0)
#define arch_flush_lazy_mmu_mode()      do {} while (0)
#endif
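
/*
 * Illustrative sketch, not part of the original header (and assuming
 * CONFIG_MMU): batching a run of pte updates under lazy MMU mode.  Per the
 * comment above, the relevant page table lock must be held around the whole
 * window, and the ptes must not be read back through the raw pointer inside
 * it.
 */
static inline void example_wrprotect_ptes(struct mm_struct *mm, pte_t *pte,
                                          unsigned long addr, unsigned long end)
{
        arch_enter_lazy_mmu_mode();
        for (; addr != end; pte++, addr += PAGE_SIZE)
                ptep_set_wrprotect(mm, addr, pte);      /* may only be queued here */
        arch_leave_lazy_mmu_mode();     /* queued updates are issued at the latest here */
}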

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entries and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev) do {} while (0)
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                                        unsigned long pfn, unsigned long size)
{
        return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
                                        unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                                unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                                unsigned long size);
#endif
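
/*
 * Illustrative sketch, not part of the original header: how the generic mm
 * code is expected to pair these hooks over a pfn mapping's lifetime.  Error
 * handling and the actual mapping work are elided; vma/prot/pfn/size are
 * assumed from the caller.
 */
static inline int example_setup_pfn_mapping(struct vm_area_struct *vma,
                                            pgprot_t *prot, unsigned long pfn,
                                            unsigned long size)
{
        /* Record (or validate) the memory type before the ptes are written. */
        return track_pfn_vma_new(vma, prot, pfn, size);
}

static inline void example_teardown_pfn_mapping(struct vm_area_struct *vma)
{
        /* size == 0 means "the whole vma", per the comment above. */
        untrack_pfn_vma(vma, 0, 0);
}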

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */