linux/arch/m68k/include/asm/pgtable_mm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/sched.h>
#include <linux/threads.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#include <asm/virtconvert.h>

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)                                 \
        do {                                                    \
                *(pteptr) = (pteval);                           \
        } while (0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
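/* The m68k needs nothing special here: a pte update is a plain store. */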


/* PMD_SHIFT determines the size of the area a second-level page table can map */
#ifdef CONFIG_SUN3
#define PMD_SHIFT       17
#else
#define PMD_SHIFT       22
#endif
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
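/* A second-level table thus maps 128 KiB on Sun3 and 4 MiB elsewhere. */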

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_SUN3
#define PGDIR_SHIFT     17
#elif defined(CONFIG_COLDFIRE)
#define PGDIR_SHIFT     22
#else
#define PGDIR_SHIFT     25
#endif
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
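/* So one pgd entry covers 128 KiB on Sun3, 4 MiB on ColdFire and 32 MiB on the Motorola MMUs. */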

/*
 * Entries per page directory level: the classic Motorola MMUs are
 * configured as three-level, so there is a real PMD level; Sun3 and
 * ColdFire fold the PMD into the PGD.
 */
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE   16
#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD   1
#define PTRS_PER_PGD   2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE    512
#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    1024
#else
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    8
#define PTRS_PER_PGD    128
#endif
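/* Number of pgd slots needed to cover the user address space (TASK_SIZE). */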
#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0UL

/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
#define KMAP_START     0x0DC00000
#define KMAP_END       0x0E000000
#elif defined(CONFIG_COLDFIRE)
#define KMAP_START      0xe0000000
#define KMAP_END        0xf0000000
#else
#define KMAP_START      0xd0000000
#define KMAP_END        0xf0000000
#endif

#ifdef CONFIG_SUN3
extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START 0x0f800000
#define VMALLOC_END m68k_vmalloc_end
#elif defined(CONFIG_COLDFIRE)
#define VMALLOC_START   0xd0000000
#define VMALLOC_END     0xe0000000
#else
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
#endif
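/*
 * On the Motorola MMUs the vmalloc area thus starts just above high_memory,
 * rounded up to an 8MB boundary, and ends where the kernel_map() region
 * (KMAP_START) begins.
 */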

/* zero page used for uninitialized stuff */
extern void *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2                        2

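/* Change the caching attributes of an existing kernel virtual mapping. */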
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);

/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.  The Sun3 does have
 * external MMU info, but it is updated on demand.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
}

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

/* MMU-specific headers */

#ifdef CONFIG_SUN3
#include <asm/sun3_pgtable.h>
#elif defined(CONFIG_COLDFIRE)
#include <asm/mcf_pgtable.h>
#else
#include <asm/motorola_pgtable.h>
#endif

#ifndef __ASSEMBLY__
/*
 * Macro to mark a page protection value as "uncacheable".
 */
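/*
 * ColdFire has a single no-cache bit; the 68851/68030 set _PAGE_NOCACHE030;
 * the 68040/68060 replace the cache-mode field with the serialized
 * non-cachable mode; Sun3 uses SUN3_PAGE_NOCACHE when it is defined.
 */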
#ifdef CONFIG_COLDFIRE
# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
#else
#ifdef SUN3_PAGE_NOCACHE
# define __SUN3_PAGE_NOCACHE    SUN3_PAGE_NOCACHE
#else
# define __SUN3_PAGE_NOCACHE    0
#endif
#define pgprot_noncached(prot)                                                  \
        (MMU_IS_SUN3                                                            \
         ? (__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE))                   \
         : ((MMU_IS_851 || MMU_IS_030)                                          \
            ? (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE030))                   \
            : (MMU_IS_040 || MMU_IS_060)                                        \
            ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))  \
            : (prot)))

#endif /* CONFIG_COLDFIRE */
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#define check_pgt_cache()       do { } while (0)

#endif /* _M68K_PGTABLE_H */