linux/arch/powerpc/include/asm/pte-40x.h
#ifndef _ASM_POWERPC_PTE_40x_H
#define _ASM_POWERPC_PTE_40x_H
#ifdef __KERNEL__

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 *
 * There are several potential gotchas here.  The 40x hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
 *   support down to 1k pages); this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits.  Because 40x doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits.  We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT (a sketch of the resulting masking follows the bit
 *   definitions below).
 */

#define _PAGE_GUARDED   0x001   /* G: page is guarded from prefetch */
#define _PAGE_FILE      0x001   /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT   0x002   /* software: PTE contains a translation */
#define _PAGE_NO_CACHE  0x004   /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008   /* W: caching is write-through */
#define _PAGE_USER      0x010   /* matches one of the zone permission bits */
#define _PAGE_SPECIAL   0x020   /* software: Special page */
#define _PAGE_RW        0x040   /* software: Writes permitted */
#define _PAGE_DIRTY     0x080   /* software: dirty page */
#define _PAGE_HWWRITE   0x100   /* hardware: Dirty & RW, set in exception */
#define _PAGE_EXEC      0x200   /* hardware: EX permission */
#define _PAGE_ACCESSED  0x400   /* software: R: page referenced */

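/*
 * Illustration only (not part of the original header): a rough C sketch of
 * the masking described in the big comment above.  The real 40x TLB miss
 * handler is written in assembly; the helper name and its use of a bare
 * unsigned long PTE value are assumptions made purely for this example.
 *
 * A present, writable user page would carry something like
 *	_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
 * in its Linux PTE; before such a value can be loaded into TLBLO, the
 * software-only bits (TLBLO bits 20, 21, 24-26 and 30) have to be cleared.
 */
static inline unsigned long __pte_40x_tlblo_sketch(unsigned long pte)
{
	/* 0x800 is TLBLO bit 20, the other half of the sub-4k size field */
	return pte & ~(0x800 | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_RW |
		       _PAGE_SPECIAL | _PAGE_PRESENT);
}
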
#define _PMD_PRESENT    0x400   /* PMD points to page of PTEs */
#define _PMD_BAD        0x802
#define _PMD_SIZE       0x0e0   /* size field, != 0 for large-page PMD entry */
#define _PMD_SIZE_4M    0x0c0
#define _PMD_SIZE_16M   0x0e0

#define PMD_PAGE_SIZE(pmdval)   (1024 << (((pmdval) & _PMD_SIZE) >> 4))
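
/*
 * For illustration (not in the original header), the macro above worked
 * through the two large-page sizes defined here:
 *   PMD_PAGE_SIZE(_PMD_SIZE_4M):  (0x0c0 & 0x0e0) >> 4 == 12, 1024 << 12 == 4 MiB
 *   PMD_PAGE_SIZE(_PMD_SIZE_16M): (0x0e0 & 0x0e0) >> 4 == 14, 1024 << 14 == 16 MiB
 */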

/* Until my rework is finished, 40x still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES      1

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PTE_40x_H */