/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <asm/asm-const.h>

/*
 * We always define HW_PAGE_SHIFT to 12 as the use of 64K pages remains
 * Linux-specific: every notion of a page number shared with the firmware,
 * TCEs, the IOMMU, etc. still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT           12
#define HW_PAGE_SIZE            (ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK            (~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits by which PAGE_SHIFT exceeds
 * HW_PAGE_SHIFT, i.e. the shift between Linux pages and 4K hardware pages.
 */
#define PAGE_FACTOR             (PAGE_SHIFT - HW_PAGE_SHIFT)
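
/*
 * Illustrative example: with a 64K Linux page size (PAGE_SHIFT == 16),
 * PAGE_FACTOR is 4, so each Linux page covers 1 << PAGE_FACTOR == 16
 * hardware 4K pages, and a Linux PFN converts to a hardware page
 * number as (pfn << PAGE_FACTOR).
 */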

/* Segment size; normal 256M segments */
#define SID_SHIFT               28
#define SID_MASK                ASM_CONST(0xfffffffff)
#define ESID_MASK               0xfffffffff0000000UL
#define GET_ESID(x)             (((x) >> SID_SHIFT) & SID_MASK)

/* 1T segments */
#define SID_SHIFT_1T            40
#define SID_MASK_1T             0xffffffUL
#define ESID_MASK_1T            0xffffff0000000000UL
#define GET_ESID_1T(x)          (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
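
/*
 * Example: GET_ESID(0x123456789UL) == 0x12, the index of the 256M
 * segment containing that address, while GET_ESID_1T(0x123456789UL)
 * == 0x0 since the address lies within the first 1T segment.
 */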

#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;

static inline void clear_page(void *addr)
{
        unsigned long iterations;
        unsigned long onex, twox, fourx, eightx;

        iterations = ppc64_caches.l1d.blocks_per_page / 8;

        /*
         * Some versions of gcc use multiply instructions to
         * calculate the offsets, so let's give it a hand to
         * do better.
         */
        onex = ppc64_caches.l1d.block_size;
        twox = onex << 1;
        fourx = onex << 2;
        eightx = onex << 3;

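        /*
         * Zero the page one L1 data cache block at a time with dcbz:
         * eight blocks per loop iteration, with the count register
         * holding the iteration count and %0 stepping through the page.
         */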
        asm volatile(
        "mtctr  %1      # clear_page\n\
        .balign 16\n\
1:      dcbz    0,%0\n\
        dcbz    %3,%0\n\
        dcbz    %4,%0\n\
        dcbz    %5,%0\n\
        dcbz    %6,%0\n\
        dcbz    %7,%0\n\
        dcbz    %8,%0\n\
        dcbz    %9,%0\n\
        add     %0,%0,%10\n\
        bdnz+   1b"
        : "=&r" (addr)
        : "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
                "b" (twox+onex), "b" (fourx), "b" (fourx+onex),
                "b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
        : "ctr", "memory");
}

extern void copy_page(void *to, void *from);

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
        (is_32bit_task() ? \
         VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header
 * we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32        VM_DATA_FLAGS_EXEC
#define VM_STACK_DEFAULT_FLAGS64        VM_DATA_FLAGS_NON_EXEC

#define VM_STACK_DEFAULT_FLAGS \
        (is_32bit_task() ? \
         VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#include <asm-generic/getorder.h>

#endif /* _ASM_POWERPC_PAGE_64_H */