/*
 * OpenRISC page definitions (asm/page.h)
 *
 * Page size constants, kernel virtual/physical address translation
 * macros and the basic page-table entry types for the OpenRISC port.
 */
#ifndef __ASM_OPENRISC_PAGE_H
#define __ASM_OPENRISC_PAGE_H

/*
 * PAGE_SHIFT determines the page size: 2^13 = 8KB pages.
 */
#define PAGE_SHIFT 13
#ifdef __ASSEMBLY__
/* The assembler does not accept the "UL" suffix, so use a plain 1 here. */
#define PAGE_SIZE (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
/* Mask that clears the offset-within-page bits of an address. */
#define PAGE_MASK (~(PAGE_SIZE-1))

/*
 * Base of the kernel's virtual address space: the kernel occupies the
 * top 1GB (0xc0000000 upwards) of the 32-bit address space.
 */
#define PAGE_OFFSET 0xc0000000
#define KERNELBASE PAGE_OFFSET
35
36
37
38
39#include <asm/setup.h>
40
#ifndef __ASSEMBLY__

/* Zero / copy one full hardware page. */
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)

/*
 * The user-page variants ignore the user virtual address and the
 * struct page and simply operate on the kernel mapping -- presumably
 * there are no cache-aliasing concerns on this architecture
 * (NOTE(review): confirm against the cache implementation).
 */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * These are used to make use of C type-checking: wrapping the raw
 * unsigned long in a single-member struct prevents pte/pgd/pgprot
 * values from being silently mixed up with plain integers.
 */
typedef struct {
	unsigned long pte;
} pte_t;
typedef struct {
	unsigned long pgd;
} pgd_t;
typedef struct {
	unsigned long pgprot;
} pgprot_t;
typedef struct page *pgtable_t;

/* Extract the raw value from a typed page-table entity. */
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

/* Construct a typed page-table entity from a raw value. */
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

#endif /* !__ASSEMBLY__ */
72
73
#ifndef __ASSEMBLY__

/*
 * The kernel direct mapping is a fixed linear offset:
 * virtual = physical + PAGE_OFFSET.
 */
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)

/* Kernel virtual address <-> page frame number. */
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)

/*
 * Direct mem_map indexing -- assumes a flat memory model with a single
 * contiguous mem_map array (consistent with the asm-generic/memory_model.h
 * include at the bottom of this file).
 */
#define virt_to_page(addr) \
	(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
#define page_to_virt(page) \
	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)

/* Physical address of a page, widened to dma_addr_t before shifting. */
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/* A pfn is valid iff it lies below the number of mapped pages. */
#define pfn_valid(pfn) ((pfn) < max_mapnr)

#define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr)))

#endif /* !__ASSEMBLY__ */
94
95
/*
 * Default protection for data/heap mappings: readable, writable and
 * executable, with all of the corresponding MAY* capabilities.
 */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
			       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* Generic pfn_to_page/page_to_pfn and get_order() implementations. */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __ASM_OPENRISC_PAGE_H */
104