1
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Page definitions for the powerpc architecture: page size and mask,
 * kernel virtual <-> physical address conversion, pfn helpers and
 * huge-page geometry.  Usable from both C and assembly, hence the
 * __ASSEMBLY__ guards and ASM_CONST() wrappers below.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>
16
17
18
19
20
21
22
/*
 * Base page size is configured at build time; PAGE_SIZE follows from it.
 * ASM_CONST() keeps the constant usable from assembly as well as C.
 */
#define PAGE_SHIFT CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
25
#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
/* No hugetlb support: huge pages degenerate to normal pages. */
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
/* Book3S-64: the default huge page shift is discovered at runtime. */
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT 19		/* 512k huge pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT 22		/* 4M huge pages */
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif
42
43
44
45
46
47
48#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/*
 * KERNELBASE is the virtual address the kernel image is linked at and
 * PAGE_OFFSET the start of the kernel linear mapping; LOAD_OFFSET is the
 * link-time difference between the kernel's virtual and physical start.
 */
#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

/* Discovered at boot: physical start of memory and of the kernel image. */
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
/* Runtime virtual-to-physical offset for relocatable 32-bit kernels. */
extern long long virt_phys_offset;
#endif

#endif
/* Non-static kernels only learn their physical load address at boot. */
#define PHYSICAL_START kernstart_addr

#else
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#endif
100
101
/*
 * On 32-bit BookE the kernel need not run at the address it was linked
 * at, so virt<->phys conversion goes through an offset; for relocatable
 * kernels that offset is a runtime variable.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

/* Physical address where usable memory starts, per platform. */
#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START memstart_addr
#else
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif
117
#ifdef CONFIG_FLATMEM
/* With a flat memory map, the first valid pfn is where memory starts. */
#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;	/* one past the last valid pfn */
122static inline bool pfn_valid(unsigned long pfn)
123{
124 unsigned long min_pfn = ARCH_PFN_OFFSET;
125
126 return pfn >= min_pfn && pfn < max_mapnr;
127}
128#endif
129#endif
130
/* Conversions between linear-map kernel addresses, pfns and struct pages. */
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

/* A kernel virtual address is valid iff its pfn is. */
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/*
 * __va()/__pa() convert between kernel linear-map virtual addresses and
 * physical addresses.  The mechanism differs per platform.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
/* 32-bit BookE: apply the (possibly runtime) VIRT_PHYS_OFFSET. */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * 64-bit: conversion is just setting/clearing the top nibble -- __va()
 * ORs in PAGE_OFFSET and __pa() masks with 0x0fff...f, which relies on
 * PAGE_OFFSET occupying only the top four address bits.
 * VIRTUAL_BUG_ON() sanity-checks that the argument lies on the expected
 * side of PAGE_OFFSET.
 */
#define __va(x) \
({ \
	VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
})

#define __pa(x) \
({ \
	VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
	(unsigned long)(x) & 0x0fffffffffffffffUL; \
})

#else
/* 32-bit non-BookE: linear map starts at PAGE_OFFSET for MEMORY_START. */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
237
238
239
240
241
242
/*
 * Default vm_flags for data mappings.  Only the 32-bit variant honours
 * the READ_IMPLIES_EXEC personality by adding VM_EXEC.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
				 VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
250
/* Pull in the word-size-specific page definitions. */
#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* Align an address up/down to `size`, which must be a power of two
 * (the down variant is a simple mask). */
#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1)))

/* Align up by default. */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
263
264
265
266
267
/*
 * is_kernel_addr(): true for addresses in the kernel portion of the
 * address space.  Book3E-64 uses the top half of the 64-bit space
 * (>= 0x8000000000000000) rather than PAGE_OFFSET as the boundary.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif
273
#ifndef CONFIG_PPC_BOOK3S_64
/*
 * PD_HUGE: the top bit of a page-directory entry.
 * NOTE(review): presumably marks the entry as pointing to a huge page
 * directory (hugepd) -- consumed by hugetlb code elsewhere; confirm
 * against the page-table walkers.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else
/*
 * Book3S-64: extract the address part of a hugepd entry by clearing the
 * top nibble and the low shift bits (HUGEPD_SHIFT_MASK, below).
 */
#define HUGEPD_ADDR_MASK (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif

/* Low bits of a hugepd entry encoding the page-size shift. */
#define HUGEPD_SHIFT_MASK 0x3f
300
#ifndef __ASSEMBLY__

/* Book3S-64 stores page-table entries big-endian; others use native
 * endian types. */
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

/* Without hugetlb support these predicates are constant-false. */
#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep) (0)
#define pgd_huge(pgd) (0)
#endif

struct page;
/* Clear / copy one user page; implementations live elsewhere. */
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
/* Shared-memory LPAR provides an arch hook into page freeing. */
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#include <asm-generic/memory_model.h>
#endif
#include <asm/slice.h>
331
332
333
334
/* Address-bit limit for ZONE_DMA allocations (30 bits on ppc32, 31 on
 * ppc64). */
#ifdef CONFIG_PPC32
#define ARCH_ZONE_DMA_BITS 30
#else
#define ARCH_ZONE_DMA_BITS 31
#endif

#endif /* _ASM_POWERPC_PAGE_H */
342