1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef _ASM_S390_PGTABLE_H
14#define _ASM_S390_PGTABLE_H
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/processor.h>

/* The kernel's top-level page table, 4KB-aligned for the hardware. */
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The s390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information, so this is a no-op.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif
53
54
55
56
57
58
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map, PGDIR_SHIFT the size mapped by a top-level entry.
 */
#ifndef __s390x__
# define PMD_SHIFT 22
# define PUD_SHIFT 22
# define PGDIR_SHIFT 22
#else
# define PMD_SHIFT 21
# define PUD_SHIFT 31
# define PGDIR_SHIFT 31
#endif

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * Entries per page table level.  Levels with a single entry
 * (pmd/pud on 31 bit, pud on 64 bit) are folded.
 */
#ifndef __s390x__
# define PTRS_PER_PTE 1024
# define PTRS_PER_PMD 1
# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 512
#else
# define PTRS_PER_PTE 512
# define PTRS_PER_PMD 1024
# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 2048
#endif

#define FIRST_USER_ADDRESS 0

/* Report a corrupted entry at the respective page table level. */
#define pte_ERROR(e) \
 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
104
105#ifndef __ASSEMBLY__
106
107
108
109
110
111
112
113
114
115
/*
 * The vmalloc area starts VMALLOC_OFFSET bytes above high_memory,
 * rounded up to a VMALLOC_OFFSET boundary, and extends up to the
 * run-time variable vmalloc_end.  On 64 bit it additionally starts
 * no lower than 4GB.
 */
extern unsigned long vmalloc_end;

#ifdef CONFIG_64BIT
#define VMALLOC_ADDR (max(0x100000000UL, (unsigned long) high_memory))
#else
#define VMALLOC_ADDR ((unsigned long) high_memory)
#endif
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END vmalloc_end

/* Initial end and minimum size of the vmalloc area. */
#ifndef __s390x__
#define VMALLOC_MIN_SIZE 0x8000000UL
#define VMALLOC_END_INIT 0x80000000UL
#else
#define VMALLOC_MIN_SIZE 0x2000000000UL
#define VMALLOC_END_INIT 0x40000000000UL
#endif
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/* Hardware bits in the page table entry. */
#define _PAGE_RO 0x200
#define _PAGE_INVALID 0x400

/* Software bits in the page table entry. */
#define _PAGE_SWT 0x001
#define _PAGE_SWX 0x002

/*
 * pte types: each is a distinct combination of the hardware
 * RO/INVALID bits and the software SWT/SWX bits.  The EX variants
 * carry the _PAGE_SWX execute bit used by the noexec shadow tables.
 */
#define _PAGE_TYPE_EMPTY 0x400
#define _PAGE_TYPE_NONE 0x401
#define _PAGE_TYPE_SWAP 0x403
#define _PAGE_TYPE_FILE 0x601
#define _PAGE_TYPE_RO 0x200
#define _PAGE_TYPE_RW 0x000
#define _PAGE_TYPE_EX_RO 0x202
#define _PAGE_TYPE_EX_RW 0x002
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
#ifndef __s390x__

/* Bits in the 31-bit segment table address-space-control element. */
#define _ASCE_SPACE_SWITCH 0x80000000UL
#define _ASCE_ORIGIN_MASK 0x7ffff000UL
#define _ASCE_PRIVATE_SPACE 0x100
#define _ASCE_ALT_EVENT 0x80
#define _ASCE_TABLE_LENGTH 0x7f

/* Bits in the 31-bit segment table entry. */
#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL
#define _SEGMENT_ENTRY_INV 0x20
#define _SEGMENT_ENTRY_COMMON 0x10
#define _SEGMENT_ENTRY_PTL 0x0f

#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)

#else

/* Bits in the 64-bit address-space-control element. */
#define _ASCE_ORIGIN ~0xfffUL
#define _ASCE_PRIVATE_SPACE 0x100
#define _ASCE_ALT_EVENT 0x80
#define _ASCE_SPACE_SWITCH 0x40
#define _ASCE_REAL_SPACE 0x20
#define _ASCE_TYPE_MASK 0x0c
#define _ASCE_TYPE_REGION1 0x0c
#define _ASCE_TYPE_REGION2 0x08
#define _ASCE_TYPE_REGION3 0x04
#define _ASCE_TYPE_SEGMENT 0x00
#define _ASCE_TABLE_LENGTH 0x03

/* Bits in the region table entries (levels 1-3). */
#define _REGION_ENTRY_ORIGIN ~0xfffUL
#define _REGION_ENTRY_INV 0x20
#define _REGION_ENTRY_TYPE_MASK 0x0c
#define _REGION_ENTRY_TYPE_R1 0x0c
#define _REGION_ENTRY_TYPE_R2 0x08
#define _REGION_ENTRY_TYPE_R3 0x04
#define _REGION_ENTRY_LENGTH 0x03

#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the 64-bit segment table entry. */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL
#define _SEGMENT_ENTRY_RO 0x200
#define _SEGMENT_ENTRY_INV 0x20

#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)

#endif

/* ASCE flags set for user address spaces. */
#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
 _ASCE_ALT_EVENT)
336
337
/* Bits in the storage key of a physical page. */
#define _PAGE_CHANGED 0x02
#define _PAGE_REFERENCED 0x04

/* Page protections built from the pte type combinations. */
#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL PAGE_RW
#define PAGE_COPY PAGE_RO

/*
 * mmap protection maps.  Private writable mappings (__P010/__P011)
 * use the read-only type so that stores fault; shared writable
 * mappings (__S010/__S011) map read-write.  The EX variants carry
 * the software execute bit for the noexec shadow-table mechanism.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_RO
#define __P010 PAGE_RO
#define __P011 PAGE_RO
#define __P100 PAGE_EX_RO
#define __P101 PAGE_EX_RO
#define __P110 PAGE_EX_RO
#define __P111 PAGE_EX_RO

#define __S000 PAGE_NONE
#define __S001 PAGE_RO
#define __S010 PAGE_RW
#define __S011 PAGE_RW
#define __S100 PAGE_EX_RO
#define __S101 PAGE_EX_RO
#define __S110 PAGE_EX_RW
#define __S111 PAGE_EX_RW

/*
 * Shift applied to PAGE_SIZE to obtain the allocation span of a
 * pmd/pud table and its shadow; see get_shadow_table().
 */
#ifndef __s390x__
# define PxD_SHADOW_SHIFT 1
#else
# define PxD_SHADOW_SHIFT 2
#endif
385
386static inline struct page *get_shadow_page(struct page *page)
387{
388 if (s390_noexec && page->index)
389 return virt_to_page((void *)(addr_t) page->index);
390 return NULL;
391}
392
393static inline void *get_shadow_pte(void *table)
394{
395 unsigned long addr, offset;
396 struct page *page;
397
398 addr = (unsigned long) table;
399 offset = addr & (PAGE_SIZE - 1);
400 page = virt_to_page((void *)(addr ^ offset));
401 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
402}
403
404static inline void *get_shadow_table(void *table)
405{
406 unsigned long addr, offset;
407 struct page *page;
408
409 addr = (unsigned long) table;
410 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
411 page = virt_to_page((void *)(addr ^ offset));
412 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
413}
414
415
416
417
418
419
420static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
421 pte_t *pteptr, pte_t pteval)
422{
423 pte_t *shadow_pte = get_shadow_pte(pteptr);
424
425 *pteptr = pteval;
426 if (shadow_pte) {
427 if (!(pte_val(pteval) & _PAGE_INVALID) &&
428 (pte_val(pteval) & _PAGE_SWX))
429 pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
430 else
431 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
432 }
433}
434
435
436
437
/*
 * pgd/pud query functions.  Folded levels (everything on 31 bit,
 * the pgd on 64 bit) always report present and never bad.
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

/* A pud is present once a region-table origin has been set. */
static inline int pud_present(pud_t pud)
{
 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

/* Outside the origin and invalid bits a pud must look like a
 * region-3 entry, otherwise it is corrupted. */
static inline int pud_bad(pud_t pud)
{
 unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
 return (pud_val(pud) & mask) != _REGION3_ENTRY;
}

#endif
471
472static inline int pmd_present(pmd_t pmd)
473{
474 return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
475}
476
477static inline int pmd_none(pmd_t pmd)
478{
479 return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
480}
481
482static inline int pmd_bad(pmd_t pmd)
483{
484 unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
485 return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
486}
487
488static inline int pte_none(pte_t pte)
489{
490 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
491}
492
493static inline int pte_present(pte_t pte)
494{
495 unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
496 return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
497 (!(pte_val(pte) & _PAGE_INVALID) &&
498 !(pte_val(pte) & _PAGE_SWT));
499}
500
501static inline int pte_file(pte_t pte)
502{
503 unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
504 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
505}
506
/* Two ptes are identical if every hardware and software bit matches. */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
509
510
511
512
513
514static inline int pte_write(pte_t pte)
515{
516 return (pte_val(pte) & _PAGE_RO) == 0;
517}
518
static inline int pte_dirty(pte_t pte)
{
 /*
  * Dirty state is kept in the per-physical-page storage key
  * (see page_test_dirty() below), not in the pte, so a pte
  * alone never reads as dirty.
  */
 return 0;
}

static inline int pte_young(pte_t pte)
{
 /*
  * Referenced state likewise lives in the storage key (see
  * page_test_and_clear_young() below); the pte carries no
  * young bit.
  */
 return 0;
}
536
537
538
539
540
/*
 * pgd/pud/pmd clearing.  Folded levels have nothing to clear.
 */
#ifndef __s390x__

#define pgd_clear(pgd) do { } while (0)
#define pud_clear(pud) do { } while (0)

/* A 31-bit pmd spans four consecutive hardware segment-table
 * entries; mark all four empty. */
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
 pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
 pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
 pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
 pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}

#else

#define pgd_clear(pgd) do { } while (0)

static inline void pud_clear_kernel(pud_t *pud)
{
 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

/* Clear a pud and, when a noexec shadow table exists, its copy. */
static inline void pud_clear(pud_t * pud)
{
 pud_t *shadow = get_shadow_table(pud);

 pud_clear_kernel(pud);
 if (shadow)
  pud_clear_kernel(shadow);
}

/*
 * NOTE(review): pmd_val1 implies the 64-bit pmd_t holds a second
 * word that is emptied alongside the first — confirm the pmd_t
 * layout in asm/page.h.
 */
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

#endif
579
580static inline void pmd_clear(pmd_t * pmdp)
581{
582 pmd_t *shadow_pmd = get_shadow_table(pmdp);
583
584 pmd_clear_kernel(pmdp);
585 if (shadow_pmd)
586 pmd_clear_kernel(shadow_pmd);
587}
588
589static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
590{
591 pte_t *shadow_pte = get_shadow_pte(ptep);
592
593 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
594 if (shadow_pte)
595 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
596}
597
598
599
600
601
602static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
603{
604 pte_val(pte) &= PAGE_MASK;
605 pte_val(pte) |= pgprot_val(newprot);
606 return pte;
607}
608
609static inline pte_t pte_wrprotect(pte_t pte)
610{
611
612 if (!(pte_val(pte) & _PAGE_INVALID))
613 pte_val(pte) |= _PAGE_RO;
614 return pte;
615}
616
617static inline pte_t pte_mkwrite(pte_t pte)
618{
619 pte_val(pte) &= ~_PAGE_RO;
620 return pte;
621}
622
static inline pte_t pte_mkclean(pte_t pte)
{
 /*
  * Dirty state is tracked in the storage key of the physical
  * page (see page_clear_dirty() below); there is nothing to
  * clear in the pte itself.
  */
 return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
 /*
  * Dirty state is tracked in the storage key; the pte carries
  * no dirty bit to set.
  */
 return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
 /*
  * Referenced state lives in the storage key; nothing to do
  * in the pte.
  */
 return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
 /* Referenced state lives in the storage key; nothing to do. */
 return pte;
}

/* No young bit in the pte: always report "not young" here and let
 * the storage-key based page_test_and_clear_young() do the work. */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 unsigned long addr, pte_t *ptep)
{
 return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 unsigned long address, pte_t *ptep)
{
 /* No pte young bit means there is nothing to flush either. */
 return 0;
}
671
/*
 * Invalidate a pte with the ipte instruction and then mark it
 * empty.  ipte is only issued for valid ptes - the instruction
 * operates on valid entries only.
 */
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
  /* pto must point to the start of the segment table */
  pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
  /* ipte in zarch mode can do the math */
  pte_t *pto = ptep;
#endif
  asm volatile(
   " ipte %2,%3"
   : "=m" (*ptep) : "m" (*ptep),
   "a" (pto), "a" (address));
 }
 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
689
690static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
691{
692 __ptep_ipte(address, ptep);
693 ptep = get_shadow_pte(ptep);
694 if (ptep)
695 __ptep_ipte(address, ptep);
696}
697
698
699
700
701
702
703
704
705
706
707
708
709
710
/*
 * If the mm is shared with other cpus/tasks (mm_users > 1 or the mm
 * is not the current active one) the pte must be removed with ipte
 * so remote TLB entries are flushed; otherwise a lazy pte_clear is
 * sufficient.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
 pte_t __pte = *(__ptep); \
 if (atomic_read(&(__mm)->mm_users) > 1 || \
 (__mm) != current->active_mm) \
 ptep_invalidate(__address, __ptep); \
 else \
 pte_clear((__mm), (__address), (__ptep)); \
 __pte; \
})
722
723#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
724static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
725 unsigned long address, pte_t *ptep)
726{
727 pte_t pte = *ptep;
728 ptep_invalidate(address, ptep);
729 return pte;
730}
731
732
733
734
735
736
737
738
739#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
740static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
741 unsigned long addr,
742 pte_t *ptep, int full)
743{
744 pte_t pte = *ptep;
745
746 if (full)
747 pte_clear(mm, addr, ptep);
748 else
749 ptep_invalidate(addr, ptep);
750 return pte;
751}
752
/*
 * Write-protect a pte: only writable ptes need work.  As above, a
 * shared mm forces an ipte flush before the read-only pte is
 * reinstalled via set_pte_at (which also refreshes the shadow).
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
 pte_t __pte = *(__ptep); \
 if (pte_write(__pte)) { \
 if (atomic_read(&(__mm)->mm_users) > 1 || \
 (__mm) != current->active_mm) \
 ptep_invalidate(__addr, __ptep); \
 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
 } \
})

/*
 * Install a changed pte: invalidate the old entry first, then set
 * the new one.  Evaluates to whether the pte actually changed.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
 int __changed = !pte_same(*(__ptep), __entry); \
 if (__changed) { \
 ptep_invalidate(__addr, __ptep); \
 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
 } \
 __changed; \
})
775
776
777
778
779
780
781
782
/*
 * Dirty and referenced tracking on s390 is done via the storage key
 * of the physical page, not via pte bits.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
/* Resetting the storage key to the default clears the changed bit. */
static inline void page_clear_dirty(struct page *page)
{
 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
/*
 * rrbe (reset reference bit extended) clears the reference bit in
 * the storage key and reports its previous state in the condition
 * code, which ipm/srl move into ccode; bit 1 of the cc means the
 * page had been referenced.
 */
static inline int page_test_and_clear_young(struct page *page)
{
 unsigned long physpage = page_to_phys(page);
 int ccode;

 asm volatile(
  " rrbe 0,%1\n"
  " ipm %0\n"
  " srl %0,28\n"
  : "=d" (ccode) : "a" (physpage) : "cc" );
 return ccode & 2;
}
811
812
813
814
815
816static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
817{
818 pte_t __pte;
819 pte_val(__pte) = physpage + pgprot_val(pgprot);
820 return __pte;
821}
822
823static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
824{
825 unsigned long physpage = page_to_phys(page);
826
827 return mk_pte_phys(physpage, pgprot);
828}
829
/* Index of a virtual address at each page table level. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

/* 31 bit: pud/pgd levels are folded into the pgd, so dereferencing
 * them must never be reached. */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else

/* 64 bit: only the pgd level is folded. */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)

/* Walk from a pud entry to the pmd slot covering 'address'. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
 pmd_t *pmd = (pmd_t *) pud_deref(*pud);
 return pmd + pmd_index(address);
}

#endif

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* pte lookup within a pmd; the map/unmap variants are plain kernel
 * accesses (no highmem on s390), so pte_unmap is a no-op. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
/*
 * Swap pte encoding: the _PAGE_TYPE_SWAP bit pattern plus the swap
 * type in bits 2-6, offset bit 0 in pte bit 7 and the remaining
 * offset bits starting at pte bit 12.
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
 pte_t pte;
 offset &= __SWP_OFFSET_MASK;
 pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
 return pte;
}

#define __swp_type(entry) (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/*
 * Nonlinear file pte encoding: _PAGE_TYPE_FILE plus the low 7
 * offset bits in pte bits 1-7 and the rest from bit 12 upwards.
 */
#ifndef __s390x__
# define PTE_FILE_MAX_BITS 26
#else
# define PTE_FILE_MAX_BITS 59
#endif

#define pte_to_pgoff(__pte) \
 ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
 | _PAGE_TYPE_FILE })
945
946#endif
947
/* Every kernel address is considered valid. */
#define kern_addr_valid(addr) (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/* No page table caches to initialise. */
#define pgtable_cache_init() do { } while (0)

/* s390 supplies its own memmap_init implementation. */
#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
960
961#include <asm-generic/pgtable.h>
962
963#endif
964