1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef _ASM_S390_PGTABLE_H
14#define _ASM_S390_PGTABLE_H
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#ifndef __ASSEMBLY__
32#include <linux/sched.h>
33#include <linux/mm_types.h>
34#include <asm/bitops.h>
35#include <asm/bug.h>
36#include <asm/processor.h>
37
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
48
49
50
51
52
53
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc. zero_page_mask selects one of
 * several colored copies of the zero page.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) &zero_page_mask))))

#define is_zero_pfn	is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	/* Unsigned wrap makes pfns below zero_pfn compare huge, i.e. false. */
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
70
71#endif
72
73
74
75
76
77
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level. On 31 bit the pmd and pud levels
 * are folded (one entry each); the page table has effectively two
 * levels. On 64 bit all upper levels have 2048 entries.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS	0
112
/* Dump a corrupted page table entry (called when *_bad() triggers). */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
121
122#ifndef __ASSEMBLY__
123
124
125
126
127
128
129
130
131
132
/*
 * The vmalloc area sits at the top of the kernel mapping:
 * 96MB below 2GB on 31 bit, 128GB below 4TB on 64 bit. The virtual
 * memmap (vmemmap) occupies the range between VMALLOC_END and
 * VMEM_MAP_END.
 */
extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to
 * the 1:1 mapping: limited by the space available for struct pages in
 * the vmemmap and rounded down to a 16MB boundary.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* The different pte types (combinations of the HW and SW bits above). */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and
 * protection bit of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
/*
 * Page status table (pgste) bits used for KVM guests: a per-pte lock
 * bit plus saved host (H) and guest (G) referenced/changed bits.
 */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */
371
372
373
374
375
376
/*
 * A user page table pointer has the space-switch event bit, the
 * private space control bit and the storage-alteration event control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key (referenced and changed live here, not in the pte) */
#define _PAGE_CHANGED		0x02	/* HW changed bit		    */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		    */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
395
396
397
398
399
400
401
402
403
404
/*
 * Protection map entries, indexed by xwr bits. On s390 write
 * permission implies read permission; the _EX variants track the
 * execute permission in software (NOTE(review): presumably for the
 * secondary no-exec page table, see set_pte_at — confirm).
 */
	/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

/* log2 of the number of pages a pxd table (and its shadow) spans. */
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
428
/*
 * Return the address of the shadow table entry that mirrors the
 * given table entry, or NULL if the table has no shadow. The shadow
 * table address is stashed in page->index of the page backing the
 * primary table; the offset within the table is carried over.
 */
static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
439
440
441
442
443
444
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		/* Mirror the pte into the shadow page table that lives
		 * PTRS_PER_PTE entries further: valid ptes with the SW
		 * execute bit become read-only there, all others are
		 * left empty. */
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
458
459
460
461
462#ifndef __s390x__
463
/*
 * The pgd and pud levels are folded on 31 bit: there is no real
 * third/fourth table level, so the entries are trivially present,
 * never none and never bad.
 */
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
471
472#else
473
474static inline int pgd_present(pgd_t pgd)
475{
476 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
477 return 1;
478 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
479}
480
static inline int pgd_none(pgd_t pgd)
{
	/* Dynamically folded pgd (below region-second type): never none. */
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
500
static inline int pud_present(pud_t pud)
{
	/* Dynamically folded pud (below region-third type): always present. */
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}
507
508static inline int pud_none(pud_t pud)
509{
510 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
511 return 0;
512 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
513}
514
static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}
527
528#endif
529
530static inline int pmd_present(pmd_t pmd)
531{
532 return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
533}
534
static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	/* Everything outside origin and invalid bit must match the
	 * canonical segment table entry format. */
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}
545
546static inline int pte_none(pte_t pte)
547{
548 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
549}
550
static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	/* Present: the explicit _PAGE_TYPE_NONE encoding, or any entry
	 * that is valid for the hardware (invalid bit clear) and is not
	 * a swap/file software type (_PAGE_SWT clear). */
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

/* True for the nonlinear file mapping encoding (_PAGE_TYPE_FILE). */
static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

/* True if the pte was marked special with pte_mkspecial(). */
static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
572
/*
 * Lock the pgste (page status table entry) that is stored in the
 * second half of the page table page, PTRS_PER_PTE entries after the
 * pte. RCP_PCL_BIT acts as a bit spinlock; preemption is disabled
 * while it is held.
 */
static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageDirty/SetPageReferenced users below */
static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>
595
/*
 * Rescue the referenced/changed state of a pte that is about to be
 * invalidated: storage key bits are folded into the guest (G) pgste
 * bits, the saved host (H) bits are transferred to the struct page
 * flags, and KVM's user-dirty bit is accumulated.
 */
static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}
618
619
620
621
622
623static inline int pte_write(pte_t pte)
624{
625 return (pte_val(pte) & _PAGE_RO) == 0;
626}
627
static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte has no "young" bit on s/390. The referenced bit is in
	 * the storage key. See page_test_and_clear_young for details.
	 */
	return 0;
}
645
646
647
648
649
650#ifndef __s390x__
651
/* pgd/pud are folded on 31 bit; there is nothing to clear. */
#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)
654
655#else
656
/* Clear a pgd entry, but only if it really is a region-second table
 * entry; with dynamic page table levels it may be a folded lower level. */
static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

/* Clear the entry in the primary and, if one exists, the shadow table. */
static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

/* Same as pgd_clear_kernel, one level down (region-third entries). */
static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}
686
687#endif
688
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

/* Clear the pmd in the primary and, if one exists, the shadow table. */
static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	/* Keep the shadow (no-exec) page table in sync. */
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
709
710
711
712
713
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}
728
729static inline pte_t pte_mkwrite(pte_t pte)
730{
731 pte_val(pte) &= ~_PAGE_RO;
732 return pte;
733}
734
static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	 * We must *not* clear the *physical* page dirty bit
	 * just because fork() wants to clear the dirty bit in
	 * *one* of the page's mappings. So we just do nothing.
	 */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* The dirty bit is in the storage key, not the pte;
	 * there is nothing to set here.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its referenced bit in the pte.
	 * There is no point in clearing the real referenced bit here.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its referenced bit in the pte.
	 * There is no point in setting the real referenced bit here.
	 */
	return pte;
}
768
769static inline pte_t pte_mkspecial(pte_t pte)
770{
771 pte_val(pte) |= _PAGE_SPECIAL;
772 return pte;
773}
774
775#ifdef CONFIG_HUGETLB_PAGE
/* Convert a pte-format entry into a huge (segment table) entry. */
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
801#endif
802
803#ifdef CONFIG_PGSTE
804
805
806
/*
 * Test and clear the accumulated user-dirty state of a page for
 * KVM migration. Dirty information lives in three places: the
 * storage key changed bit, the saved host changed bit in the pgste
 * and KVM's user-dirty bit. All three are merged under the pgste
 * lock, the storage key is reset, and the collected dirty state is
 * returned. Returns -EINVAL for mms without a pgste.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	/* Fold the storage-key changed bit into guest-changed/user-dirty. */
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	/* A saved host changed bit makes the page (and user state) dirty. */
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page, 1);	/* reset the storage key */
	rcp_unlock(ptep);
	return dirty;
}
835#endif
836
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	/* Referenced = storage key reference bit or saved host (H) bit. */
	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB:
	 * on s390 the reference bit is in the storage key and never in
	 * the TLB. With virtualization we handle the reference bit,
	 * without we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}
875
/* Invalidate the pte in hardware and flush the TLB entry (ipte). */
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			" ipte %2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
892
/*
 * Invalidate a pte: flush it from the hardware (ipte) and mark the
 * entry empty. For pgste mms the referenced/changed state is first
 * rescued into the pgste under the pgste lock; for noexec mms the
 * shadow pte is invalidated as well.
 */
static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}
911
912
913
914
915
916
917
918
919
920
921
922
923
924
/*
 * ptep_get_and_clear and ptep_clear_flush both flush the TLB for the
 * unmapped pte: on s390 an active pte must be flushed together with
 * its modification, so these primitives do the flush themselves.
 * The lazy pte_clear path is only taken when no other cpu can have
 * the mm attached.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	/* Unconditional invalidate: the pte may be in active use. */
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}
946
947
948
949
950
951
952
953
/*
 * ptep_get_and_clear_full is used for batched unmap: if a full
 * address space is removed (full == 1) a plain pte_clear suffices,
 * since the address space is destroyed soon anyway; otherwise the
 * pte must be invalidated individually.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

/* Make a writable pte read-only, invalidating it first if it may be live. */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
	}								\
})

/* Install the new pte if it differs; evaluates to whether it changed. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})
991
992
993
994
995
996
997
998
/*
 * Test the dirty bit of a page: on s390 it lives in the storage key,
 * not in the page table entry.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

/* Reset the storage key (and with it the changed bit) to the default. */
#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page, int mapped)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}

/*
 * Test and clear the referenced bit in the storage key: rrbe resets
 * the reference bit and returns its old value in the condition code,
 * which is extracted via ipm/srl.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		" rrbe 0,%1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
1027
1028
1029
1030
1031
1032static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1033{
1034 pte_t __pte;
1035 pte_val(__pte) = physpage + pgprot_val(pgprot);
1036 return __pte;
1037}
1038
1039static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1040{
1041 unsigned long physpage = page_to_phys(page);
1042
1043 return mk_pte_phys(physpage, pgprot);
1044}
1045
/* Calculate the index of an address within each page table level. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

/* pgd and pud are folded into the segment (pmd) level on 31 bit. */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1062
1063#else
1064
/* Extract the next-level table origin from a table entry. */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * With dynamic page table levels the pgd may be folded: only step
 * down to the pud table if the entry really is a region-second
 * table entry.
 */
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

/* Same folding logic one level down: pud -> pmd. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
1084
1085#endif
1086
/* Conversions between pfn, struct page and pte. */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/*
 * Swap pte encoding: the swap type occupies bits 2..6 (up to 32
 * types) and the offset is split so that the _PAGE_TYPE_SWAP bits
 * stay intact — the lowest offset bit goes to pte bit 7, the rest
 * starts at bit 11.
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
1147
/* Inverse of mk_swap_pte: extract type and offset from a swap entry. */
#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

/* Nonlinear file mapping: pgoff is stored in pte bits 1-7 and 12+. */
#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
1167
1168#endif
1169
#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
1180
1181#include <asm-generic/pgtable.h>
1182
1183#endif
1184