/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use up to four of the five
 * levels the hardware provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
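
/*
 * With 4K pages these shifts give 1MB segments at the pmd level and,
 * on 64 bit, 2GB region-third entries at the pud level and 4TB
 * region-second entries at the pgd level; on 31 bit all three shifts
 * coincide and the upper levels are folded into the pgd.
 */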

/*
 * entries per page directory level: a page table holds 256 entries,
 * segment and region tables hold 2048 entries each.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__

/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(1UL << 30)
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * The vmemmap array of struct pages lives between VMALLOC_END and
 * VMEM_MAP_END. VMEM_MAX_PHYS is the highest physical address that can
 * be added to the 1:1 mapping; it is limited both by the space available
 * for the struct pages and by VMALLOC_START, rounded down to a 16MB
 * boundary.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

/*
 * A page table entry of S390 consists of the page frame real address,
 * two hardware bits and a set of software bits:
 *
 *   I (_PAGE_INVALID):  the page is not available for address translation
 *   P (_PAGE_RO):       store accesses to the page are not possible
 *
 * All other bits used in this file are software defined; the different
 * software page types are built from the I and P bits plus the software
 * type bits defined below.
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * The page types (empty, none, swap, file, ro, rw, ex_ro, ex_rw) are
 * encoded in the hardware invalid (I) and protection (P) bits together
 * with the software bits t (_PAGE_SWT) and x (_PAGE_SWX), so that
 * pte_present, pte_none and pte_file can distinguish the types without
 * holding the page table lock:
 *
 *	empty:	I set,		t, x and P clear
 *	none:	I and t set,	x and P clear
 *	swap:	I, t and x set,	P clear
 *	file:	I, P and t set,	x clear
 *	ro/rw:	I clear, P set/clear, x set for the executable variants
 */
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* segment table length		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		    */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		    */

#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
	/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
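
/*
 * Note that the private protection map sends writable mappings through
 * PAGE_RO (__P010/__P011): the first write then faults and the fault
 * handler can perform copy-on-write.
 */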

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
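
/*
 * get_shadow_table() relies on the table allocation code keeping the
 * address of the shadow table in the page->index field of the primary
 * table's struct page; a zero index means no shadow table exists.
 */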

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
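
/*
 * With the execute-protection emulation active the pte is mirrored into
 * the shadow page table PTRS_PER_PTE entries away: a valid pte that
 * carries the software execute bit (_PAGE_SWX) is copied read-only, all
 * others become empty, so that instruction fetches, which are satisfied
 * from the shadowed address space, fault on non-executable pages.
 */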

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}
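
/*
 * If CONFIG_PGSTE is set, each page table entry is accompanied by a
 * page status table entry (pgste) in the same page, PTRS_PER_PTE
 * entries further on. The pgste caches the guest view of the storage
 * key for KVM; rcp_lock/rcp_unlock above serialize updates to it with
 * a bit spinlock on RCP_PCL_BIT.
 */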

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte/pmd
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * clear_user_page/copy_user_page functions deal with
	 * the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}
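
/*
 * The referenced information comes from the storage key of the physical
 * page, not from the pte: the referenced bit is read from the key and
 * recorded for the guest in RCP_GR, then combined with (and clearing)
 * the host referenced bit RCP_HR kept in the pgste.
 */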

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * on s390 the reference bits are in the storage key and never
	 * in the TLB. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * ptep_get_and_clear and ptep_clear_flush both flush the TLB entry for
 * the pte they remove: on s390 an active pte may only be changed after
 * it has been invalidated with the ipte instruction, which also flushes
 * the TLB. Clearing the pte in software (pte_clear) is only safe if no
 * other user of the address space can still see it, i.e. if the mm has
 * a single user and is the current active mm.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * When a whole address space is being torn down (full != 0) the TLB is
 * flushed afterwards anyway, so the pte may simply be cleared in
 * software; otherwise it has to be invalidated with ipte.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
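
/*
 * rrbe (reset reference bit extended) resets the reference bit in the
 * storage key of the page frame and reports its old state in the
 * condition code; ipm/srl move the condition code into ccode, so
 * "ccode & 2" is non-zero iff the reference bit was set.
 */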

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
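
/*
 * Typical lookup with the accessors above (illustrative sketch only,
 * validity checking of each level omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */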

/*
 * A swap pte uses the _PAGE_TYPE_SWAP bit combination together with a
 * five bit swap type and the swap offset. The lowest offset bit is
 * stored in pte bit 7, the remaining offset bits start at pte bit 12,
 * so that the bits which must stay zero in an invalid pte remain clear.
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else /* __s390x__ */
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif /* __s390x__ */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
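
/*
 * Round trip of the encoding above for a hypothetical type/offset pair:
 * __swp_entry(3, 0x1234) places the type in pte bits 2-6, offset bit 0
 * in pte bit 7 and the remaining offset bits from bit 12 upwards;
 * __swp_type() and __swp_offset() then recover 3 and 0x1234.
 */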

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */