/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
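/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * This file contains the functions and defines necessary to modify and
 * use the S390 page table tree.
 */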
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

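/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */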
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

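/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */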
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

#endif /* !__ASSEMBLY__ */

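/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */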
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

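/*
 * Entries per page table level: a 4KB page holds 256 eight-byte pte
 * entries, while the segment and region tables are four pages long
 * with 2048 entries each.
 */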
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__

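/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */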
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
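/*
 * A 64 bit pagetable entry of S390 has following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */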
/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
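/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte
 * type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed
 * only with the page table lock held.
 *
 * The combination of the _PAGE_PRESENT, _PAGE_PROTECT and _PAGE_INVALID
 * bits determines the pte type:
 *	pte_present:	(pte & _PAGE_PRESENT) != 0
 *	pte_none:	pte == _PAGE_INVALID
 *	pte_swap:	(pte & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT
 */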
/* Bits in the segment/region table address-space-control element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	 /* private space control	    */
#define _ASCE_ALT_EVENT		0x80	 /* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	 /* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	 /* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	 /* asce table type mask	    */
#define _ASCE_TYPE_REGION1	0x0c	 /* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	 /* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	 /* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	 /* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	 /* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	 /* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	 /* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	 /* region/segment table type mask  */
#define _REGION_ENTRY_TYPE_R1	0x0c	 /* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	 /* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	 /* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	 /* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	 /* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	 /* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
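/*
 * Segment table entry encoding: the dirty and young software bits,
 * together with the hardware protect and invalid bits, encode the
 * prot-none/read-only/read-write x clean/dirty x old/young states.
 * The segment table origin is used to distinguish empty (origin==0)
 * from read-write, old segment table entries (origin!=0).
 */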
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE	0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED	0x0000000001000000UL

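/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */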
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

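/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/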
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

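/*
 * pgd/pud/pmd/pte query functions
 */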
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

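/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */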
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

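/*
 * pgd/pud/pmd/pte modification functions
 */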
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

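/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */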
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set; clear it again for young, readable ptes.
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set; clear it again for dirty, writable ptes.
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}
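/*
 * ptep_get_and_clear and ptep_clear_flush both clear the TLB for the
 * unmapped pte. The reason is that ptep_get_and_clear is used in common
 * code (e.g. change_pte_range) to modify an active pte. On s390 the TLB
 * needs to get flushed with the modification of the pte if the pte is
 * active, so the only way to implement this is to have ptep_get_and_clear
 * do the TLB flush. In exchange flush_tlb_range is a nop.
 */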
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

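/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of a mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */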
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

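/*
 * Additional functions to handle KVM guest page tables
 */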
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);

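/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */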
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

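/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */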
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

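/* Find an entry in the lowest level page table.. */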
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
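/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */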
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get_unmapped_area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */