/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and
 * use the S390 page table tree.  On 31 bit the pud and pmd levels are
 * folded into the pgd (see PTRS_PER_PMD/PTRS_PER_PUD below), which gives
 * the two-level layout the 31-bit mmu expects; on 64 bit up to four
 * levels (region second, region third, segment and page table) are used.
 */

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
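
/*
 * Usage sketch (not from the original source): with colored zero pages
 * ZERO_PAGE() picks one of several physical zero pages based on the low
 * bits of the faulting address, so read-only zero mappings don't all
 * compete for the same cache lines.  Assuming zero_page_mask spans more
 * than one page:
 *
 *	struct page *a = ZERO_PAGE(0x0000UL);
 *	struct page *b = ZERO_PAGE(0x1000UL);
 *	// a and b may differ, but both read as zeroes
 */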

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
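
/*
 * For reference, the 64-bit shifts above work out to:
 *
 *	PMD_SIZE   = 1UL << 20 =  1 MB  (one segment table entry)
 *	PUD_SIZE   = 1UL << 31 =  2 GB  (one region third table entry)
 *	PGDIR_SIZE = 1UL << 42 =  4 TB  (one region second table entry)
 *
 * On 31 bit all three shifts are 20, i.e. pud and pmd collapse onto
 * the 1 MB segment level.
 */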

/*
 * Entries per page directory level.  A 31-bit kernel folds pud and pmd
 * into the pgd; a 64-bit kernel uses 2048-entry region and segment
 * tables and 256-entry page tables.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PMD	1
#define __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping.  On 64 bit a 2 GB window is reserved for modules so
 * that inter-module branches work without trampolines.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}
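
/*
 * Illustrative check (values assumed, not from this header): with
 * MODULES_LEN fixed at 2 GB and MODULES_VADDR/MODULES_END set up at
 * boot, is_module_addr() brackets that window on 64 bit:
 *
 *	is_module_addr((void *) MODULES_VADDR);		// 1
 *	is_module_addr((void *) MODULES_END);		// 1
 *	is_module_addr((void *) (MODULES_VADDR - 1));	// 0
 *
 * On 31 bit it always returns 1.
 */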

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100		/* private space control    */
#define _ASCE_ALT_EVENT		0x80		/* storage alteration event */
#define _ASCE_TABLE_LENGTH	0x7f		/* 128 x 64 entries = 8k    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200		/* page protection bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20		/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10		/* common segment bit	    */
#define _SEGMENT_ENTRY_PTL	0x0f		/* page table length	    */

#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
#define _SEGMENT_ENTRY_BITS_LARGE 0
#define _SEGMENT_ENTRY_ORIGIN_LARGE 0

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL	/* page-content lock bit */
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UC_BIT	0x00008000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00004000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL	/* page-content lock bit */
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
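
/*
 * Rough C equivalent of the csg loop above (illustrative only; assumes
 * a __sync_bool_compare_and_swap-style primitive, the real code needs
 * the inline asm):
 *
 *	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 *	unsigned long old, new;
 *	do {
 *		old = *pgste & ~PGSTE_PCL_BIT;	// expected: lock bit clear
 *		new = old | PGSTE_PCL_BIT;	// desired: lock bit set
 *	} while (!__sync_bool_compare_and_swap(pgste, old, new));
 *	return __pgste(new);
 */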

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *) (ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
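
/*
 * Layout note (sketch): with CONFIG_PGSTE each page table page holds
 * the 256 ptes followed by their 256 pgstes, so the pgste of any pte
 * sits at a fixed offset, as the accessors above rely on:
 *
 *	pgste_t *pgste = (pgste_t *) (ptep + PTRS_PER_PTE);
 */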

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @list: list head for the mm->context gmap list
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
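
/*
 * Worked example (illustrative): pte_modify() keeps the page frame and
 * the dirty/young/special software bits and swaps the protection:
 *
 *	pte_t pte = mk_pte_phys(0x100000UL, PAGE_WRITE);
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 *	pte = pte_modify(pte, PAGE_READ);
 *	// _PAGE_DIRTY and _PAGE_YOUNG survive, _PAGE_WRITE is gone,
 *	// _PAGE_INVALID is cleared again (young + readable), and
 *	// _PAGE_PROTECT stays set because PAGE_READ allows no writes.
 */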

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidate the pte + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
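
/*
 * IPTE (Invalidate Page Table Entry) sets the invalid bit in the pte
 * and purges the matching TLB entries on all CPUs in one instruction;
 * pto carries the page table origin and "address" selects the entry.
 * Sketched use, mirroring the callers below:
 *
 *	if (!(pte_val(*ptep) & _PAGE_INVALID))
 *		__ptep_ipte(addr, ptep);   // pte invalid, TLBs clean
 */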

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidate the pte + local TLB flush only */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}
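
/*
 * Range semantics (sketch): the rrf form of IPTE used above takes an
 * additional-entry count; the CPU may stop early with "address" and
 * "nr" updated, so the instruction is simply retried until nr wraps to
 * 255, i.e. until the whole range is done.  A hypothetical caller:
 *
 *	__ptep_ipte_range(addr, nr_pages - 1, ptep);
 */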

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
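
/*
 * The attach_count trick, spelled out (sketch): the lower 16 bits of
 * mm->context.attach_count hold the number of CPUs the mm is attached
 * to, the upper bits count flushers.  A flush in progress is announced
 * by adding 0x10000; when no other CPU runs the mm, the IPTE can be
 * skipped and the pte just marked invalid:
 *
 *	count = atomic_add_return(0x10000, &mm->context.attach_count);
 *	lazy  = (count & 0xffff) <= (mm == current->active_mm);
 */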

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp, int full)
{
	pmd_t pmd = *pmdp;

	if (!full)
		pmdp_flush_lazy(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Swap pte encoding: the swap type and offset are folded around the
 * hardware pte bits so that the result is an invalid, non-present pte
 * (see mk_swap_pte below).
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
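
/*
 * Worked example (64 bit): type 3, offset 0x1234 encodes as
 *
 *	_PAGE_INVALID | _PAGE_TYPE	-> 0x402
 *	(3 & 0x1f) << 2			-> 0x00c
 *	(0x1234 & 1UL) << 7		-> 0
 *	(0x1234 & ~1UL) << 11		-> 0x91a000
 *
 * i.e. pte_val == 0x91a40e; the __swp_type()/__swp_offset() macros
 * below undo exactly this split.
 */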

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has its own implementation for get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */