1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_S390_PGTABLE_H
12#define _ASM_S390_PGTABLE_H
13
14
15
16
17
18
19
20
21
22
23
24
25
26#ifndef __ASSEMBLY__
27#include <linux/sched.h>
28#include <linux/mm_types.h>
29#include <linux/page-flags.h>
30#include <linux/radix-tree.h>
31#include <asm/bug.h>
32#include <asm/page.h>
33
34extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
35extern void paging_init(void);
36extern void vmem_map_init(void);
37
38
39
40
41
/* No-ops: nothing needs to be done after a pte/pmd update on this arch. */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
44
45
46
47
48
49
50extern unsigned long empty_zero_page;
51extern unsigned long zero_page_mask;
52
/*
 * Select a zero page based on the virtual address: zero_page_mask picks
 * the "color" bits so differently-colored addresses get different copies
 * (see __HAVE_COLOR_ZERO_PAGE below).
 */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
57
58
59#endif
60
61
62
63
64
65
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map, PUD_SHIFT the area a third-level table can map and
 * PGDIR_SHIFT the area a fourth-level table can map.
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page table level: the pte level has 256 entries; the
 * pmd/pud/pgd levels have 2048 entries each (matches the shifts above:
 * e.g. 2048 * 256 * 4K pages = 1 << PUD_SHIFT per pud entry).
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL
89
/* Printed by generic mm code when a corrupt table entry is detected. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
98
99#ifndef __ASSEMBLY__
100
101
102
103
104
105
106
107
/* Runtime-sized kernel regions; set during early setup (values not visible here). */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

/* vmemmap sits above the direct mapping, so it bounds usable physical memory. */
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
/* Self-#define so generic code can test "#ifdef MODULES_VADDR". */
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN	(1UL << 31)
119
120static inline int is_module_addr(void *addr)
121{
122 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
123 if (addr < (void *)MODULES_VADDR)
124 return 0;
125 if (addr > (void *)MODULES_END)
126 return 0;
127 return 1;
128}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/* Hardware bits in the page table entry. */
#define _PAGE_PROTECT	0x200		/* HW write-protect bit (see pte_wrprotect) */
#define _PAGE_INVALID	0x400		/* HW invalid bit (see pte_mkold/pte_clear) */
#define _PAGE_LARGE	0x800		/* marks a huge pte (see pte_mkhuge) */

/* Software bits in the page table entry. */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify(). */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
/*
 * Bits in an address-space-control element (asce).
 * NOTE(review): bit meanings follow the z/Architecture DAT format —
 * confirm details against the Principles of Operation.
 */
#define _ASCE_ORIGIN		~0xfffUL/* table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* determines length of region table */

/* Bits in the region table entry. */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, 2G page */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit */

/* Bits in the segment table entry. */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL	/* valid bits, 1M pmd */
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL	/* valid bits, large pmd */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL		/* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL		/* segment table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200			/* page protection bit */
#define _SEGMENT_ENTRY_INVALID	0x20			/* invalid segment entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/* Software bits in the segment table entry (large-pmd dirty/young tracking). */
#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
/*
 * Bits in the page-table-status-extension (pgste) that lives
 * PTRS_PER_PTE entries after its pte (see pgste_get()/pgste_set()).
 * ACC/FP mirror the storage-key access bits; GR/GC hold guest
 * referenced/changed state (see pgste_update_all()/pgste_set_key()).
 */
#define PGSTE_ACC_BITS	0xf000000000000000UL	/* storage key access bits */
#define PGSTE_FP_BIT	0x0800000000000000UL	/* storage key fetch protect */
#define PGSTE_PCL_BIT	0x0080000000000000UL	/* pgste lock bit (pgste_get_lock) */
#define PGSTE_HR_BIT	0x0040000000000000UL	/* host referenced */
#define PGSTE_HC_BIT	0x0020000000000000UL	/* host changed */
#define PGSTE_GR_BIT	0x0004000000000000UL	/* guest referenced */
#define PGSTE_GC_BIT	0x0002000000000000UL	/* guest changed */
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest page state, stored in the pgste (used by ptep_clear_flush()). */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
344
345
346
347
/*
 * Page protection definitions.  Read-only mappings keep the HW
 * invalid/protect bits set until first access/write faults them in
 * (see pte_modify()/pte_mkyoung()/pte_mkdirty()).
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)
360
361
362
363
364
365
366
/*
 * Generic protection maps: __Pxxx = private (copy-on-write) mappings,
 * __Sxxx = shared mappings.  Private writable mappings stay PAGE_READ
 * so writes fault and trigger COW.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
384
385
386
387
/* Segment (pmd) level protections, used by massage_pgprot_pmd(). */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
394
/* Return 1 if this mm carries pgstes alongside its ptes (KVM guests). */
static inline int mm_has_pgste(struct mm_struct *mm)
{
	int ret = 0;

#ifdef CONFIG_PGSTE
	ret = unlikely(mm->context.has_pgste) ? 1 : 0;
#endif
	return ret;
}
403
/* Return 1 if new page tables for this mm must be allocated with pgstes. */
static inline int mm_alloc_pgste(struct mm_struct *mm)
{
	int ret = 0;

#ifdef CONFIG_PGSTE
	ret = unlikely(mm->context.alloc_pgste) ? 1 : 0;
#endif
	return ret;
}
412
413
414
415
416
/* Storage-key using mms must not map the shared zero page. */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
	int ret = 0;

#ifdef CONFIG_PGSTE
	ret = mm->context.use_skey ? 1 : 0;
#endif
	return ret;
}
426
427
428
429
430static inline int pgd_present(pgd_t pgd)
431{
432 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
433 return 1;
434 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
435}
436
437static inline int pgd_none(pgd_t pgd)
438{
439 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
440 return 0;
441 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
442}
443
static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry.  Complain about any bit that
	 * is invalid for either kind of entry (everything outside the
	 * origin, invalid, type and length fields).
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
456
457static inline int pud_present(pud_t pud)
458{
459 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
460 return 1;
461 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
462}
463
464static inline int pud_none(pud_t pud)
465{
466 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
467 return 0;
468 return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
469}
470
471static inline int pud_large(pud_t pud)
472{
473 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
474 return 0;
475 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
476}
477
static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry.  Complain about any bit that
	 * is invalid for either kind of entry (same mask as pgd_bad()).
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}
490
/* A pmd is present unless it is exactly the empty pattern (invalid bit only). */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}
495
/* The empty pmd is exactly _SEGMENT_ENTRY_INVALID with no other bits. */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}
500
501static inline int pmd_large(pmd_t pmd)
502{
503 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
504}
505
506static inline unsigned long pmd_pfn(pmd_t pmd)
507{
508 unsigned long origin_mask;
509
510 origin_mask = _SEGMENT_ENTRY_ORIGIN;
511 if (pmd_large(pmd))
512 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
513 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
514}
515
516static inline int pmd_bad(pmd_t pmd)
517{
518 if (pmd_large(pmd))
519 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
520 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
521}
522
/* Flushing pmd helpers, implemented out of line; override the generic versions. */
#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
531
532#define __HAVE_ARCH_PMD_WRITE
533static inline int pmd_write(pmd_t pmd)
534{
535 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
536}
537
538static inline int pmd_dirty(pmd_t pmd)
539{
540 int dirty = 1;
541 if (pmd_large(pmd))
542 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
543 return dirty;
544}
545
546static inline int pmd_young(pmd_t pmd)
547{
548 int young = 1;
549 if (pmd_large(pmd))
550 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
551 return young;
552}
553
554static inline int pte_present(pte_t pte)
555{
556
557 return (pte_val(pte) & _PAGE_PRESENT) != 0;
558}
559
560static inline int pte_none(pte_t pte)
561{
562
563 return pte_val(pte) == _PAGE_INVALID;
564}
565
/* Swap entries have the protect bit set but the present bit clear. */
static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}
572
/* Non-zero (the raw bit value, not 1) if the pte is marked special. */
static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}
577
#define __HAVE_ARCH_PTE_SAME
/* Two ptes are the same iff their raw values match exactly. */
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
583
584#ifdef CONFIG_NUMA_BALANCING
585static inline int pte_protnone(pte_t pte)
586{
587 return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
588}
589
static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
595#endif
596
/* Non-zero if the software soft-dirty bit is set (always 0 without CONFIG_MEM_SOFT_DIRTY). */
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty
602
/* Set the software soft-dirty bit (no-op bitmask without CONFIG_MEM_SOFT_DIRTY). */
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty
609
/* Clear the software soft-dirty bit. */
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
616
/* Non-zero if the segment soft-dirty bit is set. */
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}
621
/* Set the segment soft-dirty bit. */
static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
627
/* Clear the segment soft-dirty bit. */
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
633
/*
 * Lock and return the pgste belonging to @ptep.  The pgste is stored
 * PTRS_PER_PTE entries after its pte; the lock is the PCL bit (0x0080
 * in the high halfword), acquired with a compare-and-swap retry loop.
 * Without CONFIG_PGSTE this degenerates to returning an empty pgste.
 */
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"	/* old = *pgste */
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* expected value: PCL bit clear */
		"	oihh	%1,0x0080\n"	/* new value: PCL bit set */
		"	csg	%0,%1,%2\n"	/* swap in, retry while contended */
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
653
/* Store @pgste back with the PCL (lock) bit cleared, then allow preemption. */
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}
666
667static inline pgste_t pgste_get(pte_t *ptep)
668{
669 unsigned long pgste = 0;
670#ifdef CONFIG_PGSTE
671 pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
672#endif
673 return __pgste(pgste);
674}
675
/* Store @pgste without touching the PCL lock bit state (caller holds the lock). */
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
682
/*
 * Fold the current storage-key state of the mapped page into the pgste.
 * Only meaningful for storage-key using mms with a valid pte.
 */
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;

}
703
/*
 * Program the storage key of the page about to be mapped by @entry
 * from the access-key/fetch-protection bits saved in the pgste.
 */
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	/* The old pte must be invalid: we are installing a new mapping. */
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
725
/* Store @entry into the pte, recording user-dirty state in the pgste. */
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}
746
747
748
749
750
751
752
753
754
755
756
757
/**
 * struct gmap - guest address space mapping
 * @list: list head for the per-mm list of gmaps
 * @crst_list: list of crst (region/segment) tables used by this gmap
 * @mm: the parent mm_struct
 * @guest_to_host: radix tree for guest-to-host address translation
 * @host_to_guest: radix tree for the reverse (host-to-guest) lookup
 * @guest_table_lock: protects the guest page table entries
 * @table: pointer to the top-level guest page table
 * @asce: address space control element for the guest table
 * @asce_end: end address covered by @asce
 * @private: opaque pointer for the owner (presumably KVM — confirm)
 * @pfault_enabled: whether pseudo-page-faults apply to this guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};
771
772
773
774
775
/**
 * struct gmap_notifier - callback registered for gmap invalidation events
 * @list: list head for the notifier chain
 * @notifier_call: invoked with the gmap and the affected guest address
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};
780
/* Guest mapping (gmap) API — implemented out of line; prototypes only. */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

/* IPTE-notification hooks, paired with PGSTE_IN_BIT / pgste_ipte_notify(). */
void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
801
/*
 * If the IN (ipte notify) bit is set in the pgste, clear it and call
 * the gmap notification path before the pte is invalidated.
 */
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}
814
815
816
817
818
819
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.  For pgste mms the pgste lock is taken and
 * storage-key / user-dirty bookkeeping is done alongside the store.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		/* drop the guest "zero page" state — the page is remapped */
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}
835
836
837
838
839
840static inline int pte_write(pte_t pte)
841{
842 return (pte_val(pte) & _PAGE_WRITE) != 0;
843}
844
845static inline int pte_dirty(pte_t pte)
846{
847 return (pte_val(pte) & _PAGE_DIRTY) != 0;
848}
849
850static inline int pte_young(pte_t pte)
851{
852 return (pte_val(pte) & _PAGE_YOUNG) != 0;
853}
854
#define __HAVE_ARCH_PTE_UNUSED
/* Non-zero if the guest marked the page unused (set in ptep_clear_flush()). */
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
860
861
862
863
864
/* Only clear a real region-second entry; folded pgds belong to a lower level. */
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}
870
/* Only clear a real region-third entry; folded puds belong to a lower level. */
static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}
876
/* Reset the pmd to the empty (invalid-only) pattern. */
static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}
881
/* Reset the pte to the empty (invalid-only) pattern; no TLB flush here. */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
886
887
888
889
890
/*
 * Exchange the protection of a pte, keeping the bits in _PAGE_CHG_MASK.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
909
910static inline pte_t pte_wrprotect(pte_t pte)
911{
912 pte_val(pte) &= ~_PAGE_WRITE;
913 pte_val(pte) |= _PAGE_PROTECT;
914 return pte;
915}
916
917static inline pte_t pte_mkwrite(pte_t pte)
918{
919 pte_val(pte) |= _PAGE_WRITE;
920 if (pte_val(pte) & _PAGE_DIRTY)
921 pte_val(pte) &= ~_PAGE_PROTECT;
922 return pte;
923}
924
925static inline pte_t pte_mkclean(pte_t pte)
926{
927 pte_val(pte) &= ~_PAGE_DIRTY;
928 pte_val(pte) |= _PAGE_PROTECT;
929 return pte;
930}
931
/* Mark dirty (and soft-dirty); writable pages also lose hardware protection. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
939
940static inline pte_t pte_mkold(pte_t pte)
941{
942 pte_val(pte) &= ~_PAGE_YOUNG;
943 pte_val(pte) |= _PAGE_INVALID;
944 return pte;
945}
946
/* Mark young; readable pages also become hardware-valid again. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}
954
/* Mark the pte as special (see __HAVE_ARCH_PTE_SPECIAL). */
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
960
#ifdef CONFIG_HUGETLB_PAGE
/* Mark the pte as a huge page mapping. */
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
968
/* Invalidation + global TLB flush for the pte (IPTE instruction). */
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
978
/*
 * Invalidation + local-cpu TLB flush for the pte: IPTE encoded via
 * .insn (opcode 0xb2210000) with the local-clearing option bit set.
 */
static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	asm volatile(
		"	.insn	rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
988
/*
 * Invalidate a range of ptes + global TLB flush of the ptes.  The
 * range form of IPTE updates @address and @nr as it goes; it may stop
 * early, so loop until @nr reads 255 (range exhausted).
 * NOTE(review): relies on the IPTE-range facility semantics — confirm
 * against the Principles of Operation.
 */
static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}
1000
/*
 * Invalidate the pte and flush TLBs immediately.  The attach_count
 * upper half counts in-flight flushers; if no other cpu has the mm
 * attached and the local-clearing facility is available, a cheaper
 * local flush suffices.
 */
static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;	/* already invalid, nothing to flush */
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
1017
/*
 * Lazily invalidate the pte: if no other cpu has the mm attached,
 * just mark the pte invalid and defer the TLB flush (flush_mm flag);
 * otherwise fall back to an immediate IPTE.
 */
static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;	/* already invalid, nothing to flush */
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
1034
1035
1036
1037
/*
 * Test and clear the guest/user dirty state (PGSTE_UC_BIT) of a pte.
 * If the page was user-dirty the pte is re-protected (or invalidated
 * without ESOP) so the next write sets the bit again.  Returns 0 for
 * mms without pgstes.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
1064
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/*
 * Return whether the pte was young and age it (clear young, set
 * invalid).  For pgste mms the flush is wrapped in ipte notification
 * and the storage-key state is folded back into the pgste.
 */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}
1092
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
/* The test_and_clear variant already flushes, so this is the same operation. */
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/*
 * Read and clear a pte with a lazy TLB flush.  For pgste mms the
 * operation runs under the pgste lock with ipte notification, and the
 * storage-key state of the departing page is saved into the pgste.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
1135
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Begin a protection-change transaction: fetch and lazily invalidate
 * the pte.  For pgste mms the pgste stays LOCKED (stored via
 * pgste_set(), not unlocked) until ptep_modify_prot_commit() runs.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}
1158
/*
 * Finish the transaction started by ptep_modify_prot_start(): install
 * the new pte and release the pgste lock taken there.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}
1173
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
/*
 * Read and clear a pte with an immediate TLB flush.  For pgste mms,
 * if the guest marked the page unused the returned pte carries
 * _PAGE_UNUSED so reclaim can skip writeback (see pte_unused()).
 */
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
1199
1200
1201
1202
1203
1204
1205
1206
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
/*
 * Batched-unmap variant: when @full is set the caller (tlb_gather_mmu)
 * guarantees the mm's ptes cannot be accessed concurrently, so the
 * flush and the pgste locking/notification can be skipped and a plain
 * clear is enough.
 */
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
1231
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/*
 * Write-protect the pte (lazy flush).  Already read-only ptes are
 * left untouched.  Returns the (possibly updated) pte value.
 */
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}
1256
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Install @entry if it differs from the current pte, flushing first.
 * Returns 0 if nothing changed, 1 if the pte was updated.  The
 * storage key only needs (re)programming when the old pte was invalid.
 */
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;
	pte_t oldpte;

	oldpte = *ptep;
	if (pte_same(oldpte, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		if (pte_val(oldpte) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}
1284
1285
1286
1287
1288
1289static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1290{
1291 pte_t __pte;
1292 pte_val(__pte) = physpage + pgprot_val(pgprot);
1293 return pte_mkyoung(__pte);
1294}
1295
1296static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1297{
1298 unsigned long physpage = page_to_phys(page);
1299 pte_t __pte = mk_pte_phys(physpage, pgprot);
1300
1301 if (pte_write(__pte) && PageDirty(page))
1302 __pte = pte_mkdirty(__pte);
1303 return __pte;
1304}
1305
/* Table-index helpers (entry counts per level are defined above). */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Extract the next-level table origin from an upper-level entry. */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1317
1318static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
1319{
1320 pud_t *pud = (pud_t *) pgd;
1321 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1322 pud = (pud_t *) pgd_deref(*pgd);
1323 return pud + pud_index(address);
1324}
1325
1326static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1327{
1328 pmd_t *pmd = (pmd_t *) pud;
1329 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1330 pmd = (pmd_t *) pud_deref(*pud);
1331 return pmd + pmd_index(address);
1332}
1333
/* pfn/page conversion helpers built on mk_pte_phys() and pmd_pfn(). */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
1345
1346#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to the corresponding segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}
1359
1360static inline pmd_t pmd_wrprotect(pmd_t pmd)
1361{
1362 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1363 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1364 return pmd;
1365}
1366
1367static inline pmd_t pmd_mkwrite(pmd_t pmd)
1368{
1369 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1370 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1371 return pmd;
1372 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1373 return pmd;
1374}
1375
1376static inline pmd_t pmd_mkclean(pmd_t pmd)
1377{
1378 if (pmd_large(pmd)) {
1379 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1380 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1381 }
1382 return pmd;
1383}
1384
1385static inline pmd_t pmd_mkdirty(pmd_t pmd)
1386{
1387 if (pmd_large(pmd)) {
1388 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
1389 _SEGMENT_ENTRY_SOFT_DIRTY;
1390 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1391 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1392 }
1393 return pmd;
1394}
1395
1396static inline pmd_t pmd_mkyoung(pmd_t pmd)
1397{
1398 if (pmd_large(pmd)) {
1399 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1400 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1401 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1402 }
1403 return pmd;
1404}
1405
1406static inline pmd_t pmd_mkold(pmd_t pmd)
1407{
1408 if (pmd_large(pmd)) {
1409 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1410 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1411 }
1412 return pmd;
1413}
1414
/*
 * Replace the protection of a pmd while preserving its origin and,
 * for large pmds, the dirty/young/soft-dirty software state.
 */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		/* keep only origin, large bit and tracked state bits */
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		/* a clean entry must stay protected so writes fault ... */
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		/* ... and an old entry must stay invalid so accesses fault */
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	/* non-large: keep only the origin, apply the new protection */
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}
1432
1433static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1434{
1435 pmd_t __pmd;
1436 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1437 return __pmd;
1438}
1439
1440#endif
1441
/*
 * Invalidate a pmd with compare-and-swap-and-purge: atomically replace
 * the entry with a copy that has the invalid bit set, purging the TLB.
 * Fallback for machines without the IDTE facility.
 */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	/* NOTE(review): the +5 sets low-order operand bits required by the
	 * CSP instruction encoding — confirm against the z/Architecture
	 * Principles of Operation. */
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
1454
/*
 * Invalidate a pmd with IDTE (invalidate DAT table entry, opcode
 * 0xb98e), flushing the matching TLB entries on all CPUs.
 */
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	/* IDTE needs the segment-table origin, not the entry address */
	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}
1466
/*
 * Same as __pmdp_idte but with the final instruction field set to 1,
 * restricting the TLB flush to the local CPU (requires the TLB_LC
 * facility — see the MACHINE_HAS_TLB_LC check in pmdp_flush_direct).
 */
static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	/* IDTE needs the segment-table origin, not the entry address */
	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}
1478
/*
 * Invalidate a pmd and flush its TLB entries immediately.
 *
 * An already invalid entry needs no flush.  Without the IDTE facility
 * fall back to compare-and-swap-and-purge.  Otherwise use the
 * local-only IDTE variant when this CPU is the only one with the mm
 * attached, else the machine-wide one.
 */
static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	/* mark a flusher in the upper half of attach_count; the low 16
	 * bits count attached CPUs (<= active means only us, at most) */
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}
1499
/*
 * Invalidate a pmd, deferring the TLB flush when possible.
 *
 * If no other CPU has the mm attached, just set the invalid bit and
 * record a pending flush in mm->context.flush_mm; otherwise flush now
 * via IDTE, or CSP on machines without the facility.
 */
static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		/* only this task uses the mm: defer the hardware flush */
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}
1518
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* Arch-specific deposit/withdraw of a preallocated page table for a
 * huge pmd, overriding the generic implementation */
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1527
/* Install a pmd entry; a plain store suffices here, no extra bookkeeping */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}
1533
1534static inline pmd_t pmd_mkhuge(pmd_t pmd)
1535{
1536 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1537 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1538 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1539 return pmd;
1540}
1541
1542#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1543static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1544 unsigned long address, pmd_t *pmdp)
1545{
1546 pmd_t pmd;
1547
1548 pmd = *pmdp;
1549 pmdp_flush_direct(vma->vm_mm, address, pmdp);
1550 *pmdp = pmd_mkold(pmd);
1551 return pmd_young(pmd);
1552}
1553
1554#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1555static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1556 unsigned long address, pmd_t *pmdp)
1557{
1558 pmd_t pmd = *pmdp;
1559
1560 pmdp_flush_direct(mm, address, pmdp);
1561 pmd_clear(pmdp);
1562 return pmd;
1563}
1564
1565#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1566static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
1567 unsigned long address,
1568 pmd_t *pmdp, int full)
1569{
1570 pmd_t pmd = *pmdp;
1571
1572 if (!full)
1573 pmdp_flush_lazy(mm, address, pmdp);
1574 pmd_clear(pmdp);
1575 return pmd;
1576}
1577
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
/* get_and_clear already flushes on this arch, so this is a plain alias */
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long address, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
1584
#define __HAVE_ARCH_PMDP_INVALIDATE
/* Invalidate a huge pmd and flush its TLB entries */
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}
1591
1592#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1593static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1594 unsigned long address, pmd_t *pmdp)
1595{
1596 pmd_t pmd = *pmdp;
1597
1598 if (pmd_write(pmd)) {
1599 pmdp_flush_direct(mm, address, pmdp);
1600 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
1601 }
1602}
1603
/* Clear and flush a pmd during THP collapse; get_and_clear suffices here */
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush
1611
/* Build a huge-page pmd from a page frame number or a struct page */
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1614
/* Nonzero if the pmd maps a large (huge) segment rather than a pte table */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}
1619
1620static inline int has_transparent_hugepage(void)
1621{
1622 return MACHINE_HAS_HPAGE ? 1 : 0;
1623}
1624#endif
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
/*
 * Swap pte layout: a 52-bit offset starting at bit 12 and a 5-bit
 * swap type starting at bit 2 (the invalid and protect bits are set
 * separately by mk_swap_pte).
 */
#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT 12
#define __SWP_TYPE_MASK ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT 2
1647
1648static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1649{
1650 pte_t pte;
1651
1652 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1653 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1654 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1655 return pte;
1656}
1657
1658static inline unsigned long __swp_type(swp_entry_t entry)
1659{
1660 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1661}
1662
1663static inline unsigned long __swp_offset(swp_entry_t entry)
1664{
1665 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1666}
1667
1668static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1669{
1670 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1671}
1672
/* pte and swap entry share the same bit layout, so conversion is a copy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1675
1676#endif
1677
/* Every kernel address is considered valid on this architecture */
#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* The arch supplies its own mmap address-space layout helpers */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN




/* No arch-specific page-table caches to initialize or trim */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
1695
1696#include <asm-generic/pgtable.h>
1697
1698#endif
1699