#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
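
/*
 * Illustrative note (not from the original source): direct_pages_count[]
 * tracks how much of the kernel direct mapping is backed by 4K, 1M and 2G
 * mappings; arch_report_meminfo() is expected to print these counters as
 * the DirectMap* lines in /proc/meminfo. A hedged sketch of a caller that
 * splits one 1M direct mapping into 4K pages would adjust both counters:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */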

/*
 * No extra MMU state needs to be maintained when a pte is established,
 * so these hooks are no-ops on s390.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
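
/*
 * Illustrative note (not from the original source): with
 * __HAVE_COLOR_ZERO_PAGE the kernel keeps several zero pages instead of a
 * single one, and ZERO_PAGE() selects among them by masking the faulting
 * virtual address with zero_page_mask. Read faults on anonymous memory at
 * different addresses therefore hit different zero pages, spreading the
 * read traffic across cache colors instead of hammering one page.
 */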

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW pte special bit */
#define _PAGE_UNUSED	0x080		/* SW pte unused bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/* Bits in the address-space-control element (ASCE) */
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_PROTECT	0x200
#define _REGION_ENTRY_NOEXEC	0x100
#define _REGION_ENTRY_OFFSET	0xc0
#define _REGION_ENTRY_INVALID	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL
#define _REGION3_ENTRY_DIRTY	0x2000
#define _REGION3_ENTRY_YOUNG	0x1000
#define _REGION3_ENTRY_LARGE	0x0400
#define _REGION3_ENTRY_READ	0x0002
#define _REGION3_ENTRY_WRITE	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL
#define _SEGMENT_ENTRY_PROTECT		0x200
#define _SEGMENT_ENTRY_NOEXEC		0x100
#define _SEGMENT_ENTRY_INVALID		0x20
#define _SEGMENT_ENTRY_TYPE_MASK	0x0c

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000
#define _SEGMENT_ENTRY_YOUNG	0x1000
#define _SEGMENT_ENTRY_LARGE	0x0400
#define _SEGMENT_ENTRY_WRITE	0x0002
#define _SEGMENT_ENTRY_READ	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE	(_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE	(_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
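
/*
 * Illustrative note (not from the original source): with the shifts above,
 * a 64-bit virtual address decomposes as
 *
 *	bits 63..53  region-first index   (11 bits, PGD level)
 *	bits 52..42  region-second index  (11 bits, P4D level)
 *	bits 41..31  region-third index   (11 bits, PUD level)
 *	bits 30..20  segment index        (11 bits, PMD level)
 *	bits 19..12  page index           ( 8 bits, PTE level)
 *	bits 11..0   byte offset within the 4K page
 *
 * so each region/segment table has 2048 (_CRST_ENTRIES) entries and each
 * page table has 256 (_PAGE_ENTRIES) entries.
 */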

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
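
/*
 * Illustrative note (not from the original source): the __Pxwr/__Sxwr
 * macros feed the generic protection_map[]. The three digits spell the
 * VM_EXEC, VM_WRITE and VM_READ bits of a vma; __P* is used for private
 * (MAP_PRIVATE) and __S* for shared (MAP_SHARED) mappings. Note that no
 * __P* entry grants write access: private writable mappings start out
 * read-only and are made writable by the copy-on-write fault handler,
 * while __S010/__S011 map to PAGE_RW directly.
 */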

/*
 * Segment entry (large page, 1MB) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				   _SEGMENT_ENTRY_LARGE | \
				   _SEGMENT_ENTRY_READ | \
				   _SEGMENT_ENTRY_YOUNG | \
				   _SEGMENT_ENTRY_PROTECT | \
				   _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				     _SEGMENT_ENTRY_LARGE | \
				     _SEGMENT_ENTRY_READ | \
				     _SEGMENT_ENTRY_WRITE | \
				     _SEGMENT_ENTRY_YOUNG | \
				     _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page, 2GB) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
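
/*
 * Illustrative note (not from the original source): s390 page tables grow
 * dynamically, so how many levels a process really uses depends on its
 * address space limit. With _REGION3_SIZE = 2GB, _REGION2_SIZE = 4TB and
 * _REGION1_SIZE = 8PB, an mm limited to 4TB (_REGION2_SIZE) has a
 * region-third table on top, so its p4d and pud levels are "folded" (they
 * are transparent pass-throughs in the generic page table walk) and only
 * become real tables once the address space is extended beyond that limit.
 */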

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * An mm with page status table extensions (KVM guest backing) must not
 * have its read faults backed by the shared zero page.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
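
/*
 * Illustrative note (not from the original source): csp(), cspg() and
 * crdte() wrap the COMPARE AND SWAP AND PURGE (32/64 bit) and COMPARE AND
 * REPLACE DAT TABLE ENTRY instructions. They replace a page or
 * region/segment table entry only if it still contains the expected old
 * value, and purge the affected TLB entries on all CPUs as part of the
 * same instruction, so a table entry can be exchanged atomically without
 * a separate flush.
 */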

static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages.
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages.
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
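
/*
 * Illustrative note (not from the original source): opcode 0xb2210000 is
 * INVALIDATE PAGE TABLE ENTRY (IPTE). It sets the invalid bit in a pte and
 * purges the matching TLB entries, either only on the local CPU
 * (IPTE_LOCAL) or machine wide (IPTE_GLOBAL); IPTE_NODAT and
 * IPTE_GUEST_ASCE select the optional no-DAT and guest-ASCE operands. The
 * range form hands the CPU a count of additional ptes and repeats until
 * the count register signals that the whole range has been processed.
 */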

pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. "full" is set only when the whole address space is being torn
 * down; in that case no per-pte TLB flush is needed, a plain store
 * suffices and the final flush of the address space covers it.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables.
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * set_pte_at() additionally keeps the page status table (pgste) in sync
 * for mms that back a KVM guest.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * pgd_offset always adds the index for the top-level table of the mm,
 * whatever its type happens to be; the shift is derived from the table
 * type found in the first entry. The subsequent p4d_offset, pud_offset
 * and pmd_offset functions only add an index if they actually
 * dereferenced the pointer, otherwise they pass it through unchanged
 * (folded level).
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
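
/*
 * Illustrative sketch (not part of the original header): the offset
 * helpers above chain together into the usual software page table walk.
 * A hypothetical lookup of the pte mapping a kernel virtual address could
 * look like this; the function name and error handling are assumptions
 * made for the example only:
 *
 *	static pte_t *example_walk(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		p4d_t *p4d;
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd))
 *			return NULL;
 *		p4d = p4d_offset(pgd, addr);
 *		if (p4d_none(*p4d))
 *			return NULL;
 *		pud = pud_offset(p4d, addr);
 *		if (pud_none(*pud) || pud_large(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd) || pmd_large(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 *
 * On folded levels p4d_offset/pud_offset/pmd_offset simply return their
 * argument cast to the next type, so the same walk works for page tables
 * with two to five levels.
 */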

static inline void pte_unmap(pte_t *pte) { }

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx), convert it to the corresponding segment
	 * table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* IDTE without additional options */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* IDTE with options and ASCE */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* IDTE without additional options */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* IDTE with options and ASCE */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}
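
/*
 * Illustrative note (not from the original source): opcode 0xb98e0000 is
 * INVALIDATE DAT TABLE ENTRY (IDTE). Given the origin of the containing
 * segment or region table and the virtual address, it invalidates the
 * designated table entry and purges the TLB entries formed through it,
 * which is what makes huge pmd/pud entries disappear atomically from the
 * hardware's point of view.
 */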

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
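
/*
 * Illustrative note (not from the original source): transparent huge pages
 * on s390 are 1MB segment mappings, which require the enhanced-DAT
 * facility (EDAT1) in the hardware; 2GB region-third mappings additionally
 * require EDAT2. That is why has_transparent_hugepage() simply reports
 * MACHINE_HAS_EDAT1.
 */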
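
/*
 * Illustrative note (not from the original source), derived from the masks
 * below: a swap pte keeps _PAGE_INVALID (0x400) and _PAGE_PROTECT (0x200)
 * set so the hardware treats it as not present, stores the swap type in
 * bits 2..6 (5 bits, up to 32 swap devices) and the swap offset in bits
 * 12..63. For example, __swp_entry(3, 0x1234) yields a pte value of
 * (0x1234UL << 12) | (3 << 2) | 0x600.
 */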
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */