#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
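/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information, so the cache
 * update hooks below are no-ops.
 */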
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
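/*
 * The vmalloc and module areas sit at the top of the kernel address
 * space; their exact boundaries are set up at boot time. Modules get a
 * 2 GB window of their own (MODULES_LEN) so that branches between
 * kernel and module code stay within relative branch range.
 */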
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
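/* Hardware bits in the page table entry */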
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
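/* Software bits in the page table entry */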
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
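/* Bits in the segment/region table address-space-control element */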
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03
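/* Bits in the region table entry */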
#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_PROTECT	0x200
#define _REGION_ENTRY_NOEXEC	0x100
#define _REGION_ENTRY_OFFSET	0xc0
#define _REGION_ENTRY_INVALID	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL
#define _REGION3_ENTRY_DIRTY	0x2000
#define _REGION3_ENTRY_YOUNG	0x1000
#define _REGION3_ENTRY_LARGE	0x0400
#define _REGION3_ENTRY_READ	0x0002
#define _REGION3_ENTRY_WRITE	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL
#define _SEGMENT_ENTRY_PROTECT		0x200
#define _SEGMENT_ENTRY_NOEXEC		0x100
#define _SEGMENT_ENTRY_INVALID		0x20
#define _SEGMENT_ENTRY_TYPE_MASK	0x0c

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000
#define _SEGMENT_ENTRY_YOUNG	0x1000
#define _SEGMENT_ENTRY_LARGE	0x0400
#define _SEGMENT_ENTRY_WRITE	0x0002
#define _SEGMENT_ENTRY_READ	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _CRST_ENTRIES	2048
#define _PAGE_ENTRIES	256

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
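/* Page status table bits for virtualization */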
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL
#define PGSTE_IN_BIT	0x0000400000000000UL
#define PGSTE_VSIE_BIT	0x0000200000000000UL

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
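/* Page protection definitions */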
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
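/*
 * Protection map: __Pxxx entries are used for private (copy-on-write)
 * mappings and therefore never include write permission, __Sxxx entries
 * are used for shared mappings; xxx encodes the requested
 * read/write/execute bits.
 */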
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
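/*
 * Segment entry (large page) protection definitions.
 */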
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
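/*
 * csp() and cspg() wrap the COMPARE AND SWAP AND PURGE instructions,
 * crdte() wraps COMPARE AND REPLACE DAT TABLE ENTRY; all of them
 * atomically exchange a DAT table entry and purge the affected
 * TLB entries.
 */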
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
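/*
 * pgd/p4d/pud/pmd/pte query functions
 */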
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
		return 1;
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
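/* pgd/p4d/pud/pmd/pte clear functions */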
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
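/*
 * pte_modify() and the pte_mk*() helpers below keep the software
 * read/write/young/dirty bits in sync with the hardware invalid and
 * protect bits: a pte is only valid to the hardware if it is readable
 * and young, and only hardware-writable if it is dirty and writable.
 */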
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
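/*
 * ptep_get_and_clear and ptep_clear_flush both flush the TLB for the
 * cleared pte: on s390 an active pte must be invalidated together with
 * a TLB flush (IPTE), so the flush cannot be deferred to a later
 * flush_tlb_range(). The ptep_xchg_direct/ptep_xchg_lazy helpers below
 * perform the pte exchange together with the required flush.
 */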
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}
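/*
 * ptep_get_and_clear_full is used by the batched unmap code. When
 * "full" is set, tlb_gather_mmu has guaranteed that no other CPU can
 * access the ptes anymore, so a plain store of _PAGE_INVALID is
 * sufficient and the TLB flush can be skipped.
 */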
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
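/*
 * Additional functions to handle KVM guest page tables.
 */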
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
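/*
 * set_pte_at() is the hook used when a pte inside a page table is
 * modified directly. If the mm has PGSTEs the pte is set via
 * ptep_set_pte_at() so that the PGSTE is updated as well.
 */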
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
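/*
 * With dynamic page table folding the top level table may be a region
 * first, region second, region third or segment table. pgd_offset()
 * therefore always indexes the topmost table, while the lower
 * p4d/pud/pmd_offset() helpers only add an index when they actually
 * dereference a higher level entry. A typical lockless walk looks like:
 *
 *	pgdp = pgd_offset(mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	...
 */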
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted
#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif
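/*
 * 64 bit swap entry format, derived from the definitions below:
 * the swap offset is stored above the twelve pte flag bits
 * (__SWP_OFFSET_SHIFT), the swap type uses five bits starting at
 * __SWP_TYPE_SHIFT, and a swap pte carries _PAGE_INVALID | _PAGE_PROTECT
 * but not _PAGE_PRESENT, so it is neither pte_present() nor pte_none().
 */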
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif