/* SPDX-License-Identifier: GPL-2.0 */
/*
 * s390 page table handling: hardware and software page table
 * definitions and helpers.
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
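
/*
 * The s390 MMU takes all information directly from the page tables,
 * so there is no separate MMU cache to keep in sync; these hooks are
 * therefore no-ops.
 */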
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
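
/*
 * ZERO_PAGE: s390 provides several ("colored") zero pages; the macro
 * selects one of them based on the virtual address and zero_page_mask,
 * cf. __HAVE_COLOR_ZERO_PAGE below.
 */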
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
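
/*
 * Page table entry bits. The hardware-defined pte bits are the
 * invalid, protection and (with the instruction-execution-protection
 * facility) no-execute bits; present, young, dirty, read, write,
 * special and unused are software bits kept in the pte to emulate the
 * accessed/dirty semantics expected by the common mm code.
 */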
#define _PAGE_NOEXEC	0x100
#define _PAGE_PROTECT	0x200
#define _PAGE_INVALID	0x400
#define _PAGE_LARGE	0x800

#define _PAGE_PRESENT	0x001
#define _PAGE_YOUNG	0x004
#define _PAGE_DIRTY	0x008
#define _PAGE_READ	0x010
#define _PAGE_WRITE	0x020
#define _PAGE_SPECIAL	0x040
#define _PAGE_UNUSED	0x080

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
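
/*
 * Bits in the address-space-control element (ASCE) and in region and
 * segment table entries. The low bits of an entry encode the table
 * type (region 1/2/3 or segment), the table length and the invalid and
 * protection flags; the origin mask extracts the address of the next
 * lower table level.
 */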
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03

#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_PROTECT	0x200
#define _REGION_ENTRY_NOEXEC	0x100
#define _REGION_ENTRY_OFFSET	0xc0
#define _REGION_ENTRY_INVALID	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL
#define _REGION3_ENTRY_DIRTY	0x2000
#define _REGION3_ENTRY_YOUNG	0x1000
#define _REGION3_ENTRY_LARGE	0x0400
#define _REGION3_ENTRY_READ	0x0002
#define _REGION3_ENTRY_WRITE	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL
#define _SEGMENT_ENTRY_PROTECT		0x200
#define _SEGMENT_ENTRY_NOEXEC		0x100
#define _SEGMENT_ENTRY_INVALID		0x20
#define _SEGMENT_ENTRY_TYPE_MASK	0x0c

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000
#define _SEGMENT_ENTRY_YOUNG	0x1000
#define _SEGMENT_ENTRY_LARGE	0x0400
#define _SEGMENT_ENTRY_WRITE	0x0002
#define _SEGMENT_ENTRY_READ	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _CRST_ENTRIES	2048
#define _PAGE_ENTRIES	256

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
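
/*
 * Guest page status table (PGSTE) bits. Each PGSTE shadows one pte and
 * is used by the KVM/gmap code to track guest storage keys, host and
 * guest reference/change state and the CMMA page usage states.
 */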
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL
#define PGSTE_IN_BIT	0x0000400000000000UL
#define PGSTE_VSIE_BIT	0x0000200000000000UL

#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
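
/*
 * Page protection definitions. A pte that is not young keeps the
 * hardware invalid bit set, and a pte that is not dirty keeps the
 * hardware protect bit set, so that access and write faults can be
 * used to maintain the software young and dirty bits.
 */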
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
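
/*
 * Protection maps for mmap(): the __P... entries are used for private
 * (copy-on-write) mappings and therefore never grant write access at
 * the pte level; the __S... entries are used for shared mappings.
 */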
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
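
/*
 * csp/cspg compare-and-swap a (double)word and purge the associated
 * TLB entries on all CPUs; crdte compares and replaces a DAT table
 * entry of the given type. They are used to exchange live page table
 * entries atomically.
 */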
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
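
/*
 * pgd/p4d/pud entries are "folded" when the process uses fewer than
 * five page table levels: an entry whose type field is lower than the
 * level being queried actually belongs to a lower-level table, so it
 * is reported as present and never as none or bad at this level.
 */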
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
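
/*
 * pte modification helpers. Since the hardware has no referenced or
 * dirty bit in the pte, a pte that is not young is kept invalid and a
 * pte that is not dirty is kept write-protected, so the resulting
 * faults can be used to set the software young/dirty bits.
 */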
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
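
/*
 * __ptep_ipte: invalidate a pte and purge the TLB entry with the IPTE
 * instruction, either on the local CPU only or on all CPUs. The NODAT
 * and guest-ASCE options are passed in the optional third operand.
 */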
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
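
/*
 * ptep_xchg_direct() exchanges a pte and performs the required TLB
 * invalidation immediately; ptep_xchg_lazy() may leave the flush
 * pending while no other CPU has the mm attached. Both are implemented
 * in arch/s390/mm/pgtable.c.
 */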
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
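
/*
 * set_pte_at() installs a pte. For mms with PGSTEs (KVM guests) the
 * pte and its PGSTE must be updated together, which ptep_set_pte_at()
 * takes care of; otherwise the pte is stored directly.
 */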
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage | pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}
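
/*
 * pgd_offset_raw() derives the index shift from the type of the top
 * level table (region 1/2/3 or segment), so pgd_offset() always indexes
 * the actual top-level table. p4d_offset(), pud_offset() and
 * pmd_offset() only add an index when the entry they were handed really
 * points to a lower-level table; otherwise they pass the pointer
 * through unchanged, which implements the folding of unused levels.
 */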
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	rste = pgd_val(*pgd);
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000
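
/*
 * __pmdp_idte/__pudp_idte: invalidate a segment or region-3 table entry
 * and purge the TLB with the IDTE instruction. The first operand is the
 * origin of the table containing the entry, so the entry index is
 * subtracted from the entry address first.
 */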
static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif
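
/*
 * Swap pte encoding: a swap pte is marked by having the hardware
 * protect bit set while the software present bit is clear
 * ((pte & 0x201) == 0x200). The remaining free bits hold a 5-bit swap
 * type and a 52-bit swap offset, extracted with the masks and shifts
 * below.
 */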
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_S390_PGTABLE_H */
1717