linux/arch/s390/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
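/*
 * Illustrative sketch only, not part of this header: changing the
 * protection bits of a pte that may still be attached to a running CPU
 * follows the two-step rule quoted above. The helpers ipte_flush() and
 * pte_store() are hypothetical stand-ins for the architecture
 * primitives (an IPTE wrapper and a plain pte store):
 *
 *	pte_t old = *ptep;
 *
 *	step i:  invalidate the pte and purge the TLB entry (IPTE)
 *		ipte_flush(mm, addr, ptep);
 *	step ii: only now store the new pte value
 *		pte_store(ptep, pte_wrprotect(old));
 */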

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* RHEL-only, missing 9de7d833e370 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

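/*
 * Page-table pages cannot be freed while another CPU may still be walking
 * them. mmu_table_batch collects the table pointers queued with
 * tlb_remove_table() so that the underlying pages are only released once
 * it is safe to do so (after an RCU grace period or an equivalent
 * synchronization); the actual freeing is implemented outside this header.
 */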
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	/* fullmm: the whole address space is going away (start == 0, end == -1UL) */
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}

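/*
 * Illustrative call flow only; the callers live in the generic mm code,
 * not in this header. A typical range unmap drives the hooks above
 * roughly as follows (a sketch, using the generic helpers of this
 * kernel generation):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	-> arch_tlb_gather_mmu()
 *	... zap the ptes in [start, end) ...
 *	tlb_finish_mmu(&tlb, start, end);	-> arch_tlb_finish_mmu(), flush + free
 */
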
/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, pud);
}

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */