/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

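/*
 * Track memory that userspace has pre-registered for DMA through the
 * SPAPR TCE IOMMU (VFIO is the in-tree user).  Regions are keyed by
 * userspace address (ua); lookups return the pinned region so TCEs can
 * be built from host physical addresses (hpa).  The _rm variants are
 * for real-mode (KVM) callers.
 */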
extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
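/*
 * Sketch of the expected calling sequence (assuming a VFIO-style user):
 *
 *	mm_iommu_get(mm, ua, entries, &mem);	// pin and register
 *	mm_iommu_ua_to_hpa(mem, ua, &hpa);	// translate for TCE setup
 *	mm_iommu_mapped_inc(mem);		// while hardware maps it
 *	...
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(mm, mem);			// unregister and unpin
 */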
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
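/*
 * Book3S 64 picks the context switch flavour at runtime: the Radix MMU
 * has its own context switch, while the hash MMU switches SLB entries
 * instead.
 */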
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}
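/*
 * Hash MMU context IDs come from a global allocator;
 * hash__reserve_context_id() marks a specific ID as taken.  There is
 * no boot-time context setup on Book3S 64, hence the empty
 * mmu_context_init().
 */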
extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
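/*
 * Works around a POWER9 prefetch issue seen when KVM HV runs radix
 * guests; it compiles away to a no-op unless both KVM HV and the Radix
 * MMU are configured.
 */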
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif
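/* Coprocessor (ACOP register) context management for icswx */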
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

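/*
 * switch_mm() is the interrupts-on wrapper: the real work happens in
 * switch_mm_irqs_off(), which callers that already run with interrupts
 * disabled (e.g. the scheduler) can use directly.
 */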
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
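/*
 * Defining switch_mm_irqs_off tells the generic code that this
 * architecture provides its own IRQs-off variant, so the generic
 * fallback (which just calls switch_mm()) is not used.
 */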
#define switch_mm_irqs_off switch_mm_irqs_off
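/* powerpc has no arch-specific work to do when an mm is deactivated */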
#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

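/*
 * If the range being unmapped covers the VDSO, forget its base address
 * so stale pointers (e.g. for the signal trampoline) are not used.
 */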
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */