linux/arch/powerpc/include/asm/mmu_context.h
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page); /* from internal.h */
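/*
 * User-memory pre-registration used by the SPAPR TCE IOMMU (VFIO) path:
 * mm_iommu_get() pins a range of user pages, mm_iommu_ua_to_hpa()
 * translates a userspace address within such a range to a host physical
 * address, and mm_iommu_mapped_inc()/_dec() track whether hardware TCEs
 * still reference the region.
 */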
extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif

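/*
 * Low-level context switch helpers: 64-bit hash MMUs switch either the
 * SLB or, on older CPUs, the segment table (STAB), while 32-bit and
 * Book3E parts go through switch_mmu_context()/set_context().
 */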
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

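/*
 * ICSWX coprocessor support: use_cop()/drop_cop() maintain the ACOP mask
 * in the mm context, and switch_cop() propagates it to the hardware when
 * switching to an mm that uses a coprocessor.
 */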
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/*
	 * This full barrier is needed by membarrier when switching
	 * between processes, after the store to rq->curr and before
	 * user-space memory accesses.
	 */
	smp_mb();

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	membarrier_arch_switch_mm(prev, next, tsk);
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
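/* Tell the generic code that this architecture provides switch_mm_irqs_off() */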
#define switch_mm_irqs_off switch_mm_irqs_off

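/* No arch-specific work is needed when an mm is deactivated */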
#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

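/* Nothing to do when an address space is duplicated (fork) or torn down */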
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

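/*
 * If the VDSO is unmapped, forget its base address so the kernel does not
 * keep using the stale mapping (e.g. for signal trampolines).
 */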
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */