linux/arch/powerpc/include/asm/mmu_context.h
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
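/*
 * Illustrative only, not part of this header's contract: the generic mm
 * code drives the pair above, roughly init_new_context() from
 * mm_init()/dup_mm() in kernel/fork.c when an address space is created,
 * and destroy_context() from __mmdrop() when its last reference is gone.
 */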
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
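/*
 * Illustrative only, a minimal sketch of how a VFIO-style caller might
 * drive the preregistration API above (error handling elided; @ua and
 * @entries are assumed to describe a pinned userspace region):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (!mm_iommu_get(current->mm, ua, entries, &mem)) {
 *		if (!mm_iommu_ua_to_hpa(mem, ua, &hpa))
 *			... program the TCE table with hpa ...
 *		mm_iommu_put(current->mm, mem);
 *	}
 */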
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
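/*
 * Note (editorial): on Book3S 64 the switch path depends on the active
 * MMU model. Radix switches the hardware PID to @next's context, while
 * the hash MMU repopulates the SLB via switch_slb().
 */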
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

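/*
 * Illustrative only (assumes CONFIG_PPC_ICSWX): a driver would pair
 * use_cop() and drop_cop() around a coprocessor session, roughly:
 *
 *	if (!use_cop(acop, current->mm)) {
 *		... issue icswx requests ...
 *		drop_cop(acop, current->mm);
 *	}
 */
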
/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all AltiVec data streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}
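/*
 * Illustrative only, a simplified sketch (not the exact upstream code) of
 * how context_switch() in kernel/sched/core.c reaches this. Interrupts
 * are off and the runqueue lock is held; a kernel thread has no mm of
 * its own and borrows the previous one instead of switching:
 *
 *	if (!mm) {
 *		next->active_mm = oldmm;
 *		atomic_inc(&oldmm->mm_count);
 *		enter_lazy_tlb(oldmm, next);
 *	} else
 *		switch_mm(oldmm, mm, next);
 */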

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

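/*
 * Illustrative only: activate_mm() is reached from exec_mmap() in
 * fs/exec.c when exec installs a fresh address space, roughly:
 *
 *	task_lock(tsk);
 *	active_mm = tsk->active_mm;
 *	tsk->mm = tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);
 *	task_unlock(tsk);
 */
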
/*
 * We don't currently use enter_lazy_tlb() for anything beyond letting
 * 64-bit Book3E forget its cached PGD
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

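/*
 * Note (editorial): if the range being unmapped covers the VDSO, forget
 * its base address so a later signal return doesn't bounce through a
 * stale (or since-reused) trampoline mapping.
 */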
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */