linux/arch/s390/include/asm/mmu_context.h
/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

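/*
 * Initialize the mm context of a new process: set up the page table and
 * gmap bookkeeping and build an ASCE that matches the address space limit
 * inherited from the parent (a fresh mm created by exec gets the 4TB
 * default).
 */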
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        spin_lock_init(&mm->context.pgtable_lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        spin_lock_init(&mm->context.gmap_lock);
        INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.flush_count, 0);
        mm->context.gmap_asce = 0;
        mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste;
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
#endif
        switch (mm->context.asce_limit) {
        case 1UL << 42:
                /*
                 * forked 3-level task, fall through to set new asce with new
                 * mm->pgd
                 */
        case 0:
                /* context created by exec, set asce limit to 4TB */
                mm->context.asce_limit = STACK_TOP_MAX;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
                break;
        case 1UL << 53:
                /* forked 4-level task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                break;
        case 1UL << 31:
                /* forked 2-level compat task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
                /* pgd_alloc() did not increase mm->nr_pmds */
                mm_inc_nr_pmds(mm);
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}

#define destroy_context(mm)             do { } while (0)

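/*
 * Make the given mm's ASCE the current user ASCE: publish it in the
 * lowcore, load it into control register 7 right away when the uaccess
 * code runs in access-register mode, and set CIF_ASCE so the ASCE is
 * also reloaded on the next return to user space.
 */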
static inline void set_user_asce(struct mm_struct *mm)
{
        S390_lowcore.user_asce = mm->context.asce;
        if (current->thread.mm_segment.ar4)
                __ctl_load(S390_lowcore.user_asce, 7, 7);
        set_cpu_flag(CIF_ASCE);
}

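/*
 * Drop the user ASCE by loading the kernel ASCE into control
 * registers 1 and 7.
 */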
static inline void clear_user_asce(void)
{
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.user_asce, 1, 1);
        __ctl_load(S390_lowcore.user_asce, 7, 7);
}

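/*
 * Make sure control register 1 contains the kernel ASCE, and set
 * CIF_ASCE so the user ASCE is re-established before the next return
 * to user space.
 */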
static inline void load_kernel_asce(void)
{
        unsigned long asce;

        __ctl_store(asce, 1, 1);
        if (asce != S390_lowcore.kernel_asce)
                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        set_cpu_flag(CIF_ASCE);
}

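/*
 * Switch to the next mm: publish its ASCE in the lowcore and attach this
 * CPU to the new mm's cpumasks. Only the kernel ASCE is loaded here; the
 * user ASCE of the next mm is established after the scheduler locks have
 * been dropped, in finish_arch_post_lock_switch().
 */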
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        S390_lowcore.user_asce = next->context.asce;
        if (prev == next)
                return;
        cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(next));
        /* Clear old ASCE by loading the kernel ASCE. */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

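/*
 * Called once the scheduler has dropped its locks: wait for concurrent
 * TLB flushers to finish, perform a pending mm flush, and re-establish
 * the saved address space mode for user space accesses.
 */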
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        load_kernel_asce();
        if (mm) {
                preempt_disable();
                while (atomic_read(&mm->context.flush_count))
                        cpu_relax();

                if (mm->context.flush_mm)
                        __tlb_flush_mm(mm);
                preempt_enable();
        }
        set_fs(current->thread.mm_segment);
}

#define enter_lazy_tlb(mm,tsk)  do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

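/*
 * activate_mm() is used during exec; unlike switch_mm() it loads the
 * new user ASCE right away.
 */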
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
        set_user_asce(next);
}

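/*
 * s390 needs no architecture-specific work in the remaining generic mm
 * hooks; the access-permission helpers simply allow everything.
 */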
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
                        struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* by default, allow everything */
        return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
        /* by default, allow everything */
        return true;
}
#endif /* __S390_MMU_CONTEXT_H */