linux/arch/s390/include/asm/mmu_context.h
/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

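/*
 * Initialize the architecture specific part of a new mm_struct:
 * set up the page table and gmap lists, reset the attach bookkeeping
 * and build the address space control element (ASCE) that matches the
 * page table layout of the new address space.
 */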
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        spin_lock_init(&mm->context.list_lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste;
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
#endif
        switch (mm->context.asce_limit) {
        case 1UL << 42:
                /*
                 * forked 3-level task, fall through to set new asce with new
                 * mm->pgd
                 */
        case 0:
                /* context created by exec, set asce limit to 4TB */
                mm->context.asce_limit = STACK_TOP_MAX;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
                break;
        case 1UL << 53:
                /* forked 4-level task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                break;
        case 1UL << 31:
                /* forked 2-level compat task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
                /* pgd_alloc() did not increase mm->nr_pmds */
                mm_inc_nr_pmds(mm);
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}

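/* No architecture specific teardown is needed when an mm context goes away. */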
#define destroy_context(mm)             do { } while (0)

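/*
 * Install the current mm's ASCE as the user ASCE: remember it in the
 * lowcore, load it into the secondary space control register (CR7) if
 * the current mm_segment addresses user memory through the secondary
 * space, and set CIF_ASCE to flag a pending user ASCE load.
 */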
static inline void set_user_asce(struct mm_struct *mm)
{
        S390_lowcore.user_asce = mm->context.asce;
        if (current->thread.mm_segment.ar4)
                __ctl_load(S390_lowcore.user_asce, 7, 7);
        set_cpu_flag(CIF_ASCE);
}

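/*
 * Detach the user address space: replace the lowcore user ASCE with the
 * kernel ASCE and load it into both the primary (CR1) and secondary
 * (CR7) space control registers.
 */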
static inline void clear_user_asce(void)
{
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.user_asce, 1, 1);
        __ctl_load(S390_lowcore.user_asce, 7, 7);
}

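/*
 * Make sure the primary space control register (CR1) points to the
 * kernel ASCE and set CIF_ASCE to flag that the user ASCE has to be
 * loaded again before returning to user space.
 */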
static inline void load_kernel_asce(void)
{
        unsigned long asce;

        __ctl_store(asce, 1, 1);
        if (asce != S390_lowcore.kernel_asce)
                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        set_cpu_flag(CIF_ASCE);
}

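/*
 * Switch the user address space: remember the next mm's ASCE in the
 * lowcore, detach from the old address space by loading the kernel ASCE
 * into CR1 and CR7, and move this CPU's attach bookkeeping from prev to
 * next. The new user ASCE itself is only installed later, once it is
 * safe to do so.
 */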
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        S390_lowcore.user_asce = next->context.asce;
        if (prev == next)
                return;
        if (MACHINE_HAS_TLB_LC)
                cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        /* Clear old ASCE by loading the kernel ASCE. */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        atomic_inc(&next->context.attach_count);
        atomic_dec(&prev->context.attach_count);
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

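/*
 * Second half of the address space switch, run once the scheduler locks
 * have been dropped: re-establish the kernel ASCE, wait until no TLB
 * flush of the mm is in flight (tracked in the upper 16 bits of
 * attach_count), attach this CPU to the mm, carry out a deferred mm
 * flush if one is pending and restore the address space limit.
 */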
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        load_kernel_asce();
        if (mm) {
                preempt_disable();
                while (atomic_read(&mm->context.attach_count) >> 16)
                        cpu_relax();

                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                if (mm->context.flush_mm)
                        __tlb_flush_mm(mm);
                preempt_enable();
        }
        set_fs(current->thread.mm_segment);
}

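/* Lazy TLB mode and mm deactivation need no extra work on s390. */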
#define enter_lazy_tlb(mm,tsk)  do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

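/*
 * Activate a new user address space: perform the mm switch, mark this
 * CPU in the mm's cpumask and install the new user ASCE via
 * set_user_asce().
 */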
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        set_user_asce(next);
}

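/* The mm hooks below need no architecture specific handling on s390. */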
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
                        struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* by default, allow everything */
        return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
        /* by default, allow everything */
        return true;
}
#endif /* __S390_MMU_CONTEXT_H */