/* linux/arch/unicore32/include/asm/mmu_context.h */
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * linux/arch/unicore32/include/asm/mmu_context.h
   4 *
   5 * Code specific to PKUnity SoC and UniCore ISA
   6 *
   7 * Copyright (C) 2001-2010 GUAN Xue-tao
   8 */
   9#ifndef __UNICORE_MMU_CONTEXT_H__
  10#define __UNICORE_MMU_CONTEXT_H__
  11
  12#include <linux/compiler.h>
  13#include <linux/sched.h>
  14#include <linux/mm.h>
  15#include <linux/vmacache.h>
  16#include <linux/io.h>
  17
  18#include <asm/cacheflush.h>
  19#include <asm/cpu-single.h>
  20
/*
 * UniCore keeps no per-mm architecture context: creating a new context
 * always succeeds (0) and tearing one down is a no-op.
 */
#define init_new_context(tsk, mm)       0

#define destroy_context(mm)             do { } while (0)
  24
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do: lazy TLB needs no special handling on UniCore. */
}
  38
  39/*
  40 * This is the actual mm switch as far as the scheduler
  41 * is concerned.  No registers are touched.  We avoid
  42 * calling the CPU specific function when the mm hasn't
  43 * actually changed.
  44 */
  45static inline void
  46switch_mm(struct mm_struct *prev, struct mm_struct *next,
  47          struct task_struct *tsk)
  48{
  49        unsigned int cpu = smp_processor_id();
  50
  51        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
  52                cpu_switch_mm(next->pgd, next);
  53}
  54
/* No architecture work is needed when an mm is deactivated. */
#define deactivate_mm(tsk, mm)  do { } while (0)
/* Activating an mm is just a scheduler-style mm switch with no task. */
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
  57
/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit or the unmapping of it would cause total havoc.
 * (the macro is used as remove_vma() is static to mm/mmap.c)
 */
#define arch_exit_mmap(mm) \
do { \
	/* 0xffff0000 is the user-space address of the vector page */ \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		/* unlink it from the mm's linear vma list ... */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		/* ... and from the rbtree; drop stale vma-cache entries */ \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		vmacache_invalidate(mm); \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
  80
/*
 * Duplicating an mm (fork) needs no architecture-specific work on
 * UniCore; always report success.
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}
  86
/*
 * Hook called when [start, end) is unmapped from mm; UniCore has
 * nothing to clean up here.
 */
static inline void arch_unmap(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
}
  91
/*
 * Hook called while setting up a new mm during exec; no UniCore
 * specific initialisation is required.
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
  96
/*
 * Per-vma access check hook (used e.g. by protection-key
 * architectures); UniCore has no such mechanism, so every access —
 * read, write, execute, foreign — is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
 103#endif
 104