linux/arch/microblaze/include/asm/mmu_context_mm.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>

# ifdef __KERNEL__
/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.
 */
# define CTX_TO_VSID(ctx, va)   (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
                                 & 0xffffff)
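
/*
 * Worked example (illustrative): with ctx == 5 and a virtual address
 * in segment 0xB (va >> 28 == 0xB),
 *
 *      5 * (897 * 16)            = 71760
 *      0xB * 0x111               = 3003
 *      (71760 + 3003) & 0xffffff = 74763
 *
 * so consecutive contexts land 14352 VSIDs apart and adjacent segments
 * 0x111 apart, spreading the entries across the hash table.
 */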

/*
 * MicroBlaze has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 */

/* Nothing to do when entering lazy TLB mode; this hook is a no-op here. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

# define NO_CONTEXT     256
# define LAST_CONTEXT   255
# define FIRST_CONTEXT  1
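
/*
 * A note on the numbering, inferred from this header: hardware TIDs run
 * 0..LAST_CONTEXT, with TID 0 left to the kernel as described above, so
 * user contexts start at FIRST_CONTEXT.  NO_CONTEXT lies outside that
 * range and serves as a "no context assigned yet" sentinel (see
 * init_new_context() and get_mmu_context() below).
 */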

/*
 * Set the current MMU context.
 * This is done by loading up the segment registers for the user part of the
 * address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only; we can't rely on this context
 * number to be free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * Since we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

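/*
 * A minimal sketch of what stealing looks like, assuming the usual
 * scheme for this style of context allocator (the authoritative version
 * lives in arch/microblaze/mm/mmu_context.c):
 *
 *      struct mm_struct *mm = context_mm[next_mmu_context];
 *      flush_tlb_mm(mm);       (evict the victim's stale TLB entries)
 *      destroy_context(mm);    (return its bit to context_map)
 *
 * The victim mm transparently reacquires a context via
 * get_mmu_context() the next time it is switched in.
 */
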
/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
        mm_context_t ctx;

        if (mm->context != NO_CONTEXT)
                return;
        /* Reserve a slot; if none are free, steal one from another mm. */
        while (atomic_dec_if_positive(&nr_free_contexts) < 0)
                steal_context();
        /*
         * Scan for a free context number starting at the cached hint,
         * wrapping past LAST_CONTEXT back to 0.  Context 0 is reserved
         * for the kernel (FIRST_CONTEXT is 1), so a wrap simply rescans
         * from the bottom of the map.
         */
        ctx = next_mmu_context;
        while (test_and_set_bit(ctx, context_map)) {
                ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
                if (ctx > LAST_CONTEXT)
                        ctx = 0;
        }
        next_mmu_context = (ctx + 1) & LAST_CONTEXT;
        mm->context = ctx;
        context_mm[ctx] = mm;
}
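
/*
 * Lifecycle, in short: init_new_context() marks a fresh mm as
 * NO_CONTEXT; the first switch_mm()/activate_mm() on that mm then
 * allocates a real context lazily via get_mmu_context(); and
 * destroy_context() returns the number when the address space is
 * torn down.
 */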

/*
 * Set up the context for a new address space.  The comma expression
 * assigns NO_CONTEXT and evaluates to 0, i.e. unconditional success.
 */
# define init_new_context(tsk, mm)      (((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                clear_bit(mm->context, context_map);
                mm->context = NO_CONTEXT;
                atomic_inc(&nr_free_contexts);
        }
}

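/*
 * Switch the MMU state to the new address space: remember the new pgd
 * in thread.pgdir (consulted by the low-level fault/TLB-miss paths),
 * allocate a context on first use, and load it into the hardware PID
 * via set_context().
 */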
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        tsk->thread.pgdir = next->pgd;
        get_mmu_context(next);
        set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm,
                        struct mm_struct *mm)
{
        current->thread.pgdir = mm->pgd;
        get_mmu_context(mm);
        set_context(mm->context, mm->pgd);
}

extern void mmu_context_init(void);

# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */