linux/arch/blackfin/include/asm/mmu_context.h
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);

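/*
 * The user stack lives in a single shared chunk of scratchpad L1 SRAM;
 * nr_l1stack_tasks counts its users, and the SRAM is handed back once
 * the last task using it goes away.
 */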
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0)
		l1sram_free(l1_stack_base);
}

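/*
 * On first use, grab the largest free block of scratchpad L1 SRAM for the
 * shared stack area.  Returns the usable length, or 0 if the allocation
 * failed or the area is smaller than the requested length.
 */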
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

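/*
 * Save the currently installed L1 stack contents back to its save area,
 * then record sp_base as the new save area and copy its contents into
 * the shared L1 stack region.
 */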
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)

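/*
 * Switch the MPU permission masks over to the incoming mm and, when
 * CONFIG_APP_STACK_L1 is enabled, swap the incoming task's stack into
 * the shared L1 SRAM region.
 */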
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			     struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

#ifdef CONFIG_MPU
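/*
 * Record the VM_READ/VM_WRITE/VM_EXEC permissions for one page in the
 * per-mm page_rwx_mask: three bitmaps (read, write, execute) laid out
 * back to back, page_mask_nelts words each.
 */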
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page = addr >> 12;
	unsigned long idx = page >> 5;
	unsigned long bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

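/*
 * If this mm's permission bitmaps are the ones currently loaded on this
 * CPU, flush the switched CPLBs and reload the masks so the updated
 * protections take effect.
 */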
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#endif

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

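/*
 * Tear down per-mm state: release our reference on the shared L1 stack,
 * free any SRAM chunks still on the mm's sram_list, and drop the MPU
 * permission bitmaps.
 */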
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

#endif