linux/arch/tile/include/asm/mmu_context.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

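/*
 * No per-mm context state is needed on tile: ASIDs are assigned
 * per-cpu in switch_mm() below, so there is nothing to set up when
 * a new mm is created.
 */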
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

/*
 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
 * also call hv_install_context().
 */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
        /* FIXME: DIRECTIO should not always be set here. */
        int rc = hv_install_context(__pa(pgdir), prot, asid,
                                    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
        if (rc < 0)
                panic("hv_install_context failed: %d", rc);
}

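/*
 * Look up the kernel PTE that maps the pgd page itself and reuse its
 * pgprot when installing the context, so the page table is accessed
 * with that page's own caching attributes (cf. <asm/homecache.h>).
 */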
static inline void install_page_table(pgd_t *pgdir, int asid)
{
        pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
        __install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task.  The goal of this
 * optimization is to avoid having to install a new page table.  On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page-table install to invalidate a
 * mapping, so handling a remote TLB invalidate required re-installing
 * the page table.  Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead the first time a remote
 * invalidate occurs, and clearing the cpu out of cpu_vm_mask, so that
 * the cpu running the kernel task doesn't need to take any more
 * interrupts.  It is then necessary to explicitly reinstall the
 * original page table when context switching back to the original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway.  More generally, we
 * have efficient per-page TLB shootdown, and don't expect to spend
 * much time in kernel tasks, so simply leaving the kernel task
 * borrowing the old page table, while still handling TLB shootdowns,
 * is a reasonable approach.  Importantly, this lets us use the
 * hypervisor's internal APIs for TLB shootdown, which means we don't
 * have to worry about TLB shootdowns being blocked while Linux has
 * interrupts disabled; see the page migration code for an example of
 * where it's important for TLB shootdowns to complete even when
 * interrupts are disabled at the Linux level.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
        /*
         * We have to do an "identity" page table switch in order to
         * clear any pending DMA interrupts.
         */
        if (current->thread.tile_dma_state.enabled)
                install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}

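/*
 * Illustrative sketch only, not part of this header: because a cpu
 * that enters lazy mode stays in mm_cpumask(mm), a generic shootdown
 * initiated elsewhere still reaches it, so the borrowed page table
 * never goes stale.  A hypothetical caller unmapping a page from "mm"
 * might do:
 *
 *      ptep_get_and_clear(mm, addr, ptep);
 *      flush_tlb_page(vma, addr);  // covers cpus lazily borrowing mm
 */
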
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (likely(prev != next)) {

                int cpu = smp_processor_id();

                /* Pick new ASID. */
                int asid = __this_cpu_read(current_asid) + 1;
                if (asid > max_asid) {
                        asid = min_asid;
                        local_flush_tlb();
                }
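                /*
                 * Worked example (values illustrative, not the actual
                 * min_asid/max_asid): if min_asid were 1 and max_asid
                 * 255, the 256th switch on this cpu would wrap back
                 * to ASID 1.  The local TLB flush above is what makes
                 * recycling old ASIDs safe, since stale translations
                 * tagged with them may still be resident.
                 */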
                __this_cpu_write(current_asid, asid);

                /* Clear cpu from the old mm, and set it in the new one. */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                install_page_table(next->pgd, asid);

                /* See how we should set the red/black cache info */
                check_mm_caching(prev, next);

                /*
                 * Since we're changing to a new mm, we have to flush
                 * the icache in case some physical page now being mapped
                 * has subsequently been repurposed and has new code.
                 */
                __flush_icache();

        }
}

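/*
 * activate_mm() is called when an mm first becomes current, e.g. at
 * exec time; switch_mm() above never touches its task argument, so
 * passing NULL here is safe.
 */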
static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        switch_mm(prev_mm, next_mm, NULL);
}

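/* Nothing to tear down: init_new_context() allocated no state. */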
#define destroy_context(mm)             do { } while (0)
#define deactivate_mm(tsk, mm)          do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */