linux/mm/mmu_context.c
/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/*
 * use_mm
 *      Makes the calling kernel thread take on the specified
 *      mm context.
 *      (Note: this routine is intended to be called only
 *      from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
        struct mm_struct *active_mm;
        struct task_struct *tsk = current;

        task_lock(tsk);
        active_mm = tsk->active_mm;
        if (active_mm != mm) {
                /*
                 * Take an mm_count reference so the mm_struct cannot
                 * be freed while it sits in our active_mm slot.
                 */
                atomic_inc(&mm->mm_count);
                tsk->active_mm = mm;
        }
        tsk->mm = mm;
        /* Switch this CPU's page tables over to the new mm. */
        switch_mm(active_mm, mm, tsk);
        task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
        /* Some architectures need a fixup after the lock is dropped. */
        finish_arch_post_lock_switch();
#endif

        /* Release the reference held on the mm we displaced. */
        if (active_mm != mm)
                mmdrop(active_mm);
}
EXPORT_SYMBOL_GPL(use_mm);
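
/*
 * Illustration (not part of the original file): a sketch of the
 * submitter side of this API.  A task that wants a kernel thread to
 * act on its address space must pin the mm with an mm_users reference
 * before handing it over; use_mm() itself only takes an mm_count
 * reference, which keeps the mm_struct allocated but does not keep
 * the page tables alive.  'struct my_work', submit_from_user_context()
 * and queue_my_work() are hypothetical names.
 */
#if 0	/* example only, never compiled */
struct my_work {
        struct mm_struct *mm;   /* address space the worker should adopt */
};

static int submit_from_user_context(struct my_work *work)
{
        /* get_task_mm() takes an mm_users reference (NULL for kthreads) */
        work->mm = get_task_mm(current);
        if (!work->mm)
                return -EINVAL;
        queue_my_work(work);    /* hypothetical hand-off to the worker */
        return 0;
}
#endif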

/*
 * unuse_mm
 *      Reverses the effect of use_mm(), i.e. releases the
 *      specified mm context which was earlier taken on
 *      by the calling kernel thread.
 *      (Note: this routine is intended to be called only
 *      from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);
        /* Fold this task's cached RSS counter deltas back into the mm. */
        sync_mm_rss(mm);
        tsk->mm = NULL;
        /* active_mm is still 'mm' */
        enter_lazy_tlb(mm, tsk);
        task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(unuse_mm);
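
/*
 * Illustration (not part of the original file): the matching worker
 * side of the sketch above.  The kernel thread adopts the submitter's
 * mm, performs its user-space accesses, then detaches and drops the
 * mm_users reference the submitter took.  This mirrors the pattern
 * used by in-tree callers such as the aio and vhost code;
 * process_my_work() is a hypothetical name.
 */
#if 0	/* example only, never compiled */
static void process_my_work(struct my_work *work)
{
        use_mm(work->mm);       /* adopt the user address space */

        /*
         * copy_from_user()/copy_to_user() now operate on work->mm;
         * without use_mm() a kernel thread has no user mappings.
         */

        unuse_mm(work->mm);     /* detach; active_mm stays 'mm' lazily */
        mmput(work->mm);        /* drop the submitter's mm_users reference */
}
#endif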