/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
  13/*
  14 * use_mm
  15 *      Makes the calling kernel thread take on the specified
  16 *      mm context.
  17 *      Called by the retry thread execute retries within the
  18 *      iocb issuer's mm context, so that copy_from/to_user
  19 *      operations work seamlessly for aio.
  20 *      (Note: this routine is intended to be called only
  21 *      from a kernel thread context)
  22 */
  23void use_mm(struct mm_struct *mm)
  24{
  25        struct mm_struct *active_mm;
  26        struct task_struct *tsk = current;
  27
  28        task_lock(tsk);
  29        active_mm = tsk->active_mm;
  30        if (active_mm != mm) {
  31                atomic_inc(&mm->mm_count);
  32                tsk->active_mm = mm;
  33        }
  34        tsk->mm = mm;
  35        switch_mm(active_mm, mm, tsk);
  36        task_unlock(tsk);
  37
  38        if (active_mm != mm)
  39                mmdrop(active_mm);
  40}
  41EXPORT_SYMBOL_GPL(use_mm);
  42
  43/*
  44 * unuse_mm
  45 *      Reverses the effect of use_mm, i.e. releases the
  46 *      specified mm context which was earlier taken on
  47 *      by the calling kernel thread
  48 *      (Note: this routine is intended to be called only
  49 *      from a kernel thread context)
  50 */
  51void unuse_mm(struct mm_struct *mm)
  52{
  53        struct task_struct *tsk = current;
  54
  55        task_lock(tsk);
  56        sync_mm_rss(tsk, mm);
  57        tsk->mm = NULL;
  58        /* active_mm is still 'mm' */
  59        enter_lazy_tlb(mm, tsk);
  60        task_unlock(tsk);
  61}
  62EXPORT_SYMBOL_GPL(unuse_mm);
  63