/* linux/mm/mmu_context.c */
/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/export.h>

#include <asm/mmu_context.h>
/*
 * use_mm
 *      Makes the calling kernel thread take on the specified
 *      mm context.
 *      (Note: this routine is intended to be called only
 *      from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	/* Remember the previous active_mm so its reference can be
	 * dropped once we are out of the lock. */
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		/* Pin the new mm before publishing it in the task. */
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	/* Switch this CPU over to the new address space while still
	 * holding the task lock. */
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	/* Some architectures defer part of the mm switch until after
	 * the lock is released; give them that hook here. */
	finish_arch_post_lock_switch();
#endif

	/* Drop the reference on the old mm only after unlocking, and
	 * only if it was actually replaced above. */
	if (active_mm != mm)
		mmdrop(active_mm);
}
EXPORT_SYMBOL_GPL(use_mm);
  44
/*
 * unuse_mm
 *      Reverses the effect of use_mm, i.e. releases the
 *      specified mm context which was earlier taken on
 *      by the calling kernel thread
 *      (Note: this routine is intended to be called only
 *      from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	/* Fold this task's RSS accounting back into the mm before the
	 * task disowns it. */
	sync_mm_rss(mm);
	tsk->mm = NULL;
	/* active_mm is still 'mm': the reference taken by use_mm() is
	 * retained (presumably dropped later by the scheduler when the
	 * lazy mm is switched away — not visible here). */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(unuse_mm);
  65