linux/arch/mips/include/asm/switch_to.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>

struct task_struct;

/*
 * switch_to(prev, next, last) should switch tasks from task @prev to
 * task @next, first checking that @next isn't the current task, in
 * which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);

extern unsigned int ll_bit;
extern struct task_struct *ll_task;
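
/*
 * Illustrative sketch, not from this file: the expected expansion site of
 * switch_to() is the generic scheduler's context_switch(), roughly
 *
 *	switch_to(prev, next, prev);
 *
 * so that when @prev is eventually scheduled back in, its "prev" names the
 * task that was running immediately before it.  resume() is the low-level
 * assembler routine (e.g. arch/mips/kernel/r4k_switch.S) that performs the
 * actual stack and register switch; @next_ti is @next's thread_info and
 * @__usedfpu tells it whether @prev's FPU context is live and must be saved.
 */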

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

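/*
 * KSTK_STATUS(prev) is the CP0 Status word saved in @prev's user register
 * frame at the top of its kernel stack; ST0_CU1 still being set there means
 * the task really did use the FPU (coprocessor 1) during its last
 * time-slice, so the affinity restriction is left in place.
 */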
#define __mips_mt_fpaff_switch_to(prev)                                 \
do {                                                                    \
        struct thread_info *__prev_ti = task_thread_info(prev);         \
                                                                        \
        if (cpu_has_fpu &&                                              \
            test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&             \
            (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
                clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);          \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
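        /* "next" comes from the switch_to() expansion site below */    \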
        next->thread.emulated_fp = 0;                                   \
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

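/*
 * ll_bit is the software-emulated LL/SC link bit, used on CPUs without a
 * native ll/sc pair (cpu_has_llsc false).  Clearing it across a context
 * switch makes an interrupted emulated ll/sc sequence fail and retry, just
 * as eret clears the hardware LLbit.  A minimal sketch, in standard MIPS
 * assembly and not from this file, of the kind of sequence this protects:
 *
 *	1:	ll	t0, 0(a0)	# load linked, sets the link bit
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# store conditional, t0 == 0 on failure
 *		beqz	t0, 1b		# link broken (e.g. by a switch): retry
 */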
#define __clear_software_ll_bit()                                       \
do {                                                                    \
        if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
                ll_bit = 0;                                             \
} while (0)

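/*
 * Switch-out path: drop any stale FPU-affinity restriction on @prev, save
 * its DSP state, and if it owns coprocessor 2 save that context as well
 * (CU2 is set in c0_status around cop2_save() so the coprocessor is
 * accessible, then cleared again).  With cop2_lazy_restore, clearing
 * ST0_CU2 in the saved status drops @prev's COP2 ownership so the context
 * is only restored when it next traps on a COP2 access.  Finally any
 * emulated ll/sc in flight is invalidated and control handed to resume().
 */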
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        u32 __usedfpu, __c0_stat;                                       \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {            \
                if (cop2_lazy_restore)                                  \
                        KSTK_STATUS(prev) &= ~ST0_CU2;                  \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
                cop2_save(&prev->thread.cp2);                           \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        __clear_software_ll_bit();                                      \
        __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
        (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)

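/*
 * Switch-in side, run with the incoming task as "current": eagerly restore
 * its COP2 context unless lazy restore is in use, restore its DSP state,
 * point the UserLocal register (the RDHWR $29 TLS pointer) at the new
 * thread, and reload the watch registers.
 */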
#define finish_arch_switch(prev)                                        \
do {                                                                    \
        u32 __c0_stat;                                                  \
        if (cop2_present && !cop2_lazy_restore &&                       \
                        (KSTK_STATUS(current) & ST0_CU2)) {             \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
                cop2_restore(&current->thread.cp2);                     \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(current_thread_info()->tp_value);    \
        __restore_watch();                                              \
} while (0)

#endif /* _ASM_SWITCH_TO_H */