linux/arch/mn10300/include/asm/system.h
/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/atomic.h>

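/*
 * Save the outgoing task's FPU state on context switch.  Without
 * CONFIG_LAZY_SAVE_FPU the state must be saved eagerly on every switch;
 * with lazy saving enabled the save is deferred until the FPU is next
 * touched, so switch_fpu() becomes a no-op here.
 */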
#if !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next)                                          \
        do {                                                            \
                if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
                        (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
                        (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
                        fpu_save(&(prev)->thread.fpu_state);            \
                }                                                       \
        } while (0)
#else
#define switch_fpu(prev, next) do {} while (0)
#endif

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
                                struct thread_struct *next,
                                struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        switch_fpu(prev, next);                                         \
        current->thread.wchan = (u_long) __builtin_return_address(0);   \
        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
        mb();                                                           \
        current->thread.wchan = 0;                                      \
} while (0)
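
/*
 * The scheduler invokes the macro above as switch_to(prev, next, last).
 * When "prev" is eventually scheduled back in, execution resumes here and
 * "last" names the task that was running immediately beforehand, which is
 * why __switch_to() hands back a task_struct pointer.
 */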

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */

#define mb()    asm volatile ("": : :"memory")
#define rmb()   mb()
#define wmb()   asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define set_mb(var, value)  do { xchg(&var, value); } while (0)
#else  /* CONFIG_SMP */
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define set_mb(var, value)  do { var = value;  mb(); } while (0)
#endif /* CONFIG_SMP */
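
/*
 * Illustrative use of the SMP barriers above (a sketch, not part of this
 * header; "data_buf", "data_ready" and consume() are hypothetical names).
 * A producer publishes a payload before setting a flag:
 *
 *      data_buf = payload;
 *      smp_wmb();              // order the payload store before the flag store
 *      data_ready = 1;
 *
 * and the consumer pairs with it:
 *
 *      while (!data_ready)
 *              cpu_relax();
 *      smp_rmb();              // order the flag load before the payload load
 *      consume(data_buf);
 */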

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

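/*
 * Data-dependency barriers are no-ops here; as elsewhere in the kernel,
 * read_barrier_depends() only emits a real barrier on architectures
 * (notably Alpha) that can reorder dependent loads.
 */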
#define read_barrier_depends()          do {} while (0)
#define smp_read_barrier_depends()      do {} while (0)

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */