linux/include/asm-v850/system.h
/*
 * include/asm-v850/system.h -- Low-level interrupt/thread ops
 *
 *  Copyright (C) 2001,02,03  NEC Electronics Corporation
 *  Copyright (C) 2001,02,03  Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#ifndef __V850_SYSTEM_H__
#define __V850_SYSTEM_H__

#include <linux/linkage.h>
#include <asm/ptrace.h>


/*
 * switch_to (prev, next, last) switches from task `prev' to task `next',
 * first checking that they differ; if they are the same task, it does
 * nothing.  `last' is set to the thread that was switched away from.
 */
struct thread_struct;
extern void *switch_thread (struct thread_struct *last,
                            struct thread_struct *next);
#define switch_to(prev,next,last)                                             \
  do {                                                                        \
        if (prev != next) {                                                   \
                (last) = switch_thread (&prev->thread, &next->thread);        \
        }                                                                     \
  } while (0)


/* Enable/disable interrupts.  */
#define local_irq_enable()      __asm__ __volatile__ ("ei")
#define local_irq_disable()     __asm__ __volatile__ ("di")

#define local_save_flags(flags) \
  __asm__ __volatile__ ("stsr %1, %0" : "=r" (flags) : "i" (SR_PSW))
#define local_restore_flags(flags) \
  __asm__ __volatile__ ("ldsr %0, %1" :: "r" (flags), "i" (SR_PSW))

/* For spinlocks etc.  */
#define local_irq_save(flags) \
  do { local_save_flags (flags); local_irq_disable (); } while (0)
#define local_irq_restore(flags) \
  local_restore_flags (flags)


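/* Illustrative sketch (not part of the original header): a typical
   critical-section pattern built from the macros above.  `my_device_count'
   is a hypothetical variable used only for this example.

       unsigned long flags;

       local_irq_save (flags);         // disable interrupts, remember PSW
       my_device_count++;              // touch state shared with an IRQ handler
       local_irq_restore (flags);      // put the saved PSW back

   On this uniprocessor port, masking interrupts is sufficient to make the
   update atomic with respect to interrupt handlers.  */
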
static inline int irqs_disabled (void)
{
        unsigned flags;
        local_save_flags (flags);
        return !!(flags & 0x20);        /* PSW.ID bit: 1 when interrupts are disabled */
}


/*
 * Force strict CPU ordering.
 * Not really required on v850...
 */
#define nop()                   __asm__ __volatile__ ("nop")
#define mb()                    __asm__ __volatile__ ("" ::: "memory")
#define rmb()                   mb ()
#define wmb()                   mb ()
#define read_barrier_depends()  ((void)0)
#define set_mb(var, value)      do { xchg (&var, value); } while (0)

#define smp_mb()        mb ()
#define smp_rmb()       rmb ()
#define smp_wmb()       wmb ()
#define smp_read_barrier_depends()      read_barrier_depends()

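/* Illustrative sketch (not part of the original header): on a single v850
   CPU the barriers above only need to constrain the compiler, so mb()/
   rmb()/wmb() expand to a "memory" clobber.  A hypothetical producer/
   consumer pair would still use them as on any other architecture:

       // producer
       shared_data = value;
       wmb ();                  // order the data store before the flag store
       shared_ready = 1;

       // consumer
       if (shared_ready) {
               rmb ();          // order the flag read before the data read
               use (shared_data);
       }

   `shared_data', `shared_ready' and `use' are assumed names for this
   example only.  */
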
#define xchg(ptr, with) \
  ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))

static inline unsigned long __xchg (unsigned long with,
                                    __volatile__ void *ptr, int size)
{
        unsigned long tmp, flags;

        /* This is a uniprocessor architecture, so disabling interrupts
           is enough to make the load/store pair atomic.  */
        local_irq_save (flags);

        switch (size) {
        case 1:
                tmp = *(unsigned char *)ptr;
                *(unsigned char *)ptr = with;
                break;
        case 2:
                tmp = *(unsigned short *)ptr;
                *(unsigned short *)ptr = with;
                break;
        case 4:
                tmp = *(unsigned long *)ptr;
                *(unsigned long *)ptr = with;
                break;
        }

        local_irq_restore (flags);

        return tmp;
}

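/* Illustrative sketch (not part of the original header): xchg() swaps a new
   value into a 1-, 2- or 4-byte object and returns the old one, e.g. for a
   hypothetical run-once flag:

       static int pending = 1;

       if (xchg (&pending, 0))
               do_the_work ();          // runs at most once

   `pending' and `do_the_work' are assumed names for this example only.
   Atomicity comes from __xchg() disabling interrupts around the swap.  */
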
#define arch_align_stack(x) (x)

#endif /* __V850_SYSTEM_H__ */