linux/arch/h8300/include/asm/system.h
#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct pt_regs;

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself, and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions, as we no longer
 * have to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 Porting 2002/09/04 Yoshinori Sato
 */

asmlinkage void resume(void);
#define switch_to(prev,next,last) {                         \
  void *_last;                                              \
  __asm__ __volatile__(                                     \
                        "mov.l  %1, er0\n\t"                \
                        "mov.l  %2, er1\n\t"                \
                        "mov.l  %3, er2\n\t"                \
                        "jsr @_resume\n\t"                  \
                        "mov.l  er2,%0\n\t"                 \
                       : "=r" (_last)                       \
                       : "r" (&(prev->thread)),             \
                         "r" (&(next->thread)),             \
                         "g" (prev)                         \
                       : "cc", "er0", "er1", "er2", "er3"); \
  (last) = _last;                                           \
}
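
/*
 * Usage sketch (illustrative only, not built): the scheduler core
 * invokes switch_to() from its context-switch path roughly as below.
 * "prev" is the task being descheduled, "next" the one being resumed,
 * and "last" receives the task that was running when control finally
 * returns to "prev".  Names here are hypothetical.
 */
#if 0
static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	/* resume() switches stacks; execution returns here later */
	switch_to(prev, next, last);
	/* "last" now names the task we switched away from */
}
#endif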

#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile (""   : : :"memory")
#define rmb()  asm volatile (""   : : :"memory")
#define wmb()  asm volatile (""   : : :"memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif
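
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb().  On UP H8/300
 * these compile down to barrier(), but the pairing documents the
 * ordering the code relies on.  All names below are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(int v)
{
	example_data = v;
	smp_wmb();		/* publish the data before the flag */
	example_ready = 1;
}

static int example_consumer(void)
{
	while (!example_ready)
		;
	smp_rmb();		/* read data only after seeing the flag */
	return example_data;
}
#endif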

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Exchange *ptr with x and return the old value.  Interrupts are
 * masked around the load/store pair, which makes the operation atomic
 * with respect to this (uniprocessor) CPU.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
  unsigned long tmp, flags;

  local_irq_save(flags);

  switch (size) {
  case 1:
    __asm__ __volatile__
    ("mov.b %2,%0\n\t"
     "mov.b %1,%2"
    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
    break;
  case 2:
    __asm__ __volatile__
    ("mov.w %2,%0\n\t"
     "mov.w %1,%2"
    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
    break;
  case 4:
    __asm__ __volatile__
    ("mov.l %2,%0\n\t"
     "mov.l %1,%2"
    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
    break;
  default:
    tmp = 0;	/* unsupported operand size */
  }
  local_irq_restore(flags);
  return tmp;
}
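
/*
 * Usage sketch (hypothetical names): because __xchg() masks interrupts
 * around the read-modify-write, xchg() can serve as a simple
 * test-and-set primitive on this UP architecture.
 */
#if 0
static volatile unsigned long example_lock;

static void example_acquire(void)
{
	while (xchg(&example_lock, 1) != 0)
		;		/* spin until the old value was 0 */
}

static void example_release(void)
{
	example_lock = 0;
}
#endif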

#define HARD_RESET_NOW() ({             \
        local_irq_disable();            \
        asm("jmp @@0");                 \
})

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                               \
        ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
                        (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

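/*
 * Usage sketch (hypothetical names): cmpxchg_local() is atomic only
 * with respect to the current CPU, which suffices for per-CPU data.
 * The retry loop below is the usual compare-and-swap update idiom.
 */
#if 0
static unsigned long example_counter;

static void example_counter_inc(void)
{
	unsigned long old, new;

	do {
		old = example_counter;
		new = old + 1;
	} while (cmpxchg_local(&example_counter, old, new) != old);
}
#endif
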
#define arch_align_stack(x) (x)

extern void die(const char *str, struct pt_regs *fp, unsigned long err);

#endif /* _H8300_SYSTEM_H */