linux/arch/m68k/include/asm/system_no.h
#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behaviour if a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel	%1, %%a0\n\t"				\
		"movel	%2, %%a1\n\t"				\
		"jbsr	resume\n\t"				\
		"movel	%%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5",	\
		  "a0", "a1");					\
	(last) = _last;						\
}
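
/*
 * Illustrative sketch only, not part of the original header: a
 * hypothetical call site showing how scheduler-side code would use
 * switch_to().  'prev' is the outgoing task, 'next' the incoming one,
 * and 'last' receives the task that was running just before 'prev'
 * was eventually resumed again.
 */
#if 0
static inline void example_context_switch(struct task_struct *prev,
					  struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* execution continues here only once prev is scheduled again */
}
#endif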

#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile (""   : : :"memory")
#define rmb()	asm volatile (""   : : :"memory")
#define wmb()	asm volatile (""   : : :"memory")
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)

#define read_barrier_depends()	((void)0)

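/*
 * Illustrative sketch only (the example_* names are hypothetical, not
 * part of this header): on this uniprocessor configuration the barriers
 * reduce to compiler barriers, but the usage pattern is the usual one -
 * order the data store before the flag store with wmb(), and the flag
 * load before the data load with rmb().
 */
#if 0
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
	example_data = v;
	wmb();			/* data must be visible before the flag */
	example_ready = 1;
}

static inline int example_consume(int *v)
{
	if (!example_ready)
		return 0;
	rmb();			/* read the flag before the data */
	*v = example_data;
	return 1;
}
#endif
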
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
		("moveb %2,%0\n\t"
		 "moveb %1,%2"
		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
		("movew %2,%0\n\t"
		 "movew %1,%2"
		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
		("movel %2,%0\n\t"
		 "movel %1,%2"
		 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
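
/*
 * Illustrative sketch only (the example_* names are hypothetical, not
 * part of this header): xchg() returns the previous value of the memory
 * word, which is enough to build a simple test-and-set style lock.
 */
#if 0
static inline int example_trylock(volatile unsigned long *lock)
{
	/* a previous value of 0 means we are the one that set it to 1 */
	return xchg(lock, 1) == 0;
}

static inline void example_unlock(volatile unsigned long *lock)
{
	xchg(lock, 0);
}
#endif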

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>
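
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): cmpxchg_local() is atomic with respect to the current CPU,
 * so it can implement a compare-and-swap retry loop on per-CPU data,
 * here a saturating counter.
 */
#if 0
static inline void example_saturating_inc(unsigned long *cnt)
{
	unsigned long old;

	do {
		old = *cnt;
		if (old == ~0UL)
			return;		/* already at the maximum */
	} while (cmpxchg_local(cnt, old, old + 1) != old);
}
#endif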

#define arch_align_stack(x) (x)


#endif /* _M68KNOMMU_SYSTEM_H */