linux/arch/m32r/include/asm/system.h
#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but they aren't always
 * defined.
 */

#if defined(CONFIG_FRAME_POINTER) || \
        !defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "  push fp\n"
#define M32R_POP_FP  "  pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
        __asm__ __volatile__ ( \
                "       seth    lr, #high(1f)                           \n" \
                "       or3     lr, lr, #low(1f)                        \n" \
                "       st      lr, @%4  ; store old LR                 \n" \
                "       ld      lr, @%5  ; load new LR                  \n" \
                        M32R_PUSH_FP \
                "       st      sp, @%2  ; store old SP                 \n" \
                "       ld      sp, @%3  ; load new SP                  \n" \
                "       push    %1  ; store `prev' on new stack         \n" \
                "       jmp     lr                                      \n" \
                "       .fillinsn                                       \n" \
                "1:                                                     \n" \
                "       pop     %0  ; restore `__last' from new stack   \n" \
                        M32R_POP_FP \
                : "=r" (last) \
                : "0" (prev), \
                  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
                  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
                : "memory", "lr" \
        ); \
} while(0)
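
/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * how a scheduler-style caller would hand control to another task with
 * switch_to().  The surrounding function and its name are hypothetical;
 * kept under #if 0 so it has no effect on the build.
 */
#if 0
static inline struct task_struct *
example_context_switch(struct task_struct *prev, struct task_struct *next)
{
        struct task_struct *last;

        /* After this point we run on next's kernel stack; `last' receives
         * whichever task was running when control eventually returns here. */
        switch_to(prev, next, last);

        return last;
}
#endif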

#define nop()   __asm__ __volatile__ ("nop" : : )

#define xchg(ptr, x)                                                    \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x)                                              \
        ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),    \
                        sizeof(*(ptr))))
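
/*
 * Illustrative only (not part of the original header): the usual xchg()
 * idiom, a test-and-set style lock word.  The lock variable and function
 * name are hypothetical; kept under #if 0 so it has no effect on the build.
 */
#if 0
static inline void example_busy_lock(volatile unsigned int *lockword)
{
        /* Atomically store 1 and inspect the previous value; seeing 0
         * means this CPU acquired the lock. */
        while (xchg(lockword, 1) != 0)
                barrier();      /* spin; barrier() is from <linux/compiler.h> */
}
#endif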

extern void  __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr)                          \
        "seth   "reg1", #high(dcache_dummy);            \n\t"   \
        "or3    "reg1", "reg1", #low(dcache_dummy);     \n\t"   \
        "lock   "reg0", @"reg1";                        \n\t"   \
        "add3   "reg0", "addr", #0x1000;                \n\t"   \
        "ld     "reg0", @"reg0";                        \n\t"   \
        "add3   "reg0", "addr", #0x2000;                \n\t"   \
        "ld     "reg0", @"reg0";                        \n\t"   \
        "unlock "reg0", @"reg1";                        \n\t"
        /* FIXME: This workaround code cannot handle kernel modules
         * correctly in an SMP environment.
         */
#else   /* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif  /* CONFIG_CHIP_M32700_TS1 */

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
        unsigned long flags;
        unsigned long tmp = 0;

        local_irq_save(flags);

        switch (size) {
#ifndef CONFIG_SMP
        case 1:
                __asm__ __volatile__ (
                        "ldb    %0, @%2 \n\t"
                        "stb    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 2:
                __asm__ __volatile__ (
                        "ldh    %0, @%2 \n\t"
                        "sth    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 4:
                __asm__ __volatile__ (
                        "ld     %0, @%2 \n\t"
                        "st     %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
#else  /* CONFIG_SMP */
        case 4:
                __asm__ __volatile__ (
                        DCACHE_CLEAR("%0", "r4", "%2")
                        "lock   %0, @%2;        \n\t"
                        "unlock %1, @%2;        \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr)
                        : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                        , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
                );
                break;
#endif  /* CONFIG_SMP */
        default:
                __xchg_called_with_bad_pointer();
        }

        local_irq_restore(flags);

        return (tmp);
}

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
        unsigned long flags;
        unsigned long tmp = 0;

        local_irq_save(flags);

        switch (size) {
        case 1:
                __asm__ __volatile__ (
                        "ldb    %0, @%2 \n\t"
                        "stb    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 2:
                __asm__ __volatile__ (
                        "ldh    %0, @%2 \n\t"
                        "sth    %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        case 4:
                __asm__ __volatile__ (
                        "ld     %0, @%2 \n\t"
                        "st     %1, @%2 \n\t"
                        : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
                break;
        default:
                __xchg_called_with_bad_pointer();
        }

        local_irq_restore(flags);

        return (tmp);
}

#define __HAVE_ARCH_CMPXCHG     1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
        unsigned long flags;
        unsigned int retval;

        local_irq_save(flags);
        __asm__ __volatile__ (
                        DCACHE_CLEAR("%0", "r4", "%1")
                        M32R_LOCK" %0, @%1;     \n"
                "       bne     %0, %2, 1f;     \n"
                        M32R_UNLOCK" %3, @%1;   \n"
                "       bra     2f;             \n"
                "       .fillinsn               \n"
                "1:"
                        M32R_UNLOCK" %0, @%1;   \n"
                "       .fillinsn               \n"
                "2:"
                        : "=&r" (retval)
                        : "r" (p), "r" (old), "r" (new)
                        : "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                        , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
                );
        local_irq_restore(flags);

        return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
                        unsigned int new)
{
        unsigned long flags;
        unsigned int retval;

        local_irq_save(flags);
        __asm__ __volatile__ (
                        DCACHE_CLEAR("%0", "r4", "%1")
                        "ld %0, @%1;            \n"
                "       bne     %0, %2, 1f;     \n"
                        "st %3, @%1;            \n"
                "       bra     2f;             \n"
                "       .fillinsn               \n"
                "1:"
                        "st %0, @%1;            \n"
                "       .fillinsn               \n"
                "2:"
                        : "=&r" (retval)
                        : "r" (p), "r" (old), "r" (new)
                        : "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                        , "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
                );
        local_irq_restore(flags);

        return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#if 0   /* we don't have __cmpxchg_u64 */
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr, o, n)                                               \
        ((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o),       \
                        (unsigned long)(n), sizeof(*(ptr))))
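
/*
 * Illustrative only (not part of the original header): the standard
 * compare-and-swap retry loop built on cmpxchg(), here adding to a shared
 * counter.  The counter and function name are hypothetical; kept under
 * #if 0 so it has no effect on the build.
 */
#if 0
static inline unsigned int example_counter_add(volatile unsigned int *counter,
                                               unsigned int delta)
{
        unsigned int old, new;

        do {
                old = *counter;
                new = old + delta;
                /* cmpxchg() returns the value it found at *counter; retry
                 * if another CPU changed it between the read and the swap. */
        } while (cmpxchg(counter, old, new) != old);

        return new;
}
#endif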

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                      unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_local_u32(ptr, old, new);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                            \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),     \
                        (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
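
/*
 * Illustrative only (not part of the original header): cmpxchg_local() has
 * the same compare-and-swap semantics but is only guaranteed atomic against
 * code running on the current CPU (e.g. an interrupt handler), which is why
 * it can be cheaper than cmpxchg().  A minimal sketch with a hypothetical
 * per-CPU statistic; kept under #if 0 so it has no effect on the build.
 */
#if 0
static inline void example_local_bump(unsigned long *percpu_stat)
{
        unsigned long old;

        do {
                old = *percpu_stat;
                /* Safe against an interrupt on this CPU touching the same
                 * counter, but not against other CPUs. */
        } while (cmpxchg_local(percpu_stat, old, old + 1) != old);
}
#endif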

#endif  /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()   barrier()
#define rmb()  mb()
#define wmb()  mb()
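
/*
 * Illustrative only (not part of the original header): the classic
 * publish/consume pattern these barriers exist for.  The data and flag
 * variables are hypothetical; kept under #if 0 so it has no effect on the
 * build.
 */
#if 0
static int example_data;
static int example_ready;

static inline void example_publish(int value)
{
        example_data = value;
        wmb();                  /* make the data visible before the flag */
        example_ready = 1;
}

static inline int example_consume(void)
{
        while (!example_ready)
                barrier();
        rmb();                  /* order the flag read before the data read */
        return example_data;
}
#endif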

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while (0)
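
/*
 * Illustrative only (not part of the original header): the first example
 * from the comment above expressed as C, using mb() in place of the
 * comment's memory_barrier().  The variable and function names are
 * hypothetical; kept under #if 0 so it has no effect on the build.
 */
#if 0
static int example_a, example_b = 1;
static int *example_p = &example_a;

/* CPU 0 */
static inline void example_writer(void)
{
        example_b = 2;
        mb();                   /* order the store to b before publishing p */
        example_p = &example_b;
}

/* CPU 1 */
static inline int example_reader(void)
{
        int *q = example_p;

        read_barrier_depends(); /* order the load of p before the load of *q */
        return *q;              /* data-dependent read, ordered by the barrier */
}
#endif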

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_SYSTEM_H */