uboot/arch/mips/include/asm/system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 *
 * Changed set_except_vector declaration to allow return of previous
 * vector address value - necessary for "borrowing" vectors.
 *
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>
#include <asm/ptrace.h>
#if 0
#include <linux/kernel.h>
#endif

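/*
 * Descriptive note (not in the original file): __sti() enables interrupts
 * on the local CPU.  The ori/xori pair below sets bit 0 and clears bits
 * 1-4 of the CP0 Status register in one read-modify-write; reading those
 * bits as IE and EXL/ERL/KSU assumes an R4000-class Status layout.
 */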
extern __inline__ void
__sti(void)
{
        __asm__ __volatile__(
                ".set\tpush\n\t"
                ".set\treorder\n\t"
                ".set\tnoat\n\t"
                "mfc0\t$1,$12\n\t"
                "ori\t$1,0x1f\n\t"
                "xori\t$1,0x1e\n\t"
                "mtc0\t$1,$12\n\t"
                ".set\tpop\n\t"
                : /* no outputs */
                : /* no inputs */
                : "$1", "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * The R4000/R4400 need three nops, the R4600 two, and the R10000 none
 * at all.
 */
extern __inline__ void
__cli(void)
{
        __asm__ __volatile__(
                ".set\tpush\n\t"
                ".set\treorder\n\t"
                ".set\tnoat\n\t"
                "mfc0\t$1,$12\n\t"
                "ori\t$1,1\n\t"
                "xori\t$1,1\n\t"
                ".set\tnoreorder\n\t"
                "mtc0\t$1,$12\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n\t"
                ".set\tpop\n\t"
                : /* no outputs */
                : /* no inputs */
                : "$1", "memory");
}

#define __save_flags(x)                                                 \
__asm__ __volatile__(                                                   \
        ".set\tpush\n\t"                                                \
        ".set\treorder\n\t"                                             \
        "mfc0\t%0,$12\n\t"                                              \
        ".set\tpop\n\t"                                                 \
        : "=r" (x))

#define __save_and_cli(x)                                               \
__asm__ __volatile__(                                                   \
        ".set\tpush\n\t"                                                \
        ".set\treorder\n\t"                                             \
        ".set\tnoat\n\t"                                                \
        "mfc0\t%0,$12\n\t"                                              \
        "ori\t$1,%0,1\n\t"                                              \
        "xori\t$1,1\n\t"                                                \
        ".set\tnoreorder\n\t"                                           \
        "mtc0\t$1,$12\n\t"                                              \
        "nop\n\t"                                                       \
        "nop\n\t"                                                       \
        "nop\n\t"                                                       \
        ".set\tpop\n\t"                                                 \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "$1", "memory")

#define __restore_flags(flags)                                          \
do {                                                                    \
        unsigned long __tmp1;                                           \
                                                                        \
        __asm__ __volatile__(                                           \
                ".set\tnoreorder\t\t\t# __restore_flags\n\t"            \
                ".set\tnoat\n\t"                                        \
                "mfc0\t$1, $12\n\t"                                     \
                "andi\t%0, 1\n\t"                                       \
                "ori\t$1, 1\n\t"                                        \
                "xori\t$1, 1\n\t"                                       \
                "or\t%0, $1\n\t"                                        \
                "mtc0\t%0, $12\n\t"                                     \
                "nop\n\t"                                               \
                "nop\n\t"                                               \
                "nop\n\t"                                               \
                ".set\tat\n\t"                                          \
                ".set\treorder"                                         \
                : "=r" (__tmp1)                                         \
                : "0" (flags)                                           \
                : "$1", "memory");                                      \
} while(0)

#ifdef CONFIG_SMP

extern void __global_sti(void);
extern void __global_cli(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#  define sti() __global_sti()
#  define cli() __global_cli()
#  define save_flags(x) do { x = __global_save_flags(); } while (0)
#  define restore_flags(x) __global_restore_flags(x)
#  define save_and_cli(x) do { save_flags(x); cli(); } while(0)

#else /* Single processor */

#  define sti() __sti()
#  define cli() __cli()
#  define save_flags(x) __save_flags(x)
#  define save_and_cli(x) __save_and_cli(x)
#  define restore_flags(x) __restore_flags(x)

#endif /* SMP */

/* For spinlocks etc */
#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()
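
/*
 * Usage sketch (not part of the original header, names are hypothetical):
 * a short critical section on the local CPU saves the Status register,
 * masks interrupts, and restores the saved value afterwards.
 */
#if 0
static unsigned long counter;

static void counter_bump(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask interrupts, remember old state */
        counter++;                      /* cannot be interrupted on this CPU */
        local_irq_restore(flags);       /* put the Status register back */
}
#endif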

/*
 * These barrier definitions are probably overly paranoid ...
 */
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>
#define rmb()   do { } while(0)
#define wmb()   wbflush()
#define mb()    wbflush()

#else /* CONFIG_CPU_HAS_WB  */

#define mb()                                            \
__asm__ __volatile__(                                   \
        "# prevent instructions being moved around\n\t" \
        ".set\tnoreorder\n\t"                           \
        "# 8 nops to fool the R4400 pipeline\n\t"       \
        "nop;nop;nop;nop;nop;nop;nop;nop\n\t"           \
        ".set\treorder"                                 \
        : /* no output */                               \
        : /* no input */                                \
        : "memory")
#define rmb() mb()
#define wmb() mb()

#endif /* CONFIG_CPU_HAS_WB  */

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
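
/*
 * Usage sketch (not part of the original header, names are hypothetical):
 * wmb() orders the data store before the flag store that publishes it,
 * and the reader pairs that with rmb() before consuming the data.
 */
#if 0
static volatile int shared_data;
static volatile int data_ready;

static void producer(void)
{
        shared_data = 42;       /* write the payload first */
        wmb();                  /* flush/order stores before publishing */
        data_ready = 1;         /* then set the flag */
}

static int consumer(void)
{
        while (!data_ready)
                ;               /* wait for the flag */
        rmb();                  /* order the flag read before the data read */
        return shared_data;
}
#endif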

#if !defined (_LANGUAGE_ASSEMBLY)
/*
 * switch_to(n) should switch to task number n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
#if 0
extern asmlinkage void *resume(void *last, void *next);
#endif
#endif /* !defined (_LANGUAGE_ASSEMBLY) */

#define prepare_to_switch()     do { } while(0)
#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next); \
} while(0)

/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t\t# xchg_u32\n\t"
                ".set\tnoat\n\t"
                "ll\t%0, %3\n"
                "1:\tmove\t$1, %2\n\t"
                "sc\t$1, %1\n\t"
                "beqzl\t$1, 1b\n\t"
                " ll\t%0, %3\n\t"
                ".set\tat\n\t"
                ".set\treorder"
                : "=r" (val), "=o" (*m), "=r" (dummy)
                : "o" (*m), "2" (val)
                : "memory");

        return val;
#else
        unsigned long flags, retval;

        save_flags(flags);
        cli();
        retval = *m;
        *m = val;
        restore_flags(flags);
        return retval;
#endif /* Processor-dependent optimization */
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 4:
                        return xchg_u32(ptr, x);
        }
        return x;
}
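
/*
 * Usage sketch (not part of the original header, names are hypothetical):
 * tas() is a test-and-set built on xchg_u32(), so a minimal busy-wait
 * lock can be written on top of it.
 */
#if 0
static volatile int simple_lock;

static void simple_lock_acquire(void)
{
        /* spin until the previous value was 0, i.e. we took the lock */
        while (tas(&simple_lock))
                ;
}

static void simple_lock_release(void)
{
        wmb();                  /* order critical-section stores before the release */
        simple_lock = 0;
}
#endif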

extern void *set_except_vector(int n, void *addr);

extern void __die(const char *, struct pt_regs *, const char *where,
        unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *where,
        unsigned long line);

#define die(msg, regs)                                                  \
        __die(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)                                        \
        __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
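
/*
 * Usage sketch (not part of the original header, handler name is
 * hypothetical): an exception handler passes its message and register
 * frame through; the macros fill in file, function and line.
 */
#if 0
void do_unexpected_trap(struct pt_regs *regs)
{
        die_if_kernel("unexpected trap", regs);
}
#endif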

#endif /* _ASM_SYSTEM_H */
