/* uboot/arch/mips/include/asm/system.h */
   1/*
   2 * Copyright (C) 1994 - 1999 by Ralf Baechle
   3 * Copyright (C) 1996 by Paul M. Antoine
   4 * Copyright (C) 1994 - 1999 by Ralf Baechle
   5 *
   6 * Changed set_except_vector declaration to allow return of previous
   7 * vector address value - necessary for "borrowing" vectors.
   8 *
   9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
  10 * Copyright (C) 2000 MIPS Technologies, Inc.
  11 *
  12 * SPDX-License-Identifier:     GPL-2.0
  13 */
  14#ifndef _ASM_SYSTEM_H
  15#define _ASM_SYSTEM_H
  16
  17#include <asm/sgidefs.h>
  18#include <asm/ptrace.h>
  19#if 0
  20#include <linux/kernel.h>
  21#endif
  22
/*
 * __sti(): enable interrupts by setting the IE bit (bit 0) of the
 * CP0 Status register ($12).
 *
 * The ori/xori pair sets bits 0..4 and then clears bits 1..4 again,
 * so IE ends up set while EXL/ERL/KSU (0x1e) are forced clear.
 * $1 (AT) is used as scratch, hence ".set noat" and the "$1" clobber.
 */
static __inline__ void
__sti(void)
{
	__asm__ __volatile__(
		".set\tpush\n\t"
		".set\treorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,0x1f\n\t"
		"xori\t$1,0x1e\n\t"
		"mtc0\t$1,$12\n\t"
		".set\tpop\n\t"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}
  39
  40/*
 * For cli() we have to insert nops to make sure that the new value
  42 * has actually arrived in the status register before the end of this
  43 * macro.
  44 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
  45 * no nops at all.
  46 */
/*
 * __cli(): disable interrupts by clearing the IE bit (bit 0) of the
 * CP0 Status register ($12); ori/xori with 1 clears exactly that bit
 * without touching the rest of the register.  The three nops after
 * mtc0 cover the CP0 write hazard described in the comment above.
 * $1 (AT) is used as scratch, hence ".set noat" and the "$1" clobber.
 */
static __inline__ void
__cli(void)
{
	__asm__ __volatile__(
		".set\tpush\n\t"
		".set\treorder\n\t"
		".set\tnoat\n\t"
		"mfc0\t$1,$12\n\t"
		"ori\t$1,1\n\t"
		"xori\t$1,1\n\t"
		".set\tnoreorder\n\t"
		"mtc0\t$1,$12\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n\t"
		".set\tpop\n\t"
		: /* no outputs */
		: /* no inputs */
		: "$1", "memory");
}
  67
/*
 * __save_flags(x): snapshot the whole CP0 Status register ($12)
 * into x (not just the interrupt-enable bit).
 */
#define __save_flags(x)							\
__asm__ __volatile__(							\
	".set\tpush\n\t"						\
	".set\treorder\n\t"						\
	"mfc0\t%0,$12\n\t"						\
	".set\tpop\n\t"							\
	: "=r" (x))
  75
/*
 * __save_and_cli(x): save the CP0 Status register into x, then
 * disable interrupts by writing back a copy with the IE bit (bit 0)
 * cleared (ori/xori with 1).  Three nops after mtc0 cover the CP0
 * write hazard (see the comment above __cli()).  $1 (AT) is scratch.
 */
#define __save_and_cli(x)						\
__asm__ __volatile__(							\
	".set\tpush\n\t"						\
	".set\treorder\n\t"						\
	".set\tnoat\n\t"						\
	"mfc0\t%0,$12\n\t"						\
	"ori\t$1,%0,1\n\t"						\
	"xori\t$1,1\n\t"						\
	".set\tnoreorder\n\t"						\
	"mtc0\t$1,$12\n\t"						\
	"nop\n\t"							\
	"nop\n\t"							\
	"nop\n\t"							\
	".set\tpop\n\t"							\
	: "=r" (x)							\
	: /* no inputs */						\
	: "$1", "memory")
  93
/*
 * __restore_flags(flags): restore only the IE bit (bit 0) from a
 * previously saved Status value, keeping every other bit of the
 * CURRENT Status register: andi isolates IE in flags, ori/xori
 * clear IE in the freshly read Status, and or merges the two
 * before writing back.  Three nops cover the mtc0 hazard.
 */
#define __restore_flags(flags)						\
do {									\
	unsigned long __tmp1;						\
									\
	__asm__ __volatile__(						\
		".set\tnoreorder\t\t\t# __restore_flags\n\t"		\
		".set\tnoat\n\t"					\
		"mfc0\t$1, $12\n\t"					\
		"andi\t%0, 1\n\t"					\
		"ori\t$1, 1\n\t"					\
		"xori\t$1, 1\n\t"					\
		"or\t%0, $1\n\t"					\
		"mtc0\t%0, $12\n\t"					\
		"nop\n\t"						\
		"nop\n\t"						\
		"nop\n\t"						\
		".set\tat\n\t"						\
		".set\treorder"						\
		: "=r" (__tmp1)						\
		: "0" (flags)						\
		: "$1", "memory");					\
} while(0)
 116
#ifdef CONFIG_SMP

/*
 * SMP: the irq primitives must act across CPUs, so they are routed
 * through out-of-line global helpers defined elsewhere.
 */
extern void __global_sti(void);
extern void __global_cli(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#  define sti() __global_sti()
#  define cli() __global_cli()
#  define save_flags(x) do { x = __global_save_flags(); } while (0)
#  define restore_flags(x) __global_restore_flags(x)
#  define save_and_cli(x) do { save_flags(x); cli(); } while(0)

#else /* Single processor */

/* UP: the local inline/macro versions above are sufficient. */
#  define sti() __sti()
#  define cli() __cli()
#  define save_flags(x) __save_flags(x)
#  define save_and_cli(x) __save_and_cli(x)
#  define restore_flags(x) __restore_flags(x)

#endif /* SMP */
 138
 139/* For spinlocks etc */
 140#define local_irq_save(x)       __save_and_cli(x);
 141#define local_irq_restore(x)    __restore_flags(x);
 142#define local_irq_disable()     __cli();
 143#define local_irq_enable()      __sti();
 144
/*
 * These are probably defined overly paranoid ...
 */
#ifdef CONFIG_CPU_HAS_WB

/*
 * CPU has a write buffer: draining it with wbflush() orders stores,
 * so mb()/wmb() flush while rmb() can be a no-op.
 */
#include <asm/wbflush.h>
#define rmb()	do { } while(0)
#define wmb()	wbflush()
#define mb()	wbflush()

#else /* CONFIG_CPU_HAS_WB  */

/*
 * No write buffer: a compiler/memory barrier plus 8 nops (enough to
 * drain the R4400 pipeline, per the inline comments) serves for all
 * three barrier flavours.
 */
#define mb()						\
__asm__ __volatile__(					\
	"# prevent instructions being moved around\n\t"	\
	".set\tnoreorder\n\t"				\
	"# 8 nops to fool the R4400 pipeline\n\t"	\
	"nop;nop;nop;nop;nop;nop;nop;nop\n\t"		\
	".set\treorder"					\
	: /* no output */				\
	: /* no input */				\
	: "memory")
#define rmb() mb()
#define wmb() mb()

#endif /* CONFIG_CPU_HAS_WB  */
 171
/*
 * smp_*mb(): real hardware barriers only on CONFIG_SMP; on UP a
 * compiler barrier() is all that is needed.
 */
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

/* Assign, then order the store with the matching barrier. */
#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
 187
#if !defined (_LANGUAGE_ASSEMBLY)
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
#if 0
extern asmlinkage void *resume(void *last, void *next);
#endif
#endif /* !defined (_LANGUAGE_ASSEMBLY) */

#define prepare_to_switch()	do { } while(0)
/*
 * Hand the CPU from `prev` to `next` via resume(); `last` receives
 * resume()'s return value.  NOTE(review): the resume() prototype
 * above is compiled out (#if 0), so a declaration must come from
 * elsewhere -- confirm before using switch_to().
 */
#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next); \
} while(0)
 203
 204/*
 205 * For 32 and 64 bit operands we can take advantage of ll and sc.
 206 * FIXME: This doesn't work for R3000 machines.
 207 */
/*
 * xchg_u32(): atomically exchange *m with val and return the old
 * value of *m.
 *
 * LL/SC variant: ll loads the old value and opens a load-link; sc
 * attempts the store and leaves 1/0 in $1 for success/failure.
 * beqzl (branch-likely) loops back on failure with the reloading ll
 * in its delay slot -- branch-likely annuls the slot when the branch
 * is not taken, so the extra ll only executes on a retry.
 *
 * Fallback variant: without LL/SC the swap is made atomic against
 * local interrupts by bracketing it with save_flags/cli ...
 * restore_flags (not SMP-safe).
 */
static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\t\t\t# xchg_u32\n\t"
		".set\tnoat\n\t"
		"ll\t%0, %3\n"
		"1:\tmove\t$1, %2\n\t"
		"sc\t$1, %1\n\t"
		"beqzl\t$1, 1b\n\t"
		" ll\t%0, %3\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (val), "=o" (*m), "=r" (dummy)
		: "o" (*m), "2" (val)
		: "memory");

	return val;
#else
	unsigned long flags, retval;

	save_flags(flags);
	cli();
	retval = *m;
	*m = val;
	restore_flags(flags);
	return retval;
#endif /* Processor-dependent optimization */
}
 239
/*
 * xchg(ptr,x): atomically exchange *ptr with x, returning the old
 * value; dispatches on operand size via __xchg().
 * tas(ptr): test-and-set -- atomically store 1, return the old value.
 */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
 242
 243static __inline__ unsigned long
 244__xchg(unsigned long x, volatile void * ptr, int size)
 245{
 246        switch (size) {
 247                case 4:
 248                        return xchg_u32(ptr, x);
 249        }
 250        return x;
 251}
 252
 253extern void *set_except_vector(int n, void *addr);
 254
 255extern void __die(const char *, struct pt_regs *, const char *where,
 256        unsigned long line) __attribute__((noreturn));
 257extern void __die_if_kernel(const char *, struct pt_regs *, const char *where,
 258        unsigned long line);
 259
 260#define die(msg, regs)                                                  \
 261        __die(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
 262#define die_if_kernel(msg, regs)                                        \
 263        __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
 264
 265#endif /* _ASM_SYSTEM_H */
 266