uboot/arch/sh/include/asm/system.h
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 *
 * Taken from Linux kernel code.
 */

#include <asm/irqflags.h>
#include <asm/types.h>

/*
 * switch_to(prev, next, last) switches execution from task 'prev' to
 * task 'next'; 'last' receives the task that was running just before
 * control returned to 'prev'.
 */

#define switch_to(prev, next, last) do {                                \
 struct task_struct *__last;                                            \
 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp;       \
 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc;       \
 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev;  \
 register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next;  \
 register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp;       \
 register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;         \
 __asm__ __volatile__ (".balign 4\n\t"                                  \
                       "stc.l   gbr, @-r15\n\t"                         \
                       "sts.l   pr, @-r15\n\t"                          \
                       "mov.l   r8, @-r15\n\t"                          \
                       "mov.l   r9, @-r15\n\t"                          \
                       "mov.l   r10, @-r15\n\t"                         \
                       "mov.l   r11, @-r15\n\t"                         \
                       "mov.l   r12, @-r15\n\t"                         \
                       "mov.l   r13, @-r15\n\t"                         \
                       "mov.l   r14, @-r15\n\t"                         \
                       "mov.l   r15, @r1        ! save SP\n\t"          \
                       "mov.l   @r6, r15        ! change to new stack\n\t" \
                       "mova    1f, %0\n\t"                             \
                       "mov.l   %0, @r2         ! save PC\n\t"          \
                       "mov.l   2f, %0\n\t"                             \
                       "jmp     @%0             ! call __switch_to\n\t" \
                       " lds    r7, pr          !  with return to new PC\n\t" \
                       ".balign 4\n"                                    \
                       "2:\n\t"                                         \
                       ".long   __switch_to\n"                          \
                       "1:\n\t"                                         \
                       "mov.l   @r15+, r14\n\t"                         \
                       "mov.l   @r15+, r13\n\t"                         \
                       "mov.l   @r15+, r12\n\t"                         \
                       "mov.l   @r15+, r11\n\t"                         \
                       "mov.l   @r15+, r10\n\t"                         \
                       "mov.l   @r15+, r9\n\t"                          \
                       "mov.l   @r15+, r8\n\t"                          \
                       "lds.l   @r15+, pr\n\t"                          \
                       "ldc.l   @r15+, gbr\n\t"                         \
                       : "=z" (__last)                                  \
                       : "r" (__ts1), "r" (__ts2), "r" (__ts4),         \
                         "r" (__ts5), "r" (__ts6), "r" (__ts7)          \
                       : "r3", "t");                                    \
        last = __last;                                                  \
} while (0)
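
/*
 * Illustrative sketch (an assumption, not taken from this header): the
 * only task_struct fields the asm above dereferences are thread.sp and
 * thread.pc.  The real task_struct/thread_struct are defined by the
 * scheduler headers; the hypothetical types below exist only to show
 * the layout switch_to() relies on.
 */
struct example_thread_struct {
        unsigned long sp;       /* saved kernel stack pointer of the task */
        unsigned long pc;       /* address the task resumes at (label 1: above) */
};

struct example_task_struct {
        struct example_thread_struct thread;    /* reached as task->thread above */
};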

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long tas(volatile int *m)
{
        unsigned long retval;

        __asm__ __volatile__ ("tas.b    @%1\n\t"
                              "movt     %0"
                              : "=r" (retval): "r" (m): "t", "memory");
        return retval;
}
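
/*
 * Illustrative sketch (not part of the original header): a minimal
 * busy-wait lock built on tas().  TAS.B sets the T bit only if the byte
 * at *m was zero and then sets its MSB, so tas() returns non-zero
 * exactly when the caller atomically claimed a free lock.  The type and
 * helpers below are hypothetical.
 */
typedef struct { volatile int slock; } example_tas_lock_t;

static inline void example_tas_lock(example_tas_lock_t *lock)
{
        while (!tas(&lock->slock))
                ;       /* tas() returned 0: the lock byte was already set */
}

static inline void example_tas_unlock(example_tas_lock_t *lock)
{
        lock->slock = 0;        /* plain store releases the lock */
}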

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#define mb()            __asm__ __volatile__ ("": : :"memory")
#define rmb()           mb()
#define wmb()           __asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()  __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()  do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif
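
/*
 * Illustrative sketch (not from the original header): the classic
 * producer/consumer ordering pattern the barriers above exist for.  The
 * data/flag pointers are hypothetical; on this port wmb()/rmb() are
 * compiler barriers only, which is sufficient on a single CPU.
 */
static inline void example_publish(volatile int *data, volatile int *ready, int value)
{
        *data = value;
        wmb();          /* order the data store before the flag store */
        *ready = 1;
}

static inline int example_consume(volatile int *data, volatile int *ready)
{
        while (!*ready)
                ;       /* wait for the producer to set the flag */
        rmb();          /* order the flag load before the data load */
        return *data;
}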

#define set_mb(var, value) do { xchg(&var, value); } while (0)

/*
 * Jump to the P2 area.
 * When handling the TLB or caches, we need to do it from the uncached
 * P2 area.
 */
#define jump_to_P2()                    \
do {                                    \
        unsigned long __dummy;          \
        __asm__ __volatile__(           \
                "mov.l  1f, %0\n\t"     \
                "or     %1, %0\n\t"     \
                "jmp    @%0\n\t"        \
                " nop\n\t"              \
                ".balign 4\n"           \
                "1:     .long 2f\n"     \
                "2:"                    \
                : "=&r" (__dummy)       \
                : "r" (0x20000000));    \
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()                                    \
do {                                                    \
        unsigned long __dummy;                          \
        ctrl_barrier();                                 \
        __asm__ __volatile__(                           \
                "mov.l  1f, %0\n\t"                     \
                "jmp    @%0\n\t"                        \
                " nop\n\t"                              \
                ".balign 4\n"                           \
                "1:     .long 2f\n"                     \
                "2:"                                    \
                : "=&r" (__dummy));                     \
} while (0)
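
/*
 * Illustrative sketch (not from the original header): the usual pattern
 * is to run control-register accesses from the uncached P2 mirror and
 * then return to P1, with ctrl_barrier() (invoked by back_to_P1()
 * above) letting the write take effect.  The CCR address below is an
 * assumption for SH-4 style parts, not something this header defines.
 */
#define EXAMPLE_CCR     0xff00001c      /* hypothetical cache control register */

static inline void example_write_ccr(unsigned long value)
{
        jump_to_P2();           /* execute the store from uncached P2 */
        *(volatile unsigned long *)EXAMPLE_CCR = value;
        back_to_P1();           /* ctrl_barrier() + jump back to cached P1 */
}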

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)                            \
({                                                      \
        unsigned long __xchg__res;                      \
        volatile void *__xchg_ptr = (ptr);              \
        switch (size) {                                 \
        case 4:                                         \
                __xchg__res = xchg_u32(__xchg_ptr, x);  \
                break;                                  \
        case 1:                                         \
                __xchg__res = xchg_u8(__xchg_ptr, x);   \
                break;                                  \
        default:                                        \
                __xchg_called_with_bad_pointer();       \
                __xchg__res = x;                        \
                break;                                  \
        }                                               \
                                                        \
        __xchg__res;                                    \
})

#define xchg(ptr,x)     \
        ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
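
/*
 * Illustrative sketch (not from the original header): xchg() used as a
 * simple lock word.  Note that xchg_u32()/xchg_u8() above are atomic
 * only with respect to interrupts on this CPU, which is all a UP
 * configuration needs.  The helpers below are hypothetical.
 */
static inline void example_xchg_lock(volatile u32 *lock)
{
        while (xchg(lock, 1) != 0)
                ;       /* old value non-zero: somebody else holds the lock */
}

static inline void example_xchg_unlock(volatile u32 *lock)
{
        xchg(lock, 0);  /* release by swapping the lock word back to 0 */
}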

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier  */
        return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
                unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                 \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
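
/*
 * Illustrative sketch (not from the original header): the standard
 * compare-and-swap retry loop built on cmpxchg() above.  The counter
 * pointer and helper name are hypothetical.
 */
static inline int example_atomic_add_return(volatile int *counter, int delta)
{
        int old, new;

        do {
                old = *counter;
                new = old + delta;
                /* retry if the counter changed between the load and the swap */
        } while (cmpxchg(counter, old, new) != old);

        return new;
}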

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
        return set_exception_table_vec(evt >> 5, handler);
}
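
/*
 * Illustrative sketch (not from the original header): temporarily
 * installing a handler for a raw EXPEVT/INTEVT code, then restoring the
 * previous one.  set_exception_table_evt() converts the event code to a
 * vector index (evt >> 5); the event value and callback are supplied by
 * the hypothetical caller.
 */
static inline void example_with_handler(unsigned int evt, void *handler)
{
        void *old = set_exception_table_evt(evt, handler);

        /* ... run the code that may raise this exception here ... */

        set_exception_table_evt(evt, old);      /* put the old handler back */
}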

/* XXX
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
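
/*
 * Illustrative sketch (not from the original header): bracketing a
 * polled transfer so the idle loop will not execute "sleep" while the
 * device needs prompt service.  The status register and ready bit are
 * hypothetical.
 */
static inline void example_polled_io(volatile u8 *status, u8 ready_bit)
{
        disable_hlt();          /* keep the CPU out of sleep while we poll */
        while (!(*status & ready_bit))
                ;               /* busy-wait on the hypothetical ready bit */
        enable_hlt();           /* the idle loop may use sleep again */
}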

#define arch_align_stack(x) (x)

static inline void trigger_address_error(void)
{
        set_bl_bit();
        __asm__ __volatile__ (
                "mov.l @%1, %0"
                :
                : "r" (0x10000000), "r" (0x80000001)
        );
}

#endif /* __ASM_SH_SYSTEM_H */