linux/arch/s390/include/asm/system.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

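/*
 * Save the floating point registers to *fpregs.  The basic registers
 * f0/f2/f4/f6 are always stored; the floating point control register
 * and the remaining twelve registers are only stored on machines that
 * provide the IEEE floating point facility (MACHINE_HAS_IEEE).
 */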
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       std     0,%O0+8(%R0)\n"
                "       std     2,%O0+24(%R0)\n"
                "       std     4,%O0+40(%R0)\n"
                "       std     6,%O0+56(%R0)"
                : "=Q" (*fpregs) : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       stfpc   %0\n"
                "       std     1,%O0+16(%R0)\n"
                "       std     3,%O0+32(%R0)\n"
                "       std     5,%O0+48(%R0)\n"
                "       std     7,%O0+64(%R0)\n"
                "       std     8,%O0+72(%R0)\n"
                "       std     9,%O0+80(%R0)\n"
                "       std     10,%O0+88(%R0)\n"
                "       std     11,%O0+96(%R0)\n"
                "       std     12,%O0+104(%R0)\n"
                "       std     13,%O0+112(%R0)\n"
                "       std     14,%O0+120(%R0)\n"
                "       std     15,%O0+128(%R0)\n"
                : "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       ld      0,%O0+8(%R0)\n"
                "       ld      2,%O0+24(%R0)\n"
                "       ld      4,%O0+40(%R0)\n"
                "       ld      6,%O0+56(%R0)"
                : : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       lfpc    %0\n"
                "       ld      1,%O0+16(%R0)\n"
                "       ld      3,%O0+32(%R0)\n"
                "       ld      5,%O0+48(%R0)\n"
                "       ld      7,%O0+64(%R0)\n"
                "       ld      8,%O0+72(%R0)\n"
                "       ld      9,%O0+80(%R0)\n"
                "       ld      10,%O0+88(%R0)\n"
                "       ld      11,%O0+96(%R0)\n"
                "       ld      12,%O0+104(%R0)\n"
                "       ld      13,%O0+112(%R0)\n"
                "       ld      14,%O0+120(%R0)\n"
                "       ld      15,%O0+128(%R0)\n"
                : : "Q" (*fpregs));
}

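/*
 * Save/restore the sixteen access registers a0-a15 to/from the given
 * array of sixteen unsigned ints; stam/lam transfer all of them in a
 * single instruction.
 */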
static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

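/*
 * Only tasks that own an mm (i.e. not kernel threads) carry user space
 * floating point and access register state, so switch_to only saves
 * and restores these registers when prev->mm respectively next->mm is
 * set.
 */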
#define switch_to(prev,next,last) do {                                  \
        if (prev->mm) {                                                 \
                save_fp_regs(&prev->thread.fp_regs);                    \
                save_access_regs(&prev->thread.acrs[0]);                \
        }                                                               \
        if (next->mm) {                                                 \
                restore_fp_regs(&next->thread.fp_regs);                 \
                restore_access_regs(&next->thread.acrs[0]);             \
                update_per_regs(next);                                  \
        }                                                               \
        prev = __switch_to(prev,next);                                  \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

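/*
 * Pseudo page fault support: with CONFIG_PFAULT disabled, the stubs
 * below make pfault_init() report failure (-1) and pfault_fini() a
 * no-op.
 */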
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

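/*
 * finish_arch_switch runs on the incoming task after the context
 * switch: it restores the address space limit of the new task
 * (set_fs) and accounts the virtual cpu time consumed by the
 * outgoing task.
 */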
#define finish_arch_switch(prev) do {                                        \
        set_fs(current->thread.mm_segment);                                  \
        account_vtime(prev, current);                                        \
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that all memory
 * ops have completed with respect to the other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()

#define set_mb(var, value)      do { var = value; mb(); } while (0)

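/*
 * __ctl_load/__ctl_store load respectively store the control registers
 * low..high from/to the given array, using lctlg/stctg on 64 bit and
 * lctl/stctl on 31 bit.
 */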
#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctlg   %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctg   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctl    %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctl   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#endif /* __s390x__ */

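/*
 * Example (sketch, control register chosen for illustration only):
 * read control register 0 into a local variable and write it back
 * unchanged.
 *
 *      unsigned long cr0;
 *
 *      __ctl_store(cr0, 0, 0);
 *      __ctl_load(cr0, 0, 0);
 *
 * __ctl_set_bit/__ctl_clear_bit below use this store/modify/load
 * sequence to flip a single bit (counted from the least significant
 * bit) of one control register on the local CPU.
 */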
#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})

/*
 * Set the PSW mask, except for the first byte, which is preserved
 * from the current PSW and not changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

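/*
 * Enable/disable machine check interruptions on the local CPU by
 * rebuilding the PSW mask with or without PSW_MASK_MCHECK.
 */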
#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

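/*
 * ctl_set_bit/ctl_clear_bit change a control register bit on all CPUs
 * when CONFIG_SMP is enabled (via smp_ctl_set_bit/smp_ctl_clear_bit),
 * and only on the local CPU otherwise.
 */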
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)        /* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
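/*
 * nr >> 3 selects the byte within the lowcore stfle facility list and
 * 0x80 >> (nr & 7) the bit within that byte, so bit 0 is the most
 * significant bit of the first byte.
 */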
static inline int test_facility(unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return 0;
        ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
        return (*ptr & (0x80 >> (nr & 7))) != 0;
}

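/* Return the CPU address of the executing CPU (STORE CPU ADDRESS). */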
static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

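/*
 * Test whether the page at addr is accessible by means of the TEST
 * PROTECTION instruction; returns the resulting condition code (0-3),
 * or -EFAULT if the instruction itself faults (handled through the
 * exception table entry).
 */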
static inline int tprot(unsigned long addr)
{
        int rc = -EFAULT;

        asm volatile(
                "       tprot   0(%1),0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+d" (rc) : "a" (addr) : "cc");
        return rc;
}

#endif /* __KERNEL__ */

#endif