linux/arch/s390/include/asm/system.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

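/*
 * Note added for clarity (not in the original source): the base
 * architecture only provides floating point registers 0, 2, 4 and 6,
 * so those four are saved and restored unconditionally below.  The
 * remaining twelve registers and the floating point control register
 * are only present on machines with the IEEE floating point facility,
 * hence the MACHINE_HAS_IEEE check before touching them.
 */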
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       std     0,%O0+8(%R0)\n"
                "       std     2,%O0+24(%R0)\n"
                "       std     4,%O0+40(%R0)\n"
                "       std     6,%O0+56(%R0)"
                : "=Q" (*fpregs) : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       stfpc   %0\n"
                "       std     1,%O0+16(%R0)\n"
                "       std     3,%O0+32(%R0)\n"
                "       std     5,%O0+48(%R0)\n"
                "       std     7,%O0+64(%R0)\n"
                "       std     8,%O0+72(%R0)\n"
                "       std     9,%O0+80(%R0)\n"
                "       std     10,%O0+88(%R0)\n"
                "       std     11,%O0+96(%R0)\n"
                "       std     12,%O0+104(%R0)\n"
                "       std     13,%O0+112(%R0)\n"
                "       std     14,%O0+120(%R0)\n"
                "       std     15,%O0+128(%R0)\n"
                : "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       ld      0,%O0+8(%R0)\n"
                "       ld      2,%O0+24(%R0)\n"
                "       ld      4,%O0+40(%R0)\n"
                "       ld      6,%O0+56(%R0)"
                : : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       lfpc    %0\n"
                "       ld      1,%O0+16(%R0)\n"
                "       ld      3,%O0+32(%R0)\n"
                "       ld      5,%O0+48(%R0)\n"
                "       ld      7,%O0+64(%R0)\n"
                "       ld      8,%O0+72(%R0)\n"
                "       ld      9,%O0+80(%R0)\n"
                "       ld      10,%O0+88(%R0)\n"
                "       ld      11,%O0+96(%R0)\n"
                "       ld      12,%O0+104(%R0)\n"
                "       ld      13,%O0+112(%R0)\n"
                "       ld      14,%O0+120(%R0)\n"
                "       ld      15,%O0+128(%R0)\n"
                : : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

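/*
 * Note added for clarity (not in the original source): floating point
 * and access registers are saved/restored only for tasks that own an
 * mm, so kernel threads (mm == NULL) skip that work.  __switch_to()
 * performs the actual stack and register switch and returns a pointer
 * to the task we switched away from, which is assigned back to 'prev'.
 */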
#define switch_to(prev,next,last) do {                                  \
        if (prev->mm) {                                                 \
                save_fp_regs(&prev->thread.fp_regs);                    \
                save_access_regs(&prev->thread.acrs[0]);                \
        }                                                               \
        if (next->mm) {                                                 \
                restore_fp_regs(&next->thread.fp_regs);                 \
                restore_access_regs(&next->thread.acrs[0]);             \
                update_per_regs(next);                                  \
        }                                                               \
        prev = __switch_to(prev,next);                                  \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);

#define finish_arch_switch(prev) do {                                        \
        set_fs(current->thread.mm_segment);                                  \
        account_vtime(prev, current);                                        \
} while (0)

#define nop() asm volatile("nop")
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instructions in that it
 * performs a checkpoint synchronization and makes sure that all memory
 * operations have completed with respect to other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)
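/*
 * Illustrative sketch (added, not part of the original source) of the
 * store/load pairing these barriers exist for; prepare_data(),
 * use_data(), 'buf' and 'ready' are hypothetical shared state:
 *
 *      producer:
 *              buf = prepare_data();
 *              smp_wmb();      // order the data store before the flag store
 *              ready = 1;
 *
 *      consumer:
 *              while (!ready)
 *                      cpu_relax();
 *              smp_rmb();      // order the flag load before the data loads
 *              use_data(buf);
 *
 * On s390 all of these expand to the same "bcr 15,0" serialization, so
 * the mb/rmb/wmb distinction only matters for portable code.
 */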

#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctlg   %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctg   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctl    %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
})

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctl   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})
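/*
 * Note added for clarity (not in the original source): __ctl_set_bit
 * and __ctl_clear_bit store the selected control register into a
 * temporary with stctg/stctl, modify the copy, and reload it with
 * lctlg/lctl.  The bit number is counted from the least significant
 * end (1UL << bit), which is not the left-to-right numbering used in
 * the Principles of Operation.  A hypothetical call:
 *
 *      __ctl_set_bit(0, 10);   // OR the value 1UL << 10 into CR0
 *
 * On SMP kernels ctl_set_bit/ctl_clear_bit map to smp_ctl_set_bit/
 * smp_ctl_clear_bit (see below) so that the update can be applied on
 * all CPUs rather than just the current one.
 */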

/*
 * Set the PSW mask, except for the first byte, which is left unchanged
 * by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)        /* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return 0;
        ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
        return (*ptr & (0x80 >> (nr & 7))) != 0;
}
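/*
 * Worked example (added, not part of the original source): for a
 * facility number such as nr = 76 the byte index is 76 >> 3 = 9 and
 * the mask is 0x80 >> (76 & 7) = 0x08, so the check reads byte 9 of
 * stfle_fac_list and tests bit 4 of that byte, counted from the most
 * significant bit, matching the MSB-is-bit-0 convention noted above.
 */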

static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

static inline int tprot(unsigned long addr)
{
        int rc = -EFAULT;

        asm volatile(
                "       tprot   0(%1),0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+d" (rc) : "a" (addr) : "cc");
        return rc;
}
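/*
 * Usage sketch (added, not part of the original source): tprot()
 * returns the TEST PROTECTION condition code (0..3) on success, or
 * -EFAULT if the probe itself raised an exception, since the EX_TABLE
 * entry then skips the ipm and the initial -EFAULT stays in rc.  A
 * caller probing for accessible storage might therefore do:
 *
 *      if (tprot(addr) < 0)
 *              return 0;       // no accessible storage at this address
 */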

#endif /* __KERNEL__ */

#endif