linux/arch/s390/include/asm/system.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       std     0,%O0+8(%R0)\n"
                "       std     2,%O0+24(%R0)\n"
                "       std     4,%O0+40(%R0)\n"
                "       std     6,%O0+56(%R0)"
                : "=Q" (*fpregs) : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       stfpc   %0\n"
                "       std     1,%O0+16(%R0)\n"
                "       std     3,%O0+32(%R0)\n"
                "       std     5,%O0+48(%R0)\n"
                "       std     7,%O0+64(%R0)\n"
                "       std     8,%O0+72(%R0)\n"
                "       std     9,%O0+80(%R0)\n"
                "       std     10,%O0+88(%R0)\n"
                "       std     11,%O0+96(%R0)\n"
                "       std     12,%O0+104(%R0)\n"
                "       std     13,%O0+112(%R0)\n"
                "       std     14,%O0+120(%R0)\n"
                "       std     15,%O0+128(%R0)\n"
                : "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       ld      0,%O0+8(%R0)\n"
                "       ld      2,%O0+24(%R0)\n"
                "       ld      4,%O0+40(%R0)\n"
                "       ld      6,%O0+56(%R0)"
                : : "Q" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       lfpc    %0\n"
                "       ld      1,%O0+16(%R0)\n"
                "       ld      3,%O0+32(%R0)\n"
                "       ld      5,%O0+48(%R0)\n"
                "       ld      7,%O0+64(%R0)\n"
                "       ld      8,%O0+72(%R0)\n"
                "       ld      9,%O0+80(%R0)\n"
                "       ld      10,%O0+88(%R0)\n"
                "       ld      11,%O0+96(%R0)\n"
                "       ld      12,%O0+104(%R0)\n"
                "       ld      13,%O0+112(%R0)\n"
                "       ld      14,%O0+120(%R0)\n"
                "       ld      15,%O0+128(%R0)\n"
                : : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev,next,last) do {                                  \
        if (prev->mm) {                                                 \
                save_fp_regs(&prev->thread.fp_regs);                    \
                save_access_regs(&prev->thread.acrs[0]);                \
        }                                                               \
        if (next->mm) {                                                 \
                restore_fp_regs(&next->thread.fp_regs);                 \
                restore_access_regs(&next->thread.acrs[0]);             \
                update_per_regs(next);                                  \
        }                                                               \
        prev = __switch_to(prev,next);                                  \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

#define finish_arch_switch(prev) do {                                        \
        set_fs(current->thread.mm_segment);                                  \
        account_vtime(prev, current);                                        \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)                                                       \
({                                                                        \
        __typeof__(*(ptr)) __ret;                                         \
        __ret = (__typeof__(*(ptr)))                                      \
                __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
        __ret;                                                            \
})

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,%4\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,%4\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,%4\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)),
                          "Q" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 4:
                asm volatile(
                        "       l       %0,%3\n"
                        "0:     cs      %0,%2,%3\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=Q" (*(int *) ptr)
                        : "d" (x), "Q" (*(int *) ptr)
                        : "memory", "cc");
                return old;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       lg      %0,%3\n"
                        "0:     csg     %0,%2,%3\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "Q" (*(long *) ptr)
                        : "memory", "cc");
                return old;
#endif /* __s390x__ */
        }
        __xchg_called_with_bad_pointer();
        return x;
}

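/*
 * Illustrative sketch, not part of the original header: xchg() on an int
 * word can serve as a simple test-and-set. The helper name and the lock
 * convention (0 = free, 1 = taken) are hypothetical.
 */
static inline void __example_xchg_lock(unsigned int *lock)
{
        /* spin until the previous value returned by xchg() is 0 (free) */
        while (xchg(lock, 1) != 0)
                ;
}
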
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
                                        (unsigned long)(n), sizeof(*(ptr))))

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,%2\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%3\n"
                        "       or      %1,%4\n"
                        "       cs      %0,%1,%2\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
                        : "d" (old << shift), "d" (new << shift),
                          "d" (~(255 << shift)), "Q" (*(int *) ptr)
                        : "memory", "cc");
                return prev >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,%2\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%3\n"
                        "       or      %1,%4\n"
                        "       cs      %0,%1,%2\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
                        : "d" (old << shift), "d" (new << shift),
                          "d" (~(65535 << shift)), "Q" (*(int *) ptr)
                        : "memory", "cc");
                return prev >> shift;
        case 4:
                asm volatile(
                        "       cs      %0,%3,%1\n"
                        : "=&d" (prev), "=Q" (*(int *) ptr)
                        : "0" (old), "d" (new), "Q" (*(int *) ptr)
                        : "memory", "cc");
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       csg     %0,%3,%1\n"
                        : "=&d" (prev), "=Q" (*(long *) ptr)
                        : "0" (old), "d" (new), "Q" (*(long *) ptr)
                        : "memory", "cc");
                return prev;
#endif /* __s390x__ */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

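/*
 * Illustrative sketch, not part of the original header: the usual
 * read/compute/cmpxchg retry loop built on cmpxchg() above. The helper
 * name is hypothetical; it returns the value observed before the
 * increment, like an atomic fetch-and-add of 1.
 */
static inline unsigned int __example_fetch_inc(unsigned int *counter)
{
        unsigned int old, prev;

        old = *counter;
        /* retry until no other CPU changed *counter between load and cs */
        while ((prev = cmpxchg(counter, old, old + 1)) != old)
                old = prev;
        return old;
}
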
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that all memory
 * ops have completed with respect to other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)

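/*
 * Illustrative sketch, not part of the original header: a minimal
 * publish/consume pairing of wmb()/rmb(). The data/flag layout and the
 * helper names are hypothetical.
 */
static inline void __example_publish(int *data, int *flag, int value)
{
        *data = value;
        wmb();          /* order the data store before the flag store */
        *flag = 1;
}

static inline int __example_consume(int *data, int *flag)
{
        if (!*flag)
                return -1;
        rmb();          /* order the flag load before the data load */
        return *data;
}
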
#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctlg   %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctg   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctl    %1,%2,%0\n"                     \
                : : "Q" (*(addrtype *)(&array)),                \
                    "i" (low), "i" (high));                     \
})

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctl   %1,%2,%0\n"                     \
                : "=Q" (*(addrtype *)(&array))                  \
                : "i" (low), "i" (high));                       \
        })

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})

#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                      unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 1:
        case 2:
        case 4:
#ifdef __s390x__
        case 8:
#endif
                return __cmpxchg(ptr, old, new, size);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

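/*
 * Illustrative sketch, not part of the original header: cmpxchg64_local()
 * has the same calling convention as cmpxchg() but is only guaranteed to
 * be atomic with respect to the current CPU, which is enough for data that
 * is never touched cross-CPU. The helper name and the 64-bit sequence
 * counter are hypothetical.
 */
static inline unsigned long long __example_local_next_seq(unsigned long long *seq)
{
        unsigned long long old, prev;

        old = *seq;
        while ((prev = cmpxchg64_local(seq, old, old + 1ULL)) != old)
                old = prev;
        return old + 1ULL;
}
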
/*
 * Used to set the psw mask, except for the first byte,
 * which won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

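/*
 * Illustrative sketch, not part of the original header: machine check
 * interruptions can be fenced off around a short critical section with
 * the helpers above. The function name and the callback are hypothetical.
 */
static inline void __example_mcck_protected(void (*critical)(void))
{
        local_mcck_disable();   /* clear PSW_MASK_MCHECK in the psw mask */
        critical();
        local_mcck_enable();    /* restore the full kernel psw mask */
}
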
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)        /* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return 0;
        ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
        return (*ptr & (0x80 >> (nr & 7))) != 0;
}

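/*
 * Illustrative sketch, not part of the original header: with this MSB-first
 * numbering, facility bit 2 is tested in byte 0 of stfle_fac_list under the
 * mask 0x80 >> 2 == 0x20. The wrapper name and the facility number are
 * hypothetical placeholders.
 */
static inline int __example_has_facility_2(void)
{
        return test_facility(2);        /* byte 0, mask 0x20 */
}
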
static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

static inline int tprot(unsigned long addr)
{
        int rc = -EFAULT;

        asm volatile(
                "       tprot   0(%1),0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+d" (rc) : "a" (addr) : "cc");
        return rc;
}

#endif /* __KERNEL__ */

#endif