linux/arch/s390/include/asm/system.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       std     0,8(%1)\n"
                "       std     2,24(%1)\n"
                "       std     4,40(%1)\n"
                "       std     6,56(%1)"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       stfpc   0(%1)\n"
                "       std     1,16(%1)\n"
                "       std     3,32(%1)\n"
                "       std     5,48(%1)\n"
                "       std     7,64(%1)\n"
                "       std     8,72(%1)\n"
                "       std     9,80(%1)\n"
                "       std     10,88(%1)\n"
                "       std     11,96(%1)\n"
                "       std     12,104(%1)\n"
                "       std     13,112(%1)\n"
                "       std     14,120(%1)\n"
                "       std     15,128(%1)\n"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       ld      0,8(%0)\n"
                "       ld      2,24(%0)\n"
                "       ld      4,40(%0)\n"
                "       ld      6,56(%0)"
                : : "a" (fpregs), "m" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       lfpc    0(%0)\n"
                "       ld      1,16(%0)\n"
                "       ld      3,32(%0)\n"
                "       ld      5,48(%0)\n"
                "       ld      7,64(%0)\n"
                "       ld      8,72(%0)\n"
                "       ld      9,80(%0)\n"
                "       ld      10,88(%0)\n"
                "       ld      11,96(%0)\n"
                "       ld      12,104(%0)\n"
                "       ld      13,112(%0)\n"
                "       ld      14,120(%0)\n"
                "       ld      15,128(%0)\n"
                : : "a" (fpregs), "m" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
}

#define switch_to(prev,next,last) do {                                       \
        if (prev == next)                                                    \
                break;                                                       \
        save_fp_regs(&prev->thread.fp_regs);                                 \
        restore_fp_regs(&next->thread.fp_regs);                              \
        save_access_regs(&prev->thread.acrs[0]);                             \
        restore_access_regs(&next->thread.acrs[0]);                          \
        prev = __switch_to(prev,next);                                       \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()       do { } while (0)
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);

#define finish_arch_switch(prev) do {                                        \
        set_fs(current->thread.mm_segment);                                  \
        account_vtime(prev, current);                                        \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)                                                       \
({                                                                        \
        __typeof__(*(ptr)) __ret;                                         \
        __ret = (__typeof__(*(ptr)))                                      \
                __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
        __ret;                                                            \
})

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,0(%4)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,0(%4)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 4:
                asm volatile(
                        "       l       %0,0(%3)\n"
                        "0:     cs      %0,%2,0(%3)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(int *) ptr)
                        : "memory", "cc");
                return old;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       lg      %0,0(%3)\n"
                        "0:     csg     %0,%2,0(%3)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(long *) ptr)
                        : "memory", "cc");
                return old;
#endif /* __s390x__ */
        }
        __xchg_called_with_bad_pointer();
        return x;
}

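/*
 * Illustrative sketch, not part of the original header: one way a caller
 * might use xchg() to atomically take ownership of a software flag word.
 * The helper name and the "flag" parameter are hypothetical.
 */
static inline unsigned int example_xchg_take_flag(unsigned int *flag)
{
        /* Atomically fetch the previous value and leave 0 behind. */
        return xchg(flag, 0);
}
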
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
                                        (unsigned long)(n), sizeof(*(ptr))))

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%2\n"
                        "       or      %1,%3\n"
                        "       cs      %0,%1,0(%4)\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(255 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%2\n"
                        "       or      %1,%3\n"
                        "       cs      %0,%1,0(%4)\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(65535 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 4:
                asm volatile(
                        "       cs      %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       csg     %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#endif /* __s390x__ */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

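/*
 * Illustrative sketch, not part of the original header: a lock-free
 * increment built on cmpxchg().  As the comment above describes, success
 * is detected by comparing the returned value with the expected old one.
 */
static inline int example_cmpxchg_inc(int *counter)
{
        int old, prev;

        do {
                old = *counter;
                prev = cmpxchg(counter, old, old + 1);
        } while (prev != old);
        return old + 1;
}
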
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that all memory
 * ops have completed with respect to other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)

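/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb().  The "data" and
 * "ready" locations are hypothetical and supplied by the caller.
 */
static inline void example_publish(int *data, int *ready, int value)
{
        *data = value;
        smp_wmb();              /* order the payload store before the flag */
        *ready = 1;
}

static inline int example_consume(int *data, int *ready)
{
        if (!*ready)
                return -1;
        smp_rmb();              /* order the flag load before the payload */
        return *data;
}
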
#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctlg   %1,%2,0(%0)\n"                  \
                : : "a" (&array), "i" (low), "i" (high),        \
                    "m" (*(addrtype *)(&array)));               \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctg   %2,%3,0(%1)\n"                  \
                : "=m" (*(addrtype *)(&array))                  \
                : "a" (&array), "i" (low), "i" (high));         \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctl    %1,%2,0(%0)\n"                  \
                : : "a" (&array), "i" (low), "i" (high),        \
                    "m" (*(addrtype *)(&array)));               \
})

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctl   %2,%3,0(%1)\n"                  \
                : "=m" (*(addrtype *)(&array))                  \
                : "a" (&array), "i" (low), "i" (high));         \
        })

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})

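/*
 * Illustrative sketch, not part of the original header: reading the
 * current contents of control register 0 with __ctl_store().  The
 * register numbers must be compile-time constants ("i" constraints).
 */
static inline unsigned long example_read_cr0(void)
{
        unsigned long cr0;

        __ctl_store(cr0, 0, 0);
        return cr0;
}
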
#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                      unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 1:
        case 2:
        case 4:
#ifdef __s390x__
        case 8:
#endif
                return __cmpxchg(ptr, old, new, size);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

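/*
 * Illustrative sketch, not part of the original header: cmpxchg_local()
 * used on data that only the current CPU modifies.  On s390 the supported
 * sizes map to the same compare-and-swap as cmpxchg() (see above).
 */
static inline unsigned long example_local_update(unsigned long *slot,
                                                 unsigned long old,
                                                 unsigned long new)
{
        /* Returns the previous contents of *slot. */
        return cmpxchg_local(slot, old, new);
}
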
/*
 * Set the PSW mask, except for the first byte, which is left
 * unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

static inline unsigned int stfl(void)
{
        asm volatile(
                "       .insn   s,0xb2b10000,0(0)\n" /* stfl */
                "0:\n"
                EX_TABLE(0b,0b));
        return S390_lowcore.stfl_fac_list;
}

static inline int __stfle(unsigned long long *list, int doublewords)
{
        typedef struct { unsigned long long _[doublewords]; } addrtype;
        register unsigned long __nr asm("0") = doublewords - 1;

        asm volatile(".insn s,0xb2b00000,%0" /* stfle */
                     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
        return __nr + 1;
}

static inline int stfle(unsigned long long *list, int doublewords)
{
        if (!(stfl() & (1UL << 24)))
                return -EOPNOTSUPP;
        return __stfle(list, doublewords);
}

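/*
 * Illustrative sketch, not part of the original header: querying one bit
 * of the extended facility list with stfle().  The facility-bit numbering
 * (leftmost bit of the list is bit 0) follows the Principles of Operation;
 * the helper name and the 4-doubleword buffer size are assumptions.
 */
static inline int example_test_facility(unsigned int nr)
{
        unsigned long long fac_list[4];
        int dw;

        dw = stfle(fac_list, 4);
        if (dw < 0)
                return 0;       /* extended facility list not available */
        if (dw > 4)
                dw = 4;         /* only 4 doublewords were stored */
        if (nr >= dw * 64)
                return 0;
        return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1;
}
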
static inline unsigned short stap(void)
{
        unsigned short cpu_address;

        asm volatile("stap %0" : "=m" (cpu_address));
        return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#ifdef CONFIG_TRACE_IRQFLAGS
extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif

static inline int tprot(unsigned long addr)
{
        int rc = -EFAULT;

        asm volatile(
                "       tprot   0(%1),0\n"
                "0:     ipm     %0\n"
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+d" (rc) : "a" (addr) : "cc");
        return rc;
}

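/*
 * Illustrative sketch, not part of the original header: using tprot() to
 * probe an address before touching it.  Condition code 0 means fetching
 * and storing are permitted; a negative return value means the tprot
 * instruction itself faulted.  The helper name is hypothetical.
 */
static inline int example_addr_accessible(unsigned long addr)
{
        return tprot(addr) == 0;
}
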
#endif /* __KERNEL__ */

#endif