/* linux/arch/s390/include/asm/processor.h */
   1/*
   2 *  S390 version
   3 *    Copyright IBM Corp. 1999
   4 *    Author(s): Hartmut Penner (hp@de.ibm.com),
   5 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
   6 *
   7 *  Derived from "include/asm-i386/processor.h"
   8 *    Copyright (C) 1994, Linus Torvalds
   9 */
  10
  11#ifndef __ASM_S390_PROCESSOR_H
  12#define __ASM_S390_PROCESSOR_H
  13
  14#include <linux/const.h>
  15
/*
 * CPU interruption flags (CIF): per-cpu state bits kept in
 * S390_lowcore.cpu_flags and manipulated with the *_cpu_flag()
 * helpers below.
 */
#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_FPU			3	/* restore FPU registers */
#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */

/* Mask forms of the bit numbers above (_BITUL from <linux/const.h>). */
#define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE		_BITUL(CIF_ASCE)
#define _CIF_NOHZ_DELAY		_BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU		_BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT	_BITUL(CIF_ENABLED_WAIT)
  29
  30#ifndef __ASSEMBLY__
  31
  32#include <linux/linkage.h>
  33#include <linux/irqflags.h>
  34#include <asm/cpu.h>
  35#include <asm/page.h>
  36#include <asm/ptrace.h>
  37#include <asm/setup.h>
  38#include <asm/runtime_instr.h>
  39#include <asm/fpu/types.h>
  40#include <asm/fpu/internal.h>
  41
  42static inline void set_cpu_flag(int flag)
  43{
  44        S390_lowcore.cpu_flags |= (1UL << flag);
  45}
  46
  47static inline void clear_cpu_flag(int flag)
  48{
  49        S390_lowcore.cpu_flags &= ~(1UL << flag);
  50}
  51
  52static inline int test_cpu_flag(int flag)
  53{
  54        return !!(S390_lowcore.cpu_flags & (1UL << flag));
  55}
  56
  57/*
  58 * Test CIF flag of another CPU. The caller needs to ensure that
  59 * CPU hotplug can not happen, e.g. by disabling preemption.
  60 */
  61static inline int test_cpu_flag_of(int flag, int cpu)
  62{
  63        struct lowcore *lc = lowcore_ptr[cpu];
  64        return !!(lc->cpu_flags & (1UL << flag));
  65}
  66
/* NO_HZ hook: keep the tick on this CPU while CIF_NOHZ_DELAY is set. */
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
  68
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 * basr with a zero branch operand does not branch; it only stores the
 * address of the next instruction into the output register.
 */
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
  74
/* Store the CPU identification (STIDP) of the executing CPU into *ptr. */
static inline void get_cpu_id(struct cpuid *ptr)
{
	asm volatile("stidp %0" : "=Q" (*ptr));
}
  79
  80void s390_adjust_jiffies(void);
  81void s390_update_cpu_mhz(void);
  82void cpu_detect_mhz_feature(void);
  83
  84extern const struct seq_operations cpuinfo_op;
  85extern int sysctl_ieee_emulation_warnings;
  86extern void execve_tail(void);
  87
/*
 * User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit
 * (the effective limit is the mm's asce_limit).
 */

#define TASK_SIZE_OF(tsk)	((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
					(1UL << 30) : (1UL << 41))
#define TASK_SIZE		TASK_SIZE_OF(current)
#define TASK_MAX_SIZE		(1UL << 53)	/* 8 PB hard upper limit */

#define STACK_TOP		(1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX		(1UL << 42)

/* This architecture supplies its own mmap layout selection. */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
 102
/*
 * Per-thread address space segment (stored in thread_struct.mm_segment).
 * NOTE(review): ar4 is presumably consumed by the uaccess/get_fs code
 * defined elsewhere — confirm against asm/uaccess.h.
 */
typedef struct {
	__u32 ar4;
} mm_segment_t;
 106
/*
 * Thread structure: per-task architectural state that is preserved
 * across context switches (everything not kept in pt_regs).
 */
struct thread_struct {
	unsigned int  acrs[NUM_ACRS];	/* access registers */
	unsigned long ksp;		/* kernel stack pointer             */
	mm_segment_t mm_segment;	/* address space segment (see mm_segment_t) */
	unsigned long gmap_addr;	/* address of last gmap fault. */
	unsigned int gmap_write_flag;	/* gmap fault write indication */
	unsigned int gmap_int_code;	/* int code of last gmap fault */
	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
	struct per_regs per_user;	/* User specified PER registers */
	struct per_event per_event;	/* Cause of the last PER trap */
	unsigned long per_flags;	/* Flags to control debug behavior (PER_FLAG_*) */
	/* pfault_wait is used to block the process on a pfault event */
	unsigned long pfault_wait;
	struct list_head list;		/* list linkage — presumably the pfault wait list; verify in pfault code */
	/* cpu runtime instrumentation */
	struct runtime_instr_cb *ri_cb;
	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
	/*
	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
	struct fpu fpu;			/* FP and VX register save area */
};
 133
/* Values for thread_struct.per_flags: */
/* Flag to disable transactions. */
#define PER_FLAG_NO_TE			1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND		2UL
/* Flag to specify random transaction abort mode:
 * - abort each transaction at a random instruction before TEND if set.
 * - abort random transactions at a random instruction if cleared.
 */
#define PER_FLAG_TE_ABORT_RAND_TEND	4UL

/* Alias so the struct can be used without the 'struct' keyword. */
typedef struct thread_struct thread_struct;
 145
/*
 * Stack layout of a C stack frame.
 * Two variants: the normal ABI layout (back chain first) and the
 * mirror-image layout used when the compiler packs the stack
 * (__PACK_STACK), where the back chain sits at the highest address.
 */
#ifndef __PACK_STACK
struct stack_frame {
	unsigned long back_chain;	/* link to the caller's frame */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned int  empty2[8];
};
#else
struct stack_frame {
	unsigned long empty1[5];
	unsigned int  empty2[8];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned long back_chain;	/* link to the caller's frame */
};
#endif
 164
/* Minimum alignment for the task/thread structure. */
#define ARCH_MIN_TASKALIGN	8

/*
 * Static thread_struct initializer for the init task: the kernel stack
 * pointer starts at the top of init_stack and the fpu register pointer
 * at the static fprs save area.
 */
#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
}
 171
/*
 * Do necessary setup to start up a new thread: build a user-mode PSW in
 * the extended (EA+BA = 64-bit) addressing mode, point it at the new
 * entry address, and set the user stack pointer (%r15).
 */
#define start_thread(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)
 181
/*
 * Same as start_thread() but for 31-bit compat tasks: basic addressing
 * only (no PSW_MASK_EA) and the mm's region table is downgraded via
 * crst_table_downgrade() — presumably to the 31-bit address space
 * limit; the helper is defined elsewhere.
 */
#define start_thread31(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	crst_table_downgrade(current->mm);				\
	execve_tail();							\
} while (0)
 189
 190/* Forward declaration, a strange C thing */
 191struct task_struct;
 192struct mm_struct;
 193struct seq_file;
 194
 195typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
 196void dump_trace(dump_trace_func_t func, void *data,
 197                struct task_struct *task, unsigned long sp);
 198
 199void show_cacheinfo(struct seq_file *m);
 200
 201/* Free all resources held by a thread. */
 202extern void release_thread(struct task_struct *);
 203
 204/*
 205 * Return saved PC of a blocked thread.
 206 */
 207extern unsigned long thread_saved_pc(struct task_struct *t);
 208
 209unsigned long get_wchan(struct task_struct *p);
/* User register frame saved at the very top of the task's kernel stack. */
#define task_pt_regs(tsk) ((struct pt_regs *) \
	(task_stack_page(tsk) + THREAD_SIZE) - 1)
/* User-space instruction address / stack pointer of a (stopped) task. */
#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])

/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 217
/* Return the current stack pointer (la 0(%r15) copies %r15 unchanged). */
static inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;

	asm volatile("la %0,0(15)" : "=a" (sp));
	return sp;
}
 225
/* Store CPU address (STAP): return the CPU address of the executing CPU. */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}
 233
/*
 * Give up the time slice of the virtual PU.
 */
void cpu_relax(void);

/* Low-latency spin-wait variant: only a compiler barrier here. */
#define cpu_relax_lowlatency()  barrier()

/* Attribute selector (asi) values for __ecag() below. */
#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1
 243
/*
 * Extract CPU attribute: run the ECAG instruction (emitted via .insn)
 * with attribute selector 'asi' and parameter 'parm' packed into the
 * base register operand (asi << 8 | parm); returns the attribute value.
 */
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
	unsigned long val;

	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (asi << 8 | parm));
	return val;
}
 252
/* Set the PSW storage-protection key via the SPKA instruction. */
static inline void psw_set_key(unsigned int key)
{
	asm volatile("spka 0(%0)" : : "d" (key));
}
 257
/*
 * Set PSW to specified value: load it wholesale with LPSWE.  Execution
 * continues at psw.addr in the new state, so the caller must supply a
 * complete, valid PSW.
 */
static inline void __load_psw(psw_t psw)
{
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
 265
/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction:
 * larl loads the address of local label 1, stg stores it into the
 * address half of the psw (offset 8, addressed via the %O/%R operand
 * modifiers), and lpswe loads the psw — so execution resumes at 1:
 * with the new mask in effect.
 */
static inline void __load_psw_mask(unsigned long mask)
{
	unsigned long addr;
	psw_t psw;

	psw.mask = mask;

	asm volatile(
		"	larl	%0,1f\n"
		"	stg	%0,%O1+8(%R1)\n"
		"	lpswe	%1\n"
		"1:"
		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
}
 284
/*
 * Extract current PSW mask: EPSW stores the two 32-bit halves of the
 * PSW mask into reg1 (high) and reg2 (low); combine them into one
 * 64-bit value.
 */
static inline unsigned long __extract_psw(void)
{
	unsigned int reg1, reg2;

	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
 295
 296static inline void local_mcck_enable(void)
 297{
 298        __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
 299}
 300
 301static inline void local_mcck_disable(void)
 302{
 303        __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
 304}
 305
 306/*
 307 * Rewind PSW instruction address by specified number of bytes.
 308 */
 309static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 310{
 311        unsigned long mask;
 312
 313        mask = (psw.mask & PSW_MASK_EA) ? -1UL :
 314               (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
 315                                          (1UL << 24) - 1;
 316        return (psw.addr - ilc) & mask;
 317}
 318
 319/*
 320 * Function to stop a processor until the next interrupt occurs
 321 */
 322void enabled_wait(void);
 323
/*
 * Function to drop a processor into disabled wait state.  The wait
 * PSW's address field carries 'code' — presumably as a diagnostic
 * identifier visible in a dump; confirm against callers.
 */
static inline void __noreturn disabled_wait(unsigned long code)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
	psw.addr = code;
	__load_psw(psw);
	while (1);	/* not reached; satisfies __noreturn */
}
 336
 337/*
 338 * Basic Machine Check/Program Check Handler.
 339 */
 340
 341extern void s390_base_mcck_handler(void);
 342extern void s390_base_pgm_handler(void);
 343extern void s390_base_ext_handler(void);
 344
 345extern void (*s390_base_mcck_handler_fn)(void);
 346extern void (*s390_base_pgm_handler_fn)(void);
 347extern void (*s390_base_ext_handler_fn)(void);
 348
 349#define ARCH_LOW_ADDRESS_LIMIT  0x7fffffffUL
 350
 351extern int memcpy_real(void *, void *, size_t);
 352extern void memcpy_absolute(void *, void *, size_t);
 353
 354#define mem_assign_absolute(dest, val) {                        \
 355        __typeof__(dest) __tmp = (val);                         \
 356                                                                \
 357        BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));             \
 358        memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));        \
 359}
 360
 361#endif /* __ASSEMBLY__ */
 362
 363#endif /* __ASM_S390_PROCESSOR_H */
 364