linux/arch/sh/include/asm/thread_info.h
#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002  Niibe Yutaka
 *
 * The copyright of the original i386 version is:
 *
 *  Copyright (C) 2002  David Howells (dhowells@redhat.com)
 *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
#ifdef __KERNEL__

#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE        (1 << 0)        /* write access */
#define FAULT_CODE_INITIAL      (1 << 1)        /* initial page write */
#define FAULT_CODE_ITLB         (1 << 2)        /* ITLB miss */
#define FAULT_CODE_PROT         (1 << 3)        /* protection fault */
#define FAULT_CODE_USER         (1 << 4)        /* user-mode access */

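/*
 * Illustrative example (not part of the original header): the bits above
 * are meant to be OR'd together into a single fault code, e.g.
 *
 *      FAULT_CODE_WRITE | FAULT_CODE_USER      (== 0x11, a user-mode write)
 *
 * Such a value is what set_thread_fault_code()/get_thread_fault_code()
 * further down store and retrieve.
 */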
#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
        struct task_struct      *task;          /* main task structure */
        unsigned long           flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
        mm_segment_t            addr_limit;     /* thread address space */
        unsigned long           previous_sp;    /* sp of previous stack in case
                                                   of nested IRQ stacks */
        __u8                    supervisor_stack[0];    /* kernel stack area starts here */
};

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT    12
#else
#define THREAD_SHIFT    13
#endif

#define THREAD_SIZE     (1 << THREAD_SHIFT)
#define STACK_WARN      (THREAD_SIZE >> 3)
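/*
 * For reference: CONFIG_4KSTACKS gives THREAD_SIZE = 4 KiB and STACK_WARN =
 * 512 bytes, otherwise THREAD_SIZE = 8 KiB and STACK_WARN = 1 KiB.
 * STACK_WARN, one eighth of the stack, is used as the low-water mark for
 * stack-overflow warnings elsewhere in the kernel.
 */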

/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)                   \
{                                               \
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .status         = 0,                    \
        .cpu            = 0,                    \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
        .addr_limit     = KERNEL_DS,            \
}

#define init_thread_info        (init_thread_union.thread_info)
#define init_stack              (init_thread_union.stack)
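/*
 * Note: init_thread_union is the statically allocated, THREAD_SIZE-sized
 * union, declared outside this file in the kernels this header targets,
 * that overlays the boot task's thread_info with its kernel stack; the two
 * macros above simply name the two views of that one block of memory.
 */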

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
        struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
        /* SH-5: read the pointer cached in control register cr17 */
        __asm__ __volatile__ ("getcon   cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
        /* CPUs with banked registers (SR.RB): read the pointer cached in r7_bank */
        __asm__ __volatile__ ("stc      r7_bank, %0" : "=r" (ti));
#else
        /* Fallback: mask the stack pointer down to the THREAD_SIZE-aligned
           base of the current stack, where thread_info lives. */
        unsigned long __dummy;

        __asm__ __volatile__ (
                "mov    r15, %0\n\t"
                "and    %1, %0\n\t"
                : "=&r" (ti), "=r" (__dummy)
                : "1" (~(THREAD_SIZE - 1))
                : "memory");
#endif

        return ti;
}
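/*
 * Worked example for the fallback path (the addresses are made up): with
 * THREAD_SIZE = 8192 the mask is ~(THREAD_SIZE - 1) = 0xffffe000, so a
 * stack pointer of 0x8c40be74 is rounded down to 0x8c40a000, the base of
 * the current THREAD_SIZE-aligned stack region where struct thread_info
 * sits.
 */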

#define THREAD_SIZE_ORDER       (THREAD_SHIFT - PAGE_SHIFT)

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);

#endif /* !__ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE       0       /* syscall trace active */
#define TIF_SIGPENDING          1       /* signal pending */
#define TIF_NEED_RESCHED        2       /* rescheduling necessary */
#define TIF_SINGLESTEP          4       /* singlestepping active */
#define TIF_SYSCALL_AUDIT       5       /* syscall auditing active */
#define TIF_SECCOMP             6       /* secure computing */
#define TIF_NOTIFY_RESUME       7       /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT  8       /* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG      17      /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE              18      /* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP         (1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG     (1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK  (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
                                 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK       (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING      | \
                                 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT   | \
                                 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME   | \
                                 _TIF_SYSCALL_TRACEPOINT)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK          (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
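/*
 * Sanity check of the constraint noted above: every flag in these masks
 * sits in bits 0..8, so _TIF_ALLWORK_MASK evaluates to 0x1b7 and
 * _TIF_WORK_MASK to 0x186, both comfortably within the 2-byte limit imposed
 * by the tst immediates in arch/sh/kernel/entry-common.S.
 */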

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU              0x0002  /* FPU used by this task this quantum */
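/*
 * Usage note (the actual call sites live outside this header): the lazy FPU
 * handling in arch/sh inspects TS_USEDFPU in thread_info->status at
 * context-switch time to decide whether the outgoing task's FPU registers
 * actually need to be saved.
 */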

#ifndef __ASSEMBLY__

#define TI_FLAG_FAULT_CODE_SHIFT        24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
        struct thread_info *ti = current_thread_info();

        /* Keep only the low 24 TIF_* bits, then install the new fault code
           in the top byte.  Note the unsigned constant: a plain ~0 would be
           shifted arithmetically, leaving the mask all-ones and stale
           fault-code bits behind. */
        ti->flags = (ti->flags & (~0UL >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
                | (val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
        struct thread_info *ti = current_thread_info();
        return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
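/*
 * Illustrative usage (the real call sites live elsewhere in arch/sh, not in
 * this header): a fault handler can record a user-mode write fault with
 *
 *      set_thread_fault_code(FAULT_CODE_WRITE | FAULT_CODE_USER);
 *
 * which stores 0x11 in bits 31..24 of ti->flags; a later call to
 * get_thread_fault_code() returns that 0x11 while the TIF_* bits in the low
 * 24 bits stay untouched.
 */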

#endif  /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_SH_THREAD_INFO_H */