linux/include/linux/ptrace.h
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME             0
#define PTRACE_PEEKTEXT            1
#define PTRACE_PEEKDATA            2
#define PTRACE_PEEKUSR             3
#define PTRACE_POKETEXT            4
#define PTRACE_POKEDATA            5
#define PTRACE_POKEUSR             6
#define PTRACE_CONT                7
#define PTRACE_KILL                8
#define PTRACE_SINGLESTEP          9

#define PTRACE_ATTACH             16
#define PTRACE_DETACH             17

#define PTRACE_SYSCALL            24
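
/*
 * The request codes above are used from user space via ptrace(2).  A
 * minimal user-space sketch, not part of this header (needs
 * <sys/types.h>, <sys/ptrace.h>, <sys/wait.h> and <unistd.h>; `addr' is
 * a hypothetical address of interest in the child): the child asks to
 * be traced and execs, the parent waits for the exec stop, peeks one
 * word of the child's memory, and resumes it.
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *              execl("/bin/true", "true", NULL);
 *              _exit(1);
 *      }
 *      waitpid(pid, NULL, 0);
 *      long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *      ptrace(PTRACE_CONT, pid, NULL, 0);
 */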
/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
#define PTRACE_SETOPTIONS       0x4200
#define PTRACE_GETEVENTMSG      0x4201
#define PTRACE_GETSIGINFO       0x4202
#define PTRACE_SETSIGINFO       0x4203

/*
 * Generic ptrace interface that exports the architecture specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the
 * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the
 * other user_regset flavors, the user_regset layout and the ELF core dump note
 * payload share exactly the same layout.
 *
 * This interface is used as follows:
 *      struct iovec iov = { buf, len };
 *
 *      ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On successful completion, iov.iov_len is updated by the kernel to indicate
 * how much the kernel has written/read to/from the user's buffer (iov.iov_base).
 */
#define PTRACE_GETREGSET        0x4204
#define PTRACE_SETREGSET        0x4205
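
/*
 * A user-space sketch of the regset interface above, not part of this
 * header.  It assumes a tracee `pid' that is already attached and
 * stopped, and a C library that exposes PTRACE_GETREGSET (otherwise the
 * request has to be passed numerically); needs <elf.h>, <stdio.h>,
 * <sys/ptrace.h>, <sys/uio.h> and <sys/user.h>.
 *
 *      struct user_regs_struct regs;
 *      struct iovec iov = { &regs, sizeof(regs) };
 *
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1)
 *              perror("PTRACE_GETREGSET");
 *      else
 *              printf("kernel filled %zu bytes\n", iov.iov_len);
 */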

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD   0x00000001
#define PTRACE_O_TRACEFORK      0x00000002
#define PTRACE_O_TRACEVFORK     0x00000004
#define PTRACE_O_TRACECLONE     0x00000008
#define PTRACE_O_TRACEEXEC      0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT      0x00000040

#define PTRACE_O_MASK           0x0000007f

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6
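
/*
 * A user-space sketch of how the options and event codes fit together,
 * not part of this header (tracee `pid' assumed attached and stopped):
 * ask for fork notifications, resume the tracee, and recognise the
 * resulting event stop in the waitpid() status.
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, 0,
 *             PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 *      ptrace(PTRACE_CONT, pid, 0, 0);
 *
 *      int status;
 *      waitpid(pid, &status, 0);
 *      if (WIFSTOPPED(status) &&
 *          status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
 *              unsigned long new_pid;
 *              ptrace(PTRACE_GETEVENTMSG, pid, 0, &new_pid);
 *      }
 */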

#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  While a task is running it owns its own task->ptrace
 * flags.  While a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_PTRACED      0x00000001
#define PT_DTRACE       0x00000002      /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000004
#define PT_PTRACE_CAP   0x00000008      /* ptracer can follow suid-exec */
#define PT_TRACE_FORK   0x00000010
#define PT_TRACE_VFORK  0x00000020
#define PT_TRACE_CLONE  0x00000040
#define PT_TRACE_EXEC   0x00000080
#define PT_TRACE_VFORK_DONE     0x00000100
#define PT_TRACE_EXIT   0x00000200

#define PT_TRACE_MASK   0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT       31
#define PT_SINGLESTEP           (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT        30
#define PT_BLOCKSTEP            (1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>             /* For unlikely.  */
#include <linux/sched.h>                /* For struct task_struct.  */


extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
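
/*
 * For illustration only (a hedged sketch, not a real call site in this
 * header): an in-kernel user such as a /proc handler would typically
 * refuse to expose another task's state unless the caller could also
 * ptrace it.
 *
 *      if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *              return -EPERM;
 */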

static inline int ptrace_reparented(struct task_struct *child)
{
        return child->real_parent != child->parent;
}

static inline void ptrace_unlink(struct task_struct *child)
{
        if (unlikely(child->ptrace))
                __ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);

/**
 * task_ptrace - return %PT_* flags that apply to a task
 * @task:       pointer to &task_struct in question
 *
 * Returns the %PT_* flags that apply to @task.
 */
static inline int task_ptrace(struct task_struct *task)
{
        return task->ptrace;
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @mask:       %PT_* bit to check in @current->ptrace
 * @event:      %PTRACE_EVENT_* value to report if @mask is set
 * @message:    value for %PTRACE_GETEVENTMSG to return
 *
 * This checks the @mask bit to see if ptrace wants stops for this event.
 * If so we stop, reporting @event and @message to the ptrace parent.
 *
 * Returns nonzero if we did a ptrace notification, zero if not.
 *
 * Called without locks.
 */
static inline int ptrace_event(int mask, int event, unsigned long message)
{
        if (mask && likely(!(current->ptrace & mask)))
                return 0;
        current->ptrace_message = message;
        ptrace_notify((event << 8) | SIGTRAP);
        return 1;
}
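
/*
 * For illustration only (a hedged sketch; the real callers live in the
 * fork/exec/exit paths, not here): reporting an exec to the tracer when
 * PTRACE_O_TRACEEXEC was requested would look roughly like
 *
 *      ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0);
 *
 * which stops the task with (PTRACE_EVENT_EXEC << 8) | SIGTRAP and makes
 * the value 0 available to the tracer via PTRACE_GETEVENTMSG.
 */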

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:              new child task
 * @ptrace:             true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
        INIT_LIST_HEAD(&child->ptrace_entry);
        INIT_LIST_HEAD(&child->ptraced);
        child->parent = child->real_parent;
        child->ptrace = 0;
        if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);
        }

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:       task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
        BUG_ON(!list_empty(&task->ptraced));
        ptrace_unlink(task);
        BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
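
/*
 * A hedged sketch of the intended use, with hypothetical names (the
 * handler and its result computation are made up for illustration):
 *
 *      SYSCALL_DEFINE1(example, unsigned long, arg)
 *      {
 *              long ret = example_compute(arg);
 *              force_successful_syscall_return();
 *              return ret;
 *      }
 *
 * Here a successful result may legitimately be negative, so the arch is
 * told not to treat it as -errno.
 */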

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()          (0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif  /* arch_has_single_step */
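
/*
 * A hedged sketch of what an architecture's <asm/ptrace.h> might provide
 * when it does support single-step (illustrative only, not any real
 * arch's code):
 *
 *      #define arch_has_single_step()  (1)
 *
 *      void user_enable_single_step(struct task_struct *task);
 *      void user_disable_single_step(struct task_struct *task);
 *
 * with the two functions implemented in the arch's ptrace code, typically
 * by setting or clearing a trap flag in the task's saved user registers.
 */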

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()           (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif  /* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)     (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)            do { } while (0)
#endif
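
/*
 * Roughly how the core signal code is expected to use the pair above
 * (a hedged sketch of the calling convention, not the literal call site):
 *
 *      if (arch_ptrace_stop_needed(exit_code, info)) {
 *              spin_unlock_irq(&current->sighand->siglock);
 *              arch_ptrace_stop(exit_code, info);
 *              spin_lock_irq(&current->sighand->siglock);
 *      }
 *
 * i.e. the siglock is dropped only when the arch says extra work is
 * needed for this particular stop.
 */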

extern int task_current_syscall(struct task_struct *target, long *callno,
                                unsigned long args[6], unsigned int maxargs,
                                unsigned long *sp, unsigned long *pc);
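
/*
 * A hedged usage sketch for a hypothetical task pointer `task' (the
 * /proc "syscall" file is the typical consumer): for a stopped target
 * this reports the syscall number it is blocked in, its arguments, and
 * the user stack pointer and program counter.
 *
 *      long nr;
 *      unsigned long args[6], sp, pc;
 *
 *      if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0 &&
 *          nr != -1L)
 *              pr_debug("task is inside syscall %ld\n", nr);
 */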

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif /* __KERNEL__ */

#endif