linux/include/linux/ptrace.h
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME             0
#define PTRACE_PEEKTEXT            1
#define PTRACE_PEEKDATA            2
#define PTRACE_PEEKUSR             3
#define PTRACE_POKETEXT            4
#define PTRACE_POKEDATA            5
#define PTRACE_POKEUSR             6
#define PTRACE_CONT                7
#define PTRACE_KILL                8
#define PTRACE_SINGLESTEP          9

#define PTRACE_ATTACH             16
#define PTRACE_DETACH             17

#define PTRACE_SYSCALL            24

/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
#define PTRACE_SETOPTIONS       0x4200
#define PTRACE_GETEVENTMSG      0x4201
#define PTRACE_GETSIGINFO       0x4202
#define PTRACE_SETSIGINFO       0x4203

/*
 * Generic ptrace interface that exports the architecture specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the
 * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the
 * other user_regset flavors, the user_regset layout and the ELF core dump note
 * payload have exactly the same layout.
 *
 * This interface is used as follows:
 *      struct iovec iov = { buf, len };
 *
 *      ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On successful completion, the kernel updates iov.len to indicate how many
 * bytes it has written to (or read from) the user's iov.buf.
 */
#define PTRACE_GETREGSET        0x4204
#define PTRACE_SETREGSET        0x4205
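
/*
 * Illustrative user-space sketch (not part of this header), expanding on
 * the usage comment above: reading a stopped tracee's general-purpose
 * registers through the regset interface.  NT_PRSTATUS comes from <elf.h>
 * and struct user_regs_struct from <sys/user.h>; the tracee is assumed to
 * be ptrace-stopped already.
 *
 *      #include <elf.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *      #include <sys/user.h>
 *
 *      struct user_regs_struct regs;
 *      struct iovec iov = { &regs, sizeof(regs) };
 *
 *      ret = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * On return, iov.iov_len holds the number of bytes actually filled in.
 */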

#define PTRACE_SEIZE            0x4206
#define PTRACE_INTERRUPT        0x4207
#define PTRACE_LISTEN           0x4208
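
/*
 * Illustrative user-space sketch (assumed tracer code, not part of this
 * header) of the SEIZE-based flow: PTRACE_SEIZE attaches without stopping
 * the tracee, PTRACE_INTERRUPT then requests a trap stop, and
 * PTRACE_LISTEN lets a group-stopped tracee wait for further events
 * without being resumed.
 *
 *      Attach without stopping the tracee:
 *              ptrace(PTRACE_SEIZE, pid, 0, 0);
 *
 *      Request a stop and wait for it to be reported:
 *              ptrace(PTRACE_INTERRUPT, pid, 0, 0);
 *              waitpid(pid, &status, 0);
 */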

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6
#define PTRACE_EVENT_SECCOMP    7
/* Extended result codes which are enabled by means other than options.  */
#define PTRACE_EVENT_STOP       128
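
/*
 * Tracer-side sketch (assumed user-space code): an event stop reports
 * SIGTRAP | (PTRACE_EVENT_xxx << 8) as the stop signal, so the event
 * number can be recovered from a waitpid() status like this:
 *
 *      waitpid(pid, &status, 0);
 *      if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
 *              event = (unsigned)status >> 16;
 */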

/* Options set using PTRACE_SETOPTIONS or using PTRACE_SEIZE @data param */
#define PTRACE_O_TRACESYSGOOD   1
#define PTRACE_O_TRACEFORK      (1 << PTRACE_EVENT_FORK)
#define PTRACE_O_TRACEVFORK     (1 << PTRACE_EVENT_VFORK)
#define PTRACE_O_TRACECLONE     (1 << PTRACE_EVENT_CLONE)
#define PTRACE_O_TRACEEXEC      (1 << PTRACE_EVENT_EXEC)
#define PTRACE_O_TRACEVFORKDONE (1 << PTRACE_EVENT_VFORK_DONE)
#define PTRACE_O_TRACEEXIT      (1 << PTRACE_EVENT_EXIT)
#define PTRACE_O_TRACESECCOMP   (1 << PTRACE_EVENT_SECCOMP)

#define PTRACE_O_MASK           0x000000ff
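
/*
 * Illustrative sketch (assumed user-space tracer): options are set on an
 * already-attached tracee with PTRACE_SETOPTIONS, or passed at attach
 * time as the @data argument of PTRACE_SEIZE:
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, 0,
 *             PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
 *             PTRACE_O_TRACEEXIT);
 */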

#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running it owns its task->ptrace flags.
 * When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_SEIZED       0x00010000      /* SEIZE used, enable new behavior */
#define PT_PTRACED      0x00000001
#define PT_DTRACE       0x00000002      /* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP   0x00000004      /* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT       3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)    (1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD         PT_EVENT_FLAG(0)
#define PT_TRACE_FORK           PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK          PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE          PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC           PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE     PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT           PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP        PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
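
/*
 * Worked example of the mapping above: PTRACE_EVENT_FORK is 1, so the
 * user-visible option PTRACE_O_TRACEFORK is 1 << 1 == 0x02, while the
 * in-kernel flag PT_TRACE_FORK is 1 << (PT_OPT_FLAG_SHIFT + 1) == 0x10.
 * Each PTRACE_O_* option bit is simply shifted left by PT_OPT_FLAG_SHIFT
 * to make room for PT_PTRACED, PT_DTRACE and PT_PTRACE_CAP.
 */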

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT       31
#define PT_SINGLESTEP           (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT        30
#define PT_BLOCKSTEP            (1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>             /* For unlikely.  */
#include <linux/sched.h>                /* For struct task_struct.  */
#include <linux/err.h>                  /* for IS_ERR_VALUE */
#include <linux/bug.h>                  /* For BUG_ON.  */


extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ        0x01
#define PTRACE_MODE_ATTACH      0x02
#define PTRACE_MODE_NOAUDIT     0x04
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

static inline int ptrace_reparented(struct task_struct *child)
{
        return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
        if (unlikely(child->ptrace))
                __ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
        if (unlikely(task->ptrace))
                return rcu_dereference(task->parent);
        return NULL;
}
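
/*
 * Minimal in-kernel usage sketch (illustrative only, assuming a caller
 * that just needs the tracer's pid):
 *
 *      rcu_read_lock();
 *      tracer = ptrace_parent(task);
 *      if (tracer)
 *              tracer_pid = task_pid_nr(tracer);
 *      rcu_read_unlock();
 */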

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
        return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:      %PTRACE_EVENT_* value to report
 * @message:    value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
        if (unlikely(ptrace_event_enabled(current, event))) {
                current->ptrace_message = message;
                ptrace_notify((event << 8) | SIGTRAP);
        } else if (event == PTRACE_EVENT_EXEC) {
                /* legacy EXEC report via SIGTRAP */
                if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
                        send_sig(SIGTRAP, current, 0);
        }
}
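
/*
 * Tracer-side sketch (assumed user-space code): after an event stop, the
 * value passed as @message above is retrieved with PTRACE_GETEVENTMSG.
 * For PTRACE_EVENT_FORK/VFORK/CLONE it is the new child's pid:
 *
 *      unsigned long msg;
 *
 *      if ((status >> 16) == PTRACE_EVENT_FORK) {
 *              ptrace(PTRACE_GETEVENTMSG, pid, 0, &msg);
 *              child_pid = (pid_t)msg;
 *      }
 */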

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:              new child task
 * @ptrace:             true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
        INIT_LIST_HEAD(&child->ptrace_entry);
        INIT_LIST_HEAD(&child->ptraced);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
        child->jobctl = 0;
        child->ptrace = 0;
        child->parent = child->real_parent;

        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);

                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
                else
                        sigaddset(&child->pending.signal, SIGSTOP);

                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:       task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
        BUG_ON(!list_empty(&task->ptraced));
        ptrace_unlink(task);
        BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
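
/*
 * Illustrative sketch (hypothetical handler with a made-up
 * read_debug_register() helper, assuming an architecture with a separate
 * error flag): a handler whose legitimate result can look like -errno
 * clears the error flag just before returning.
 *
 *      long val = read_debug_register(child, addr);    (may be negative)
 *
 *      force_successful_syscall_return();
 *      return val;
 */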

#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on whether the
 * return value is an error value.  Some systems, like ia64 and powerpc, have
 * different indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
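
/*
 * Concretely (with the generic definition above), IS_ERR_VALUE() treats
 * only the last MAX_ERRNO (4095) values of the unsigned long range, i.e.
 * -4095..-1 when read as signed, as errors: a return of -EIO counts as
 * failure, while a large mmap() address with the top bit set still
 * counts as success.
 */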

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()          (0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif  /* arch_has_single_step */
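
/*
 * Sketch of how generic code is expected to use these hooks (modeled on
 * the PTRACE_SINGLESTEP handling in kernel/ptrace.c):
 *
 *      if (request == PTRACE_SINGLESTEP) {
 *              if (!arch_has_single_step())
 *                      return -EIO;
 *              user_enable_single_step(child);
 *      } else {
 *              user_disable_single_step(child);
 *      }
 */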

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()           (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif  /* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)     (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)            do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
                                unsigned long args[6], unsigned int maxargs,
                                unsigned long *sp, unsigned long *pc);
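
/*
 * Usage sketch (illustrative; this interface backs /proc/<pid>/syscall,
 * and report_syscall() below is a hypothetical consumer):
 *
 *      long nr;
 *      unsigned long args[6], sp, pc;
 *
 *      if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0)
 *              report_syscall(nr, args, sp, pc);
 *
 * A returned nr of -1 means @target was not inside a system call.
 */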

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif /* __KERNEL__ */

#endif