linux/include/linux/ptrace.h
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>             /* For unlikely.  */
#include <linux/sched.h>                /* For struct task_struct.  */
#include <linux/err.h>                  /* For IS_ERR_VALUE.  */
#include <linux/bug.h>                  /* For BUG_ON.  */
#include <linux/pid_namespace.h>        /* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  While a task is running, it owns its own task->ptrace
 * flags.  When a task is stopped, the ptracer owns task->ptrace.
 */

#define PT_SEIZED       0x00010000      /* SEIZE used, enable new behavior */
#define PT_PTRACED      0x00000001
#define PT_DTRACE       0x00000002      /* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP   0x00000004      /* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT       3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)    (1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD         PT_EVENT_FLAG(0)
#define PT_TRACE_FORK           PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK          PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE          PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC           PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE     PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT           PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP        PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL             (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)

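/*
 * Illustrative note (not part of the original header): each PT_TRACE_*
 * bit is just the corresponding userspace PTRACE_O_TRACE* option bit
 * shifted up by PT_OPT_FLAG_SHIFT, because PTRACE_O_TRACEFORK ==
 * 1 << PTRACE_EVENT_FORK, and so on for the other events.  A hypothetical
 * helper converting a PTRACE_SETOPTIONS mask into task->ptrace flag bits
 * would therefore just be:
 *
 *      static inline unsigned long pt_event_flags(unsigned long options)
 *      {
 *              return options << PT_OPT_FLAG_SHIFT;  // 0x2 -> PT_TRACE_FORK
 *      }
 */
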
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT       31
#define PT_SINGLESTEP           (1 << PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT        30
#define PT_BLOCKSTEP            (1 << PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
                        unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src,
                           char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src,
                            unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);

#define PTRACE_MODE_READ        0x01
#define PTRACE_MODE_ATTACH      0x02
#define PTRACE_MODE_NOAUDIT     0x04
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
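
/*
 * Usage sketch (illustrative, not from this header): a read-side check,
 * as done by procfs-style callers, typically looks like:
 *
 *      if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *              return -EPERM;
 *
 * PTRACE_MODE_ATTACH is the stronger check used when actually attaching;
 * PTRACE_MODE_NOAUDIT can be OR'd in to suppress audit records on denial.
 */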

static inline int ptrace_reparented(struct task_struct *child)
{
        return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
        if (unlikely(child->ptrace))
                __ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
        if (unlikely(task->ptrace))
                return rcu_dereference(task->parent);
        return NULL;
}
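
/*
 * Usage sketch (illustrative): the returned pointer is only guaranteed
 * stable inside the RCU read-side critical section:
 *
 *      rcu_read_lock();
 *      tracer = ptrace_parent(task);
 *      if (tracer)
 *              tracer_pid = task_pid_nr(tracer);
 *      rcu_read_unlock();
 */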

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
        return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:      %PTRACE_EVENT_* value to report
 * @message:    value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
        if (unlikely(ptrace_event_enabled(current, event))) {
                current->ptrace_message = message;
                ptrace_notify((event << 8) | SIGTRAP);
        } else if (event == PTRACE_EVENT_EXEC) {
                /* legacy EXEC report via SIGTRAP */
                if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
                        send_sig(SIGTRAP, current, 0);
        }
}
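
/*
 * Tracer-side sketch (illustrative, userspace): an enabled event is seen
 * by the tracer as a SIGTRAP stop with the event number in the high byte
 * of the wait status:
 *
 *      waitpid(child, &status, 0);
 *      if (WIFSTOPPED(status) &&
 *          status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8)))
 *              ptrace(PTRACE_GETEVENTMSG, child, 0, &msg);
 */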

/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event:      %PTRACE_EVENT_* value to report
 * @pid:        process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
        /*
         * FIXME: There's a potential race if a ptracer in a different pid
         * namespace than parent attaches between computing message below
         * and when we acquire tasklist_lock in ptrace_stop().  If this
         * happens, the ptracer will get a bogus pid from
         * PTRACE_GETEVENTMSG.
         */
        unsigned long message = 0;
        struct pid_namespace *ns;

        rcu_read_lock();
        ns = task_active_pid_ns(rcu_dereference(current->parent));
        if (ns)
                message = pid_nr_ns(pid, ns);
        rcu_read_unlock();

        ptrace_event(event, message);
}
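
/*
 * Tracer-side sketch (illustrative, userspace): for fork/vfork/clone
 * events the message set up here is the new child's pid, translated into
 * the ptrace parent's pid namespace, so the tracer can simply do:
 *
 *      unsigned long new_pid;
 *      ptrace(PTRACE_GETEVENTMSG, child, 0, &new_pid);
 */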

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:              new child task
 * @ptrace:             true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
        INIT_LIST_HEAD(&child->ptrace_entry);
        INIT_LIST_HEAD(&child->ptraced);
        child->jobctl = 0;
        child->ptrace = 0;
        child->parent = child->real_parent;

        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);

                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
                else
                        sigaddset(&child->pending.signal, SIGSTOP);

                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:       task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
        BUG_ON(!list_empty(&task->ptraced));
        ptrace_unlink(task);
        BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not
 * get set.  On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be
 * filtered out by some other means (e.g., in user-level, by passing an
 * extra argument to the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
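
/*
 * Sketch (illustrative, hypothetical handler): a syscall whose legitimate
 * result can fall into the -errno range would use the hook like this:
 *
 *      long hypothetical_get_token(void)
 *      {
 *              long token = compute_token();   // may look like -errno
 *              force_successful_syscall_return();
 *              return token;
 *      }
 *
 * On architectures with a separate error flag this keeps the flag clear;
 * elsewhere it expands to nothing.
 */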

#ifndef is_syscall_success
/*
 * On most systems we can tell whether a syscall was a success based on
 * whether the return value is in the error-value range.  Some systems,
 * like ia64 and powerpc, have different indicators of success/failure
 * and must define their own.
 */
#define is_syscall_success(regs) \
        (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
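
/*
 * Usage sketch (illustrative): tracing/audit code applies this to the
 * saved register state of a task stopped at syscall exit:
 *
 *      struct pt_regs *regs = task_pt_regs(task);
 *      long rv = regs_return_value(regs);
 *
 *      if (is_syscall_success(regs))
 *              record_success(rv);     // hypothetical consumer
 *      else
 *              record_error(-rv);      // rv holds -errno here
 */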

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()          (0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif  /* arch_has_single_step */
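
/*
 * Sketch (illustrative): an architecture that supports single-step defines
 * the trio in its own <asm/ptrace.h> / ptrace code instead, roughly:
 *
 *      #define arch_has_single_step()  (1)
 *      extern void user_enable_single_step(struct task_struct *);
 *      extern void user_disable_single_step(struct task_struct *);
 *
 * with the enable side typically setting a trap flag in the saved user
 * registers (on x86, EFLAGS.TF together with TIF_SINGLESTEP).
 */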

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()           (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
        BUG();                  /* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif  /* arch_has_block_step */
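
/*
 * Sketch (illustrative): the generic resume path in kernel/ptrace.c picks
 * between the two stepping modes roughly like:
 *
 *      if (request == PTRACE_SINGLEBLOCK && arch_has_block_step())
 *              user_enable_block_step(child);
 *      else if (request == PTRACE_SINGLESTEP && arch_has_single_step())
 *              user_enable_single_step(child);
 *      else
 *              user_disable_single_step(child);
 */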

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
                                     struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
                                            struct pt_regs *regs,
                                            siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * It is guaranteed to be invoked before each ptrace stop, so it is the
 * place to gate any arch-specific work that must happen prior to the stop.
 */
#define arch_ptrace_stop_needed(code, info)     (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:       current->exit_code value ptrace will stop with
 * @info:       siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)            do { } while (0)
#endif
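
/*
 * Sketch (illustrative): the signal code pairs the two hooks the way
 * ptrace_stop() does, testing under the siglock and dropping the lock
 * only when there is actual work:
 *
 *      if (arch_ptrace_stop_needed(exit_code, info)) {
 *              spin_unlock_irq(&current->sighand->siglock);
 *              arch_ptrace_stop(exit_code, info);
 *              spin_lock_irq(&current->sighand->siglock);
 *      }
 */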

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

#ifndef ptrace_signal_deliver
#define ptrace_signal_deliver() ((void)0)
#endif

/*
 * Unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
                                unsigned long args[6], unsigned int maxargs,
                                unsigned long *sp, unsigned long *pc);
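
/*
 * Usage sketch (illustrative): callers such as the /proc/<pid>/syscall
 * code collect a stopped task's syscall state like:
 *
 *      long nr;
 *      unsigned long args[6], sp, pc;
 *
 *      if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0) {
 *              // nr == -1L means the task is not in a syscall
 *      }
 */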

#endif  /* _LINUX_PTRACE_H */