1/* 2 * Tracing hooks 3 * 4 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. 5 * 6 * This copyrighted material is made available to anyone wishing to use, 7 * modify, copy, or redistribute it subject to the terms and conditions 8 * of the GNU General Public License v.2. 9 * 10 * This file defines hook entry points called by core code where 11 * user tracing/debugging support might need to do something. These 12 * entry points are called tracehook_*(). Each hook declared below 13 * has a detailed kerneldoc comment giving the context (locking et 14 * al) from which it is called, and the meaning of its return value. 15 * 16 * Each function here typically has only one call site, so it is ok 17 * to have some nontrivial tracehook_*() inlines. In all cases, the 18 * fast path when no tracing is enabled should be very short. 19 * 20 * The purpose of this file and the tracehook_* layer is to consolidate 21 * the interface that the kernel core and arch code uses to enable any 22 * user debugging or tracing facility (such as ptrace). The interfaces 23 * here are carefully documented so that maintainers of core and arch 24 * code do not need to think about the implementation details of the 25 * tracing facilities. Likewise, maintainers of the tracing code do not 26 * need to understand all the calling core or arch code in detail, just 27 * documented circumstances of each call, such as locking conditions. 28 * 29 * If the calling core code changes so that locking is different, then 30 * it is ok to change the interface documented here. The maintainer of 31 * core code changing should notify the maintainers of the tracing code 32 * that they need to work out the change. 33 * 34 * Some tracehook_*() inlines take arguments that the current tracing 35 * implementations might not necessarily use. 
These function signatures
 * are chosen to pass in all the information that is on hand in the
 * caller and might conceivably be relevant to a tracer, so that the
 * core code won't have to be updated when tracing adds more features.
 * If a call site changes so that some of those parameters are no longer
 * already on hand without extra work, then the tracehook_* interface
 * can change so there is no make-work burden on the core code.  The
 * maintainer of core code changing should notify the maintainers of the
 * tracing code that they need to work out the change.
 */

#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H	1

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>
struct linux_binprm;

/**
 * tracehook_expect_breakpoints - guess if task memory might be touched
 * @task: current task, making a new mapping
 *
 * Return nonzero if @task is expected to want breakpoint insertion in
 * its memory at some point.  A zero return is no guarantee it won't
 * be done, but this is a hint that it's known to be likely.
 *
 * May be called with @task->mm->mmap_sem held for writing.
 */
static inline int tracehook_expect_breakpoints(struct task_struct *task)
{
	/* A ptraced task is considered likely to get breakpoints inserted. */
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/*
 * ptrace report for syscall entry and exit looks identical.
 */
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
	int ptrace = task_ptrace(current);

	if (!(ptrace & PT_PTRACED))
		return;

	/*
	 * With PT_TRACESYSGOOD the tracer sees SIGTRAP | 0x80, letting it
	 * distinguish a syscall stop from a genuine SIGTRAP.
	 */
	ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

/**
 * tracehook_report_syscall_entry - task is about to attempt a system call
 * @regs: user register state of current task
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just entered the kernel for a system call.
 * Full user register state is available here.  Changing the values
 * in @regs can affect the system call number and arguments to be tried.
 * It is safe to block here, preventing the system call from beginning.
 *
 * Returns zero normally, or nonzero if the calling arch code should abort
 * the system call.  That must prevent normal entry so no system call is
 * made.  If @task ever returns to user mode after this, its register state
 * is unspecified, but should be something harmless like an %ENOSYS error
 * return.  It should preserve enough information so that syscall_rollback()
 * can work (see asm-generic/syscall.h).
 *
 * Called without locks, just after entering kernel mode.
 */
static inline __must_check int tracehook_report_syscall_entry(
	struct pt_regs *regs)
{
	ptrace_report_syscall(regs);
	return 0;
}

/**
 * tracehook_report_syscall_exit - task has just finished a system call
 * @regs: user register state of current task
 * @step: nonzero if simulating single-step or block-step
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just finished an attempted system call.  Full
 * user register state is available here.  It is safe to block here,
 * preventing signals from being processed.
 *
 * If @step is nonzero, this report is also in lieu of the normal
 * trap that would follow the system call instruction because
 * user_enable_block_step() or user_enable_single_step() was used.
 * In this case, %TIF_SYSCALL_TRACE might not be set.
 *
 * Called without locks, just before checking for pending signals.
 */
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
	ptrace_report_syscall(regs);
}

/**
 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
 * @task: current task doing exec
 *
 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
 *
 * @task->cred_guard_mutex is held by the caller through the do_execve().
 */
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
	int unsafe = 0;
	int ptrace = task_ptrace(task);
	if (ptrace & PT_PTRACED) {
		/* A privileged tracer gets the distinct _CAP flag. */
		if (ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	return unsafe;
}

/**
 * tracehook_tracer_task - return the task that is tracing the given task
 * @tsk: task to consider
 *
 * Returns NULL if no one is tracing @tsk, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @tsk, still held from when tracehook_unsafe_exec() was called.
 */
static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
{
	/* Under ptrace the task is reparented to its tracer. */
	if (task_ptrace(tsk) & PT_PTRACED)
		return rcu_dereference(tsk->parent);
	return NULL;
}

/**
 * tracehook_report_exec - a successful exec was completed
 * @fmt: &struct linux_binfmt that performed the exec
 * @bprm: &struct linux_binprm containing exec details
 * @regs: user-mode register state
 *
 * An exec just completed, we are shortly going to return to user mode.
 * The freshly initialized register state can be seen and changed in @regs.
 * The name, file and other pointers in @bprm are still on hand to be
 * inspected, but will be freed as soon as this returns.
 *
 * Called with no locks, but with some kernel resources held live
 * and a reference on @fmt->module.
 */
static inline void tracehook_report_exec(struct linux_binfmt *fmt,
					 struct linux_binprm *bprm,
					 struct pt_regs *regs)
{
	/*
	 * If no PTRACE_EVENT_EXEC report was made (tracer did not ask for
	 * exec events) but the task is ptraced anyway, fall back to the
	 * traditional SIGTRAP after exec.
	 */
	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
	    unlikely(task_ptrace(current) & PT_PTRACED))
		send_sig(SIGTRAP, current, 0);
}

/**
 * tracehook_report_exit - task has begun to exit
 * @exit_code: pointer to value destined for @current->exit_code
 *
 * @exit_code points to the value passed to do_exit(), which tracing
 * might change here.  This is almost the first thing in do_exit(),
 * before freeing any resources or setting the %PF_EXITING flag.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_exit(long *exit_code)
{
	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
}

/**
 * tracehook_prepare_clone - prepare for new child to be cloned
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 *
 * This is called before a new user task is to be cloned.
 * Its return value will be passed to tracehook_finish_clone().
 *
 * Called with no locks held.
225 */ 226static inline int tracehook_prepare_clone(unsigned clone_flags) 227{ 228 if (clone_flags & CLONE_UNTRACED) 229 return 0; 230 231 if (clone_flags & CLONE_VFORK) { 232 if (current->ptrace & PT_TRACE_VFORK) 233 return PTRACE_EVENT_VFORK; 234 } else if ((clone_flags & CSIGNAL) != SIGCHLD) { 235 if (current->ptrace & PT_TRACE_CLONE) 236 return PTRACE_EVENT_CLONE; 237 } else if (current->ptrace & PT_TRACE_FORK) 238 return PTRACE_EVENT_FORK; 239 240 return 0; 241} 242 243/** 244 * tracehook_finish_clone - new child created and being attached 245 * @child: new child task 246 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call 247 * @trace: return value from tracehook_prepare_clone() 248 * 249 * This is called immediately after adding @child to its parent's children list. 250 * The @trace value is that returned by tracehook_prepare_clone(). 251 * 252 * Called with current's siglock and write_lock_irq(&tasklist_lock) held. 253 */ 254static inline void tracehook_finish_clone(struct task_struct *child, 255 unsigned long clone_flags, int trace) 256{ 257 ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); 258} 259 260/** 261 * tracehook_report_clone - in parent, new child is about to start running 262 * @regs: parent's user register state 263 * @clone_flags: flags from parent's system call 264 * @pid: new child's PID in the parent's namespace 265 * @child: new child task 266 * 267 * Called after a child is set up, but before it has been started running. 268 * This is not a good place to block, because the child has not started 269 * yet. Suspend the child here if desired, and then block in 270 * tracehook_report_clone_complete(). This must prevent the child from 271 * self-reaping if tracehook_report_clone_complete() uses the @child 272 * pointer; otherwise it might have died and been released by the time 273 * tracehook_report_clone_complete() is called. 274 * 275 * Called with no locks held, but the child cannot run until this returns. 
 */
static inline void tracehook_report_clone(struct pt_regs *regs,
					  unsigned long clone_flags,
					  pid_t pid, struct task_struct *child)
{
	if (unlikely(task_ptrace(child))) {
		/*
		 * It doesn't matter who attached/attaching to this
		 * task, the pending SIGSTOP is right in any case.
		 */
		sigaddset(&child->pending.signal, SIGSTOP);
		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * tracehook_report_clone_complete - new child is running
 * @trace: return value from tracehook_prepare_clone()
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: child task, already running
 *
 * This is called just after the child has started running.  This is
 * just before the clone/fork syscall returns, or blocks for vfork
 * child completion if @clone_flags has the %CLONE_VFORK bit set.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_clone_complete(int trace,
						   struct pt_regs *regs,
						   unsigned long clone_flags,
						   pid_t pid,
						   struct task_struct *child)
{
	/* @trace is the PTRACE_EVENT_* chosen by tracehook_prepare_clone(). */
	if (unlikely(trace))
		ptrace_event(0, trace, pid);
}

/**
 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
 * @child: child task, already running
 * @pid: new child's PID in the parent's namespace
 *
 * Called after a %CLONE_VFORK parent has waited for the child to complete.
 * The clone/vfork system call will return immediately after this.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_vfork_done(struct task_struct *child,
					       pid_t pid)
{
	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}

/**
 * tracehook_prepare_release_task - task is being reaped, clean up tracing
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() just before @task gets finally reaped
 * and freed.  This would be the ideal place to remove and clean up any
 * tracing-related state for @task.
 *
 * Called with no locks held.
 */
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
	/* Nothing to do by default. */
}

/**
 * tracehook_finish_release_task - final tracing clean-up
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() when @task is being in the middle of
 * being reaped.  After this, there must be no tracing entanglements.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_release_task(struct task_struct *task)
{
	ptrace_release_task(task);
}

/**
 * tracehook_signal_handler - signal handler setup is complete
 * @sig: number of signal being delivered
 * @info: siginfo_t of signal being delivered
 * @ka: sigaction setting that chose the handler
 * @regs: user register state
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * Called by the arch code after a signal handler has been set up.
 * Register and stack state reflects the user handler about to run.
 * Signal mask changes have already been made.
 *
 * Called without locks, shortly before returning to user mode
 * (or handling more signals).
 */
static inline void tracehook_signal_handler(int sig, siginfo_t *info,
					    const struct k_sigaction *ka,
					    struct pt_regs *regs, int stepping)
{
	/* When single/block-stepping, report the handler setup as a trap. */
	if (stepping)
		ptrace_notify(SIGTRAP);
}

/**
 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
 * @task: task receiving the signal
 * @sig: signal number being sent
 *
 * Return zero iff tracing doesn't care to examine this ignored signal,
 * so it can short-circuit normal delivery and never even get queued.
 *
 * Called with @task->sighand->siglock held.
 */
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
						    int sig)
{
	/* A ptracer may want to see even ignored signals. */
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
 * @task: task receiving the signal
 * @sig: signal number being sent
 *
 * Return nonzero to prevent special handling of this termination signal.
 * Normally handler for signal is %SIG_DFL.  It can be %SIG_IGN if @sig is
 * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
 * When this returns zero, this signal might cause a quick termination
 * that does not give the debugger a chance to intercept the signal.
 *
 * Called with or without @task->sighand->siglock held.
 */
static inline int tracehook_consider_fatal_signal(struct task_struct *task,
						  int sig)
{
	/* Under ptrace, fatal signals must be reported, not fast-pathed. */
	return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_force_sigpending - let tracing force signal_pending(current) on
 *
 * Called when recomputing our signal_pending() flag.  Return nonzero
 * to force the signal_pending() flag on, so that tracehook_get_signal()
 * will be called before the next return to user mode.
 *
 * Called with @current->sighand->siglock held.
 */
static inline int tracehook_force_sigpending(void)
{
	/* Default tracing never forces the flag on. */
	return 0;
}

/**
 * tracehook_get_signal - deliver synthetic signal to traced task
 * @task: @current
 * @regs: task_pt_regs(@current)
 * @info: details of synthetic signal
 * @return_ka: sigaction for synthetic signal
 *
 * Return zero to check for a real pending signal normally.
 * Return -1 after releasing the siglock to repeat the check.
 * Return a signal number to induce an artificial signal delivery,
 * setting *@info and *@return_ka to specify its details and behavior.
 *
 * The @return_ka->sa_handler value controls the disposition of the
 * signal, no matter the signal number.  For %SIG_DFL, the return value
 * is a representative signal to indicate the behavior (e.g. %SIGTERM
 * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
 * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
 * reported will be @info->si_signo instead.
 *
 * Called with @task->sighand->siglock held, before dequeuing pending signals.
 */
static inline int tracehook_get_signal(struct task_struct *task,
				       struct pt_regs *regs,
				       siginfo_t *info,
				       struct k_sigaction *return_ka)
{
	/* Default: no synthetic signal; take the normal dequeue path. */
	return 0;
}

/**
 * tracehook_notify_jctl - report about job control stop/continue
 * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED
 * @why: %CLD_STOPPED or %CLD_CONTINUED
 *
 * This is called when we might call do_notify_parent_cldstop().
 *
 * @notify is zero if we would not ordinarily send a %SIGCHLD,
 * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
 *
 * @why is %CLD_STOPPED when about to stop for job control;
 * we are already in %TASK_STOPPED state, about to call schedule().
 * It might also be that we have just exited (check %PF_EXITING),
 * but need to report that a group-wide stop is complete.
478 * 479 * @why is %CLD_CONTINUED when waking up after job control stop and 480 * ready to make a delayed @notify report. 481 * 482 * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. 483 * 484 * Called with the siglock held. 485 */ 486static inline int tracehook_notify_jctl(int notify, int why) 487{ 488 return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; 489} 490 491/** 492 * tracehook_finish_jctl - report about return from job control stop 493 * 494 * This is called by do_signal_stop() after wakeup. 495 */ 496static inline void tracehook_finish_jctl(void) 497{ 498} 499 500#define DEATH_REAP -1 501#define DEATH_DELAYED_GROUP_LEADER -2 502 503/** 504 * tracehook_notify_death - task is dead, ready to notify parent 505 * @task: @current task now exiting 506 * @death_cookie: value to pass to tracehook_report_death() 507 * @group_dead: nonzero if this was the last thread in the group to die 508 * 509 * A return value >= 0 means call do_notify_parent() with that signal 510 * number. Negative return value can be %DEATH_REAP to self-reap right 511 * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our 512 * parent. Note that a return value of 0 means a do_notify_parent() call 513 * that sends no signal, but still wakes up a parent blocked in wait*(). 514 * 515 * Called with write_lock_irq(&tasklist_lock) held. 516 */ 517static inline int tracehook_notify_death(struct task_struct *task, 518 void **death_cookie, int group_dead) 519{ 520 if (task_detached(task)) 521 return task->ptrace ? SIGCHLD : DEATH_REAP; 522 523 /* 524 * If something other than our normal parent is ptracing us, then 525 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 526 * only has special meaning to our real parent. 527 */ 528 if (thread_group_empty(task) && !ptrace_reparented(task)) 529 return task->exit_signal; 530 531 return task->ptrace ? 
SIGCHLD : DEATH_DELAYED_GROUP_LEADER; 532} 533 534/** 535 * tracehook_report_death - task is dead and ready to be reaped 536 * @task: @current task now exiting 537 * @signal: return value from tracheook_notify_death() 538 * @death_cookie: value passed back from tracehook_notify_death() 539 * @group_dead: nonzero if this was the last thread in the group to die 540 * 541 * Thread has just become a zombie or is about to self-reap. If positive, 542 * @signal is the signal number just sent to the parent (usually %SIGCHLD). 543 * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is 544 * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. 545 * The @death_cookie was passed back by tracehook_notify_death(). 546 * 547 * If normal reaping is not inhibited, @task->exit_state might be changing 548 * in parallel. 549 * 550 * Called without locks. 551 */ 552static inline void tracehook_report_death(struct task_struct *task, 553 int signal, void *death_cookie, 554 int group_dead) 555{ 556} 557 558#ifdef TIF_NOTIFY_RESUME 559/** 560 * set_notify_resume - cause tracehook_notify_resume() to be called 561 * @task: task that will call tracehook_notify_resume() 562 * 563 * Calling this arranges that @task will call tracehook_notify_resume() 564 * before returning to user mode. If it's already running in user mode, 565 * it will enter the kernel and call tracehook_notify_resume() soon. 566 * If it's blocked, it will not be woken. 567 */ 568static inline void set_notify_resume(struct task_struct *task) 569{ 570 if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) 571 kick_process(task); 572} 573 574/** 575 * tracehook_notify_resume - report when about to return to user mode 576 * @regs: user-mode registers of @current task 577 * 578 * This is called when %TIF_NOTIFY_RESUME has been set. Now we are 579 * about to return to user mode, and the user state in @regs can be 580 * inspected or adjusted. 
  The caller in arch code has cleared
 * %TIF_NOTIFY_RESUME before the call.  If the flag gets set again
 * asynchronously, this will be called again before we return to
 * user mode.
 *
 * Called without locks.
 */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
	/* Nothing to do by default. */
}
#endif	/* TIF_NOTIFY_RESUME */

#endif	/* <linux/tracehook.h> */