linux/kernel/hung_task.c
/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>

/*
 * The number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

/*
 * Limit number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period. So it needs to be upper-bound.
 */
#define HUNG_TASK_BATCHING 1024

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;

static int __read_mostly did_panic;

static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */
unsigned int __read_mostly sysctl_hung_task_panic =
                                CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;

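/*
 * "hung_task_panic=1" on the kernel command line overrides the
 * CONFIG_BOOTPARAM_HUNG_TASK_PANIC default at boot time.
 */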
static int __init hung_task_panic_setup(char *str)
{
        sysctl_hung_task_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);

static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = hung_task_panic,
};

static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
        unsigned long switch_count = t->nvcsw + t->nivcsw;

        /*
         * Ensure the task is not frozen.
         * Also, skip vfork and any other user process that freezer should skip.
         */
        if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
                return;

        /*
         * When a freshly created task is scheduled once and changes its state
         * to TASK_UNINTERRUPTIBLE without ever having been switched out, it
         * must not be checked.
         */
        if (unlikely(!switch_count))
                return;

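        /*
         * The context switch count changed since the last scan: the task has
         * run recently, so it is not hung. Remember the new count and check
         * again on the next pass.
         */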
        if (switch_count != t->last_switch_count) {
                t->last_switch_count = switch_count;
                return;
        }
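        /*
         * Rate-limit reporting: once sysctl_hung_task_warnings drops to zero,
         * no further hung tasks are reported (and the panic check below is
         * not reached either).
         */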
        if (!sysctl_hung_task_warnings)
                return;
        sysctl_hung_task_warnings--;

        /*
         * Ok, the task did not get scheduled for more than the timeout
         * (2 minutes by default), complain:
         */
        printk(KERN_ERR "INFO: task %s:%d blocked for more than "
                        "%ld seconds.\n", t->comm, t->pid, timeout);
        printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
                        " disables this message.\n");
        sched_show_task(t);
        debug_show_held_locks(t);

        touch_nmi_watchdog();

        if (sysctl_hung_task_panic)
                panic("hung_task: blocked tasks");
}

/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
        bool can_cont;

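        /*
         * Pin both task structs so they cannot be freed while the RCU
         * read-side critical section is dropped below.
         */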
        get_task_struct(g);
        get_task_struct(t);
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
        can_cont = pid_alive(g) && pid_alive(t);
        put_task_struct(t);
        put_task_struct(g);

        return can_cont;
}

/*
 * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
 * a really long time (120 seconds by default). If that happens, print
 * out a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
        int max_count = sysctl_hung_task_check_count;
        int batch_count = HUNG_TASK_BATCHING;
        struct task_struct *g, *t;

        /*
         * If the system crashed already then all bets are off,
         * do not report extra hung tasks:
         */
        if (test_taint(TAINT_DIE) || did_panic)
                return;

        rcu_read_lock();
        do_each_thread(g, t) {
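                /* Stop once sysctl_hung_task_check_count tasks have been examined. */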
                if (!max_count--)
                        goto unlock;
                if (!--batch_count) {
                        batch_count = HUNG_TASK_BATCHING;
                        if (!rcu_lock_break(g, t))
                                goto unlock;
                }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
                if (t->state == TASK_UNINTERRUPTIBLE)
                        check_hung_task(t, timeout);
        } while_each_thread(g, t);
 unlock:
        rcu_read_unlock();
}

static unsigned long timeout_jiffies(unsigned long timeout)
{
        /* timeout of 0 will disable the watchdog */
        return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}

/*
 * Process updating of timeout sysctl
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                  void __user *buffer,
                                  size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

        if (ret || !write)
                goto out;

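        /*
         * A successful write wakes up khungtaskd so the new timeout takes
         * effect immediately instead of after the previous interval expires.
         */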
        wake_up_process(watchdog_task);

 out:
        return ret;
}

/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
        set_user_nice(current, 0);

        for ( ; ; ) {
                unsigned long timeout = sysctl_hung_task_timeout_secs;

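                /*
                 * schedule_timeout_interruptible() returns non-zero when the
                 * thread is woken before the timeout expires (e.g. by a write
                 * to the sysctl), so re-read the possibly updated value
                 * before scanning.
                 */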
                while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
                        timeout = sysctl_hung_task_timeout_secs;

                check_hung_uninterruptible_tasks(timeout);
        }

        return 0;
}

static int __init hung_task_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
        watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

        return 0;
}

module_init(hung_task_init);