// SPDX-License-Identifier: GPL-2.0
/*
 * Test module to generate lockups
 */
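/*
 * Usage sketch (illustrative parameter values, not part of the original
 * source):
 *
 *	insmod test_lockup.ko time_secs=1 iterations=60 state=R disable_softirq
 *
 * Loading intentionally "fails" once the test completes, so the module
 * never stays resident.
 */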
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/file.h>

static unsigned int time_secs;
module_param(time_secs, uint, 0600);
MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");

static unsigned int time_nsecs;
module_param(time_nsecs, uint, 0600);
MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");

static unsigned int cooldown_secs;
module_param(cooldown_secs, uint, 0600);
MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");

static unsigned int cooldown_nsecs;
module_param(cooldown_nsecs, uint, 0600);
MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");

static unsigned int iterations = 1;
module_param(iterations, uint, 0600);
MODULE_PARM_DESC(iterations, "lockup iterations, default 1");

static bool all_cpus;
module_param(all_cpus, bool, 0400);
MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");

static int wait_state;
static char *state = "R";
module_param(state, charp, 0400);
MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");

static bool use_hrtimer;
module_param(use_hrtimer, bool, 0400);
MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");

static bool iowait;
module_param(iowait, bool, 0400);
MODULE_PARM_DESC(iowait, "account sleep time as iowait");

static bool lock_read;
module_param(lock_read, bool, 0400);
MODULE_PARM_DESC(lock_read, "lock read-write locks for read");

static bool lock_single;
module_param(lock_single, bool, 0400);
MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");

static bool reacquire_locks;
module_param(reacquire_locks, bool, 0400);
MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");

static bool touch_softlockup;
module_param(touch_softlockup, bool, 0600);
MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");

static bool touch_hardlockup;
module_param(touch_hardlockup, bool, 0600);
MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");

static bool call_cond_resched;
module_param(call_cond_resched, bool, 0600);
MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");

static bool measure_lock_wait;
module_param(measure_lock_wait, bool, 0400);
MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");

static unsigned long lock_wait_threshold = ULONG_MAX;
module_param(lock_wait_threshold, ulong, 0400);
MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");

static bool test_disable_irq;
module_param_named(disable_irq, test_disable_irq, bool, 0400);
MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");

static bool disable_softirq;
module_param(disable_softirq, bool, 0400);
MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");

static bool disable_preempt;
module_param(disable_preempt, bool, 0400);
MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");

static bool lock_rcu;
module_param(lock_rcu, bool, 0400);
MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");

static bool lock_mmap_sem;
module_param(lock_mmap_sem, bool, 0400);
MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");

static unsigned long lock_rwsem_ptr;
module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");

static unsigned long lock_mutex_ptr;
module_param_unsafe(lock_mutex_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");

static unsigned long lock_spinlock_ptr;
module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");

static unsigned long lock_rwlock_ptr;
module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");

static unsigned int alloc_pages_nr;
module_param_unsafe(alloc_pages_nr, uint, 0600);
MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");

static unsigned int alloc_pages_order;
module_param(alloc_pages_order, uint, 0400);
MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");

static gfp_t alloc_pages_gfp = GFP_KERNEL;
module_param_unsafe(alloc_pages_gfp, uint, 0400);
MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");

static bool alloc_pages_atomic;
module_param(alloc_pages_atomic, bool, 0400);
MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");

static bool reallocate_pages;
module_param(reallocate_pages, bool, 0400);
MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");

static struct file *test_file;
static struct inode *test_inode;
static char test_file_path[256];
module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
MODULE_PARM_DESC(file_path, "file path to test");

static bool test_lock_inode;
module_param_named(lock_inode, test_lock_inode, bool, 0400);
MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");

static bool test_lock_mapping;
module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");

static bool test_lock_sb_umount;
module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");

static atomic_t alloc_pages_failed = ATOMIC_INIT(0);

static atomic64_t max_lock_wait = ATOMIC64_INIT(0);

static struct task_struct *main_task;
static int master_cpu;

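/*
 * Acquire the configured locks and contexts in order: sleeping locks
 * first (mutex, rw_semaphore, mmap_lock), then irq/softirq/preemption
 * disabling and rcu_read_lock(), and finally the spinning locks
 * (spinlock, rwlock). Optionally measures how long acquisition took.
 */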
static void test_lock(bool master, bool verbose)
{
	u64 wait_start;

	if (measure_lock_wait)
		wait_start = local_clock();

	if (lock_mutex_ptr && master) {
		if (verbose)
			pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
		mutex_lock((struct mutex *)lock_mutex_ptr);
	}

	if (lock_rwsem_ptr && master) {
		if (verbose)
			pr_notice("lock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
		if (lock_read)
			down_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			down_write((struct rw_semaphore *)lock_rwsem_ptr);
	}

	if (lock_mmap_sem && master) {
		if (verbose)
			pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
		if (lock_read)
			mmap_read_lock(main_task->mm);
		else
			mmap_write_lock(main_task->mm);
	}

	if (test_disable_irq)
		local_irq_disable();

	if (disable_softirq)
		local_bh_disable();

	if (disable_preempt)
		preempt_disable();

	if (lock_rcu)
		rcu_read_lock();

	if (lock_spinlock_ptr && master) {
		if (verbose)
			pr_notice("lock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
		spin_lock((spinlock_t *)lock_spinlock_ptr);
	}

	if (lock_rwlock_ptr && master) {
		if (verbose)
			pr_notice("lock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
		if (lock_read)
			read_lock((rwlock_t *)lock_rwlock_ptr);
		else
			write_lock((rwlock_t *)lock_rwlock_ptr);
	}

	if (measure_lock_wait) {
		s64 cur_wait = local_clock() - wait_start;
		s64 max_wait = atomic64_read(&max_lock_wait);

		do {
			if (cur_wait < max_wait)
				break;
			max_wait = atomic64_cmpxchg(&max_lock_wait,
						    max_wait, cur_wait);
		} while (max_wait != cur_wait);

		if (cur_wait > lock_wait_threshold)
			pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
	}
}

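/*
 * Drop everything taken in test_lock(), in reverse order.
 */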
static void test_unlock(bool master, bool verbose)
{
	if (lock_rwlock_ptr && master) {
		if (lock_read)
			read_unlock((rwlock_t *)lock_rwlock_ptr);
		else
			write_unlock((rwlock_t *)lock_rwlock_ptr);
		if (verbose)
			pr_notice("unlock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
	}

	if (lock_spinlock_ptr && master) {
		spin_unlock((spinlock_t *)lock_spinlock_ptr);
		if (verbose)
			pr_notice("unlock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
	}

	if (lock_rcu)
		rcu_read_unlock();

	if (disable_preempt)
		preempt_enable();

	if (disable_softirq)
		local_bh_enable();

	if (test_disable_irq)
		local_irq_enable();

	if (lock_mmap_sem && master) {
		if (lock_read)
			mmap_read_unlock(main_task->mm);
		else
			mmap_write_unlock(main_task->mm);
		if (verbose)
			pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
	}

	if (lock_rwsem_ptr && master) {
		if (lock_read)
			up_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			up_write((struct rw_semaphore *)lock_rwsem_ptr);
		if (verbose)
			pr_notice("unlock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
	}

	if (lock_mutex_ptr && master) {
		mutex_unlock((struct mutex *)lock_mutex_ptr);
		if (verbose)
			pr_notice("unlock mutex %ps\n",
				  (void *)lock_mutex_ptr);
	}
}

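/*
 * Allocate alloc_pages_nr pages of alloc_pages_order under the locks and
 * chain them on the given list via page->lru; allocation failures are
 * counted, not fatal. test_free_pages() below returns them.
 */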
static void test_alloc_pages(struct list_head *pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < alloc_pages_nr; i++) {
		page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
		if (!page) {
			atomic_inc(&alloc_pages_failed);
			break;
		}
		list_add(&page->lru, pages);
	}
}

static void test_free_pages(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru)
		__free_pages(page, alloc_pages_order);
	INIT_LIST_HEAD(pages);
}

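/*
 * In 'R' state busy-wait with mdelay()/ndelay(); in the other states
 * really sleep via schedule_timeout() or a high-resolution timer.
 */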
static void test_wait(unsigned int secs, unsigned int nsecs)
{
	if (wait_state == TASK_RUNNING) {
		if (secs)
			mdelay(secs * MSEC_PER_SEC);
		if (nsecs)
			ndelay(nsecs);
		return;
	}

	__set_current_state(wait_state);
	if (use_hrtimer) {
		ktime_t time;

		time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
		schedule_hrtimeout(&time, HRTIMER_MODE_REL);
	} else {
		schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
	}
}

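/*
 * The lockup itself: take the locks, then alternate lockup and cooldown
 * waits for the configured number of iterations, optionally dropping and
 * reacquiring locks, touching watchdogs or calling cond_resched() in
 * between. A signal pending on the main task ends the loop early.
 */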
static void test_lockup(bool master)
{
	u64 lockup_start = local_clock();
	unsigned int iter = 0;
	LIST_HEAD(pages);

	pr_notice("Start on CPU%d\n", raw_smp_processor_id());

	test_lock(master, true);

	test_alloc_pages(&pages);

	while (iter++ < iterations && !signal_pending(main_task)) {

		if (iowait)
			current->in_iowait = 1;

		test_wait(time_secs, time_nsecs);

		if (iowait)
			current->in_iowait = 0;

		if (reallocate_pages)
			test_free_pages(&pages);

		if (reacquire_locks)
			test_unlock(master, false);

		if (touch_softlockup)
			touch_softlockup_watchdog();

		if (touch_hardlockup)
			touch_nmi_watchdog();

		if (call_cond_resched)
			cond_resched();

		test_wait(cooldown_secs, cooldown_nsecs);

		if (reacquire_locks)
			test_lock(master, false);

		if (reallocate_pages)
			test_alloc_pages(&pages);
	}

	pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
		  local_clock() - lockup_start);

	test_free_pages(&pages);

	test_unlock(master, true);
}

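/*
 * In all_cpus mode one work item per CPU runs test_lockup(); with
 * lock_single set, only the work on the master CPU acquires the locks.
 */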
static DEFINE_PER_CPU(struct work_struct, test_works);

static void test_work_fn(struct work_struct *work)
{
	test_lockup(!lock_single ||
		    work == per_cpu_ptr(&test_works, master_cpu));
}

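/*
 * Sanity-check a user-supplied lock address: reject user-space pointers
 * and anything that faults when read.
 */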
static bool test_kernel_ptr(unsigned long addr, int size)
{
	void *ptr = (void *)addr;
	char buf;

	if (!addr)
		return false;

	/* should be at least readable kernel address */
	if (access_ok((void __user *)ptr, 1) ||
	    access_ok((void __user *)ptr + size - 1, 1) ||
	    get_kernel_nofault(buf, ptr) ||
	    get_kernel_nofault(buf, ptr + size - 1)) {
		pr_err("invalid kernel ptr: %#lx\n", addr);
		return true;
	}

	return false;
}

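/*
 * With lock debugging enabled, check the magic value inside the lock
 * structure to catch pointers to the wrong kind of lock.
 */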
static bool __maybe_unused test_magic(unsigned long addr, int offset,
				      unsigned int expected)
{
	void *ptr = (void *)addr + offset;
	unsigned int magic = 0;

	if (!addr)
		return false;

	if (get_kernel_nofault(magic, ptr) || magic != expected) {
		pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
		       addr, offset, magic, expected);
		return true;
	}

	return false;
}

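/*
 * Validate the parameters, announce what is about to happen, then run
 * the lockup on every online CPU or just in the current task.
 */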
static int __init test_lockup_init(void)
{
	u64 test_start = local_clock();

	main_task = current;

	switch (state[0]) {
	case 'S':
		wait_state = TASK_INTERRUPTIBLE;
		break;
	case 'D':
		wait_state = TASK_UNINTERRUPTIBLE;
		break;
	case 'K':
		wait_state = TASK_KILLABLE;
		break;
	case 'R':
		wait_state = TASK_RUNNING;
		break;
	default:
		pr_err("unknown state=%s\n", state);
		return -EINVAL;
	}

	if (alloc_pages_atomic)
		alloc_pages_gfp = GFP_ATOMIC;

	if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
	    test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
	    test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
	    test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
		return -EINVAL;

#ifdef CONFIG_DEBUG_SPINLOCK
#ifdef CONFIG_PREEMPT_RT
	if (test_magic(lock_spinlock_ptr,
		       offsetof(spinlock_t, lock.wait_lock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwlock_ptr,
		       offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_mutex_ptr,
		       offsetof(struct mutex, rtmutex.wait_lock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwsem_ptr,
		       offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
		       SPINLOCK_MAGIC))
		return -EINVAL;
#else
	if (test_magic(lock_spinlock_ptr,
		       offsetof(spinlock_t, rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwlock_ptr,
		       offsetof(rwlock_t, magic),
		       RWLOCK_MAGIC) ||
	    test_magic(lock_mutex_ptr,
		       offsetof(struct mutex, wait_lock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwsem_ptr,
		       offsetof(struct rw_semaphore, wait_lock.magic),
		       SPINLOCK_MAGIC))
		return -EINVAL;
#endif
#endif

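	/*
	 * Refuse combinations that could sleep while holding a spinning
	 * lock or while irqs, softirqs or preemption are disabled or
	 * inside an RCU read-side critical section.
	 */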
	if ((wait_state != TASK_RUNNING ||
	     (call_cond_resched && !reacquire_locks) ||
	     (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
	    (test_disable_irq || disable_softirq || disable_preempt ||
	     lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
		pr_err("refuse to sleep in atomic context\n");
		return -EINVAL;
	}

	if (lock_mmap_sem && !main_task->mm) {
		pr_err("no mm to lock mmap_lock\n");
		return -EINVAL;
	}

	if (test_file_path[0]) {
		test_file = filp_open(test_file_path, O_RDONLY, 0);
		if (IS_ERR(test_file)) {
			pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
			return PTR_ERR(test_file);
		}
		test_inode = file_inode(test_file);
	} else if (test_lock_inode ||
		   test_lock_mapping ||
		   test_lock_sb_umount) {
		pr_err("no file to lock\n");
		return -EINVAL;
	}

	if (test_lock_inode && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;

	if (test_lock_mapping && test_file && test_file->f_mapping)
		lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;

	if (test_lock_sb_umount && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;

	pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
		  main_task->pid, time_secs, time_nsecs,
		  cooldown_secs, cooldown_nsecs, iterations, state,
		  all_cpus ? "all_cpus " : "",
		  iowait ? "iowait " : "",
		  test_disable_irq ? "disable_irq " : "",
		  disable_softirq ? "disable_softirq " : "",
		  disable_preempt ? "disable_preempt " : "",
		  lock_rcu ? "lock_rcu " : "",
		  lock_read ? "lock_read " : "",
		  touch_softlockup ? "touch_softlockup " : "",
		  touch_hardlockup ? "touch_hardlockup " : "",
		  call_cond_resched ? "call_cond_resched " : "",
		  reacquire_locks ? "reacquire_locks " : "");

	if (alloc_pages_nr)
		pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
			  alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
			  reallocate_pages ? "reallocate_pages " : "");

	if (all_cpus) {
		unsigned int cpu;

		cpus_read_lock();

		preempt_disable();
		master_cpu = smp_processor_id();
		for_each_online_cpu(cpu) {
			INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&test_works, cpu));
		}
		preempt_enable();

		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(&test_works, cpu));

		cpus_read_unlock();
	} else {
		test_lockup(true);
	}

	if (measure_lock_wait)
		pr_notice("Maximum lock wait: %lld ns\n",
			  atomic64_read(&max_lock_wait));

	if (alloc_pages_nr)
		pr_notice("Page allocation failed %u times\n",
			  atomic_read(&alloc_pages_failed));

	pr_notice("FINISH in %llu ns\n", local_clock() - test_start);

	if (test_file)
		fput(test_file);

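	/*
	 * Never stay loaded: -EINTR if a signal stopped the test,
	 * otherwise -EAGAIN so insmod fails after the test ran once.
	 */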
	if (signal_pending(main_task))
		return -EINTR;

	return -EAGAIN;
}
module_init(test_lockup_init);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
MODULE_DESCRIPTION("Test module to generate lockups");