/*
 * KCSAN reporting.
 */
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>

#include "kcsan.h"
#include "encoding.h"
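
/* Max. number of stack entries to show in the report. */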
#define NUM_STACK_ENTRIES 64
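
/* Common access info. */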
struct access_info {
	const volatile void *ptr;
	size_t size;
	int access_type;
	int task_pid;
	int cpu_id;
};
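
/*
 * Other thread info: communicated from the other racing thread to the thread
 * that set up the watchpoint, which then prints the complete report.
 */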
struct other_info {
	struct access_info ai;
	unsigned long stack_entries[NUM_STACK_ENTRIES];
	int num_stack_entries;
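
	/*
	 * Optionally pass @current: with CONFIG_KCSAN_VERBOSE, the thread that
	 * populated this struct is stalled until the report has been consumed,
	 * so that its held locks and IRQ trace can be printed reliably.
	 */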
	struct task_struct *task;
};
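
/*
 * There are as many struct other_info slots as there are watchpoints, so that
 * producers of other_info never need to wait for a free slot.
 */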
static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
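
/* Information about reported races; used to rate-limit reporting. */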
struct report_time {
	/* The last time this race was reported. */
	unsigned long time;

	/* Frames of the two racing threads; if only one is known, the other is 0. */
	unsigned long frame1;
	unsigned long frame2;
};
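
/*
 * Fixed-size table of recently reported races, occupying at most one page;
 * used by rate_limit_report() below.
 */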
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE \
	(CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ? \
	 REPORT_TIMES_MAX : \
	 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
static struct report_time report_times[REPORT_TIMES_SIZE];
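
/* Spinlock serializing report generation and access to @other_infos. */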
static DEFINE_RAW_SPINLOCK(report_lock);
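
/*
 * Checks if the race identified by thread frames frame1 and frame2 has
 * already been reported within the last CONFIG_KCSAN_REPORT_ONCE_IN_MS.
 */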
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
	struct report_time *use_entry = &report_times[0];
	unsigned long invalid_before;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

	if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
		return false;

	invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

	/* Check if a matching race report already exists. */
	for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
		struct report_time *rt = &report_times[i];

		/*
		 * Always track the oldest entry seen so far; if no match is
		 * found, it is reused below to record this report.
		 */
		if (time_before(rt->time, use_entry->time))
			use_entry = rt;

		/*
		 * A zero time means this and all following entries have never
		 * been used; no need to search any further.
		 */
		if (rt->time == 0)
			break;

		/* Skip entries older than the rate-limit window. */
		if (time_before(rt->time, invalid_before))
			continue;

		/* Reported recently; check if the race matches. */
		if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
		    (rt->frame1 == frame2 && rt->frame2 == frame1))
			return true;
	}

	use_entry->time = jiffies;
	use_entry->frame1 = frame1;
	use_entry->frame2 = frame2;
	return false;
}
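
/*
 * Special rules to skip reporting.
 */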
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
	/* Should never get here if value_change==FALSE. */
	WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

	/*
	 * With CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY, skip reports where the
	 * value may not have changed (KCSAN_VALUE_CHANGE_MAYBE), unless the
	 * function is RCU/SRCU-related, which is exempted from this filter.
	 */
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
	    value_change == KCSAN_VALUE_CHANGE_MAYBE) {
		char buf[64];
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

		if (!strnstr(buf, "rcu_", len) &&
		    !strnstr(buf, "_rcu", len) &&
		    !strnstr(buf, "_srcu", len))
			return true;
	}

	return kcsan_skip_report_debugfs(top_frame);
}

static const char *get_access_type(int type)
{
	if (type & KCSAN_ACCESS_ASSERT) {
		if (type & KCSAN_ACCESS_SCOPED) {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses (scoped)";
			else
				return "assert no writes (scoped)";
		} else {
			if (type & KCSAN_ACCESS_WRITE)
				return "assert no accesses";
			else
				return "assert no writes";
		}
	}

	switch (type) {
	case 0:
		return "read";
	case KCSAN_ACCESS_ATOMIC:
		return "read (marked)";
	case KCSAN_ACCESS_WRITE:
		return "write";
	case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked)";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
		return "read-write";
	case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "read-write (marked)";
	case KCSAN_ACCESS_SCOPED:
		return "read (scoped)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
		return "read (marked, scoped)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
		return "write (scoped)";
	case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
		return "write (marked, scoped)";
	default:
		BUG();
	}
}

static const char *get_bug_type(int type)
{
	return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
}
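
/* Return the thread description: in a task, or in an interrupt. */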
static const char *get_thread_desc(int task_id)
{
	if (task_id != -1) {
		static char buf[32];

		snprintf(buf, sizeof(buf), "task %i", task_id);
		return buf;
	}
	return "interrupt";
}
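
/* Helper to skip KCSAN-related functions in the stack trace. */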
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
	char buf[64];
	char *cur;
	int len, skip;

	for (skip = 0; skip < num_entries; ++skip) {
		len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

		/* Never show tsan_* or *_once_size frames. */
		if (strnstr(buf, "tsan_", len) ||
		    strnstr(buf, "_once_size", len))
			continue;

		cur = strnstr(buf, "kcsan_", len);
		if (cur) {
			cur += strlen("kcsan_");
			if (!str_has_prefix(cur, "test"))
				continue; /* KCSAN runtime function. */
			/* KCSAN-related test function: show it. */
		}

		/*
		 * No match for runtime functions: @skip entries to skip to get
		 * to the first frame of interest.
		 */
		break;
	}

	return skip;
}
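
/* Compare the symbolized names of addr1 and addr2. */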
static int sym_strcmp(void *addr1, void *addr2)
{
	char buf1[64];
	char buf2[64];

	snprintf(buf1, sizeof(buf1), "%pS", addr1);
	snprintf(buf2, sizeof(buf2), "%pS", addr2);

	return strncmp(buf1, buf2, sizeof(buf1));
}

static void print_verbose_info(struct task_struct *task)
{
	if (!task)
		return;

	/* Restore the task's saved IRQ trace state before printing it. */
	kcsan_restore_irqtrace(task);

	pr_err("\n");
	debug_show_held_locks(task);
	print_irqtrace_events(task);
}

static void print_report(enum kcsan_value_change value_change,
			 const struct access_info *ai,
			 const struct other_info *other_info,
			 u64 old, u64 new, u64 mask)
{
	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
	int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
	int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
	unsigned long this_frame = stack_entries[skipnr];
	unsigned long other_frame = 0;
	int other_skipnr = 0; /* silence uninit warning */

	/* Check the report filter rules before starting to print. */
	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
		return;

	if (other_info) {
		other_skipnr = get_stack_skipnr(other_info->stack_entries,
						other_info->num_stack_entries);
		other_frame = other_info->stack_entries[other_skipnr];

		/* Apply the value-change filter to the other thread's frame. */
		if (skip_report(value_change, other_frame))
			return;
	}

	if (rate_limit_report(this_frame, other_frame))
		return;

	/* Print report header. */
	pr_err("==================================================================\n");
	if (other_info) {
		int cmp;

		/*
		 * Order functions lexicographically for consistent bug titles.
		 * Do not print function offsets, to keep the title short.
		 */
		cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
		pr_err("BUG: KCSAN: %s in %ps / %ps\n",
		       get_bug_type(ai->access_type | other_info->ai.access_type),
		       (void *)(cmp < 0 ? other_frame : this_frame),
		       (void *)(cmp < 0 ? this_frame : other_frame));
	} else {
		pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
		       (void *)this_frame);
	}

	pr_err("\n");

	/* Print information about the racing accesses. */
	if (other_info) {
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
		       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
		       other_info->ai.cpu_id);

		stack_trace_print(other_info->stack_entries + other_skipnr,
				  other_info->num_stack_entries - other_skipnr,
				  0);

		if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
			print_verbose_info(other_info->task);

		pr_err("\n");
		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	} else {
		pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
		       get_access_type(ai->access_type), ai->ptr, ai->size,
		       get_thread_desc(ai->task_pid), ai->cpu_id);
	}

	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
			  0);

	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		print_verbose_info(current);

	/* Print the observed value change, if any. */
	if (ai->size <= 8) {
		int hex_len = ai->size * 2;
		u64 diff = old ^ new;

		if (mask)
			diff &= mask;
		if (diff) {
			pr_err("\n");
			pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
			       hex_len, old, hex_len, new);
			if (mask) {
				pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
				       hex_len, diff, hex_len, mask);
			}
		}
	}

	/* Print report footer. */
	pr_err("\n");
	pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
	dump_stack_print_info(KERN_DEFAULT);
	pr_err("==================================================================\n");

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");
}

static void release_report(unsigned long *flags, struct other_info *other_info)
{
	/*
	 * Mark the entry as consumed: a size of 0 denotes an invalid entry,
	 * since KCSAN entirely ignores 0-sized accesses.
	 */
	other_info->ai.size = 0;
	raw_spin_unlock_irqrestore(&report_lock, *flags);
}
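
/*
 * Sets @other_info->task and awaits consumption of @other_info.
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */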
static void set_other_info_task_blocking(unsigned long *flags,
					 const struct access_info *ai,
					 struct other_info *other_info)
{
	/*
	 * We may be instrumenting a code path where the task state is already
	 * something other than TASK_RUNNING.
	 */
	const bool is_running = task_is_running(current);
	/*
	 * To avoid deadlock if this is an interrupt racing with a task on the
	 * same CPU, bound the wait with a timeout that approximates the worst
	 * case delay of the reporting thread.
	 */
	int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

	other_info->task = current;
	do {
		if (is_running) {
			/*
			 * Let lockdep know the real task is sleeping, so that
			 * its held locks are printed (otherwise it would
			 * appear to hold no locks).
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
		}
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		/*
		 * We cannot call schedule() here, since we cannot reliably
		 * determine whether sleeping is permitted; busy-wait instead.
		 */
		udelay(1);
		raw_spin_lock_irqsave(&report_lock, *flags);
		if (timeout-- < 0) {
			/*
			 * Timed out waiting for the report to be consumed:
			 * reset @other_info->task to NULL; no verbose info
			 * will be printed for this task.
			 */
			other_info->task = NULL;
			break;
		}
		/*
		 * Keep waiting while @other_info is still valid, matches this
		 * access, and still refers to us (i.e. has not been consumed).
		 */
	} while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
		 other_info->task == current);
	if (is_running)
		set_current_state(TASK_RUNNING);
}
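
/* Populate @other_info; requires that the provided @other_info is not in use. */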
static void prepare_report_producer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);

	/*
	 * The same @other_infos entry cannot be used concurrently: each entry
	 * maps to one watchpoint slot, and a watchpoint is only released for
	 * reuse after reporting is done by the consumer of @other_info. The
	 * size must therefore be zero here, since every consumer resets it to
	 * 0 in release_report().
	 */
	WARN_ON(other_info->ai.size);

	other_info->ai = *ai;
	other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
		set_other_info_task_blocking(flags, ai, other_info);

	raw_spin_unlock_irqrestore(&report_lock, *flags);
}
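
/* Awaits the producer to fill in @other_info, and then consumes it. */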
static bool prepare_report_consumer(unsigned long *flags,
				    const struct access_info *ai,
				    struct other_info *other_info)
{
	raw_spin_lock_irqsave(&report_lock, *flags);
	while (!other_info->ai.size) { /* Await valid @other_info. */
		raw_spin_unlock_irqrestore(&report_lock, *flags);
		cpu_relax();
		raw_spin_lock_irqsave(&report_lock, *flags);
	}

	/* The accesses must match according to the watchpoint encoding. */
	if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
				     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
		goto discard;

	if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
			     (unsigned long)ai->ptr, ai->size)) {
		/*
		 * The actual accesses do not match: this was a false positive
		 * due to the (lossy) watchpoint encoding.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
		goto discard;
	}

	return true;

discard:
	release_report(flags, other_info);
	return false;
}

static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
					      int access_type)
{
	return (struct access_info) {
		.ptr = ptr,
		.size = size,
		.access_type = access_type,
		.task_pid = in_task() ? task_pid_nr(current) : -1,
		.cpu_id = raw_smp_processor_id()
	};
}

void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
			   int watchpoint_idx)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type);
	unsigned long flags;

	kcsan_disable_current();
	lockdep_off();

	prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);

	lockdep_on();
	kcsan_enable_current();
}

void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
			       enum kcsan_value_change value_change, int watchpoint_idx,
			       u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type);
	struct other_info *other_info = &other_infos[watchpoint_idx];
	unsigned long flags = 0;

	kcsan_disable_current();
	/*
	 * Reports may be generated from scheduler code, where the printk()
	 * calls below could deadlock; accept that risk and just get the
	 * message out, with lockdep turned off while the report is generated.
	 */
	lockdep_off();

	if (!prepare_report_consumer(&flags, &ai, other_info))
		goto out;

	/*
	 * Never report if value_change is FALSE, only if it is TRUE or MAYBE.
	 * In case of MAYBE, further filtering may still be done in
	 * print_report() once the full stack trace is known.
	 */
	if (value_change != KCSAN_VALUE_CHANGE_FALSE)
		print_report(value_change, &ai, other_info, old, new, mask);

	release_report(&flags, other_info);
out:
	lockdep_on();
	kcsan_enable_current();
}

void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
				 u64 old, u64 new, u64 mask)
{
	const struct access_info ai = prepare_access_info(ptr, size, access_type);
	unsigned long flags;

	kcsan_disable_current();
	lockdep_off();

	raw_spin_lock_irqsave(&report_lock, flags);
	print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
	raw_spin_unlock_irqrestore(&report_lock, flags);

	lockdep_on();
	kcsan_enable_current();
}