// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from the address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to size that exceeds a
 *	   slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 *	1. excessive contention between watchpoint checks and setup;
 *	2. larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *	slot=0:  [ 1,  2,  0]
 *	slot=9:  [10, 11,  9]
 *	slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's
 * primary slot (middle) is fine if we assume that races occur rarely. The set
 * of indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow-path
	 * via reset_kcsan_skip() to avoid underflow. We use a per-CPU counter
	 * to avoid excessive contention; there is still enough
	 * non-determinism for the precise instructions that end up being
	 * watched to be mostly unpredictable.
	 */
	return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
			       0;
	udelay(delay);
}

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below is the fast-path entry point
 * and must be inlinable by the instrumentation functions; the slow-paths
 * (kcsan_found_watchpoint(), kcsan_setup_watchpoint()) are non-inlinable and
 * provide the core runtime.
 */
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long flags;
	bool consumed;

	/*
	 * We know a watchpoint exists. Let's try to keep the race-window
	 * between here and finally consuming the watchpoint below as small as
	 * possible -- avoid unneccessarily complex code until consumed.
	 */

	if (!kcsan_is_enabled(ctx))
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (ctx->access_mask)
		return;

	/*
	 * This thread's access may be ignorable (e.g. it targets an address on
	 * the ignore-list); in that case, do not report, unless it is an
	 * assertion, which must never be ignored.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		return;

	/*
	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
	 * avoid erroneously triggering reports if the context is disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if (is_assert)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	u64 old, new, diff;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled(ctx))
		goto out;

	/*
	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
	 * memory that is not yet initialized during early boot.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	old = 0;
	switch (size) {
	case 1:
		old = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		old = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		old = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		old = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = ctx->access_mask;
	new = 0;
	switch (size) {
	case 1:
		new = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		new = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		new = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		new = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	diff = old ^ new;
	if (access_mask)
		diff &= access_mask;

	/*
	 * Check if we observed a value change.
	 *
	 * Also check if the data race should be ignored (the rules depend on
	 * non-zero diff); if it is to be ignored, the below rules for
	 * KCSAN_VALUE_CHANGE_MAYBE apply.
	 */
	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report) to avoid
		 * reporting of racy accesses where the value change cannot be
		 * observed.
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report_known_origin(ptr, size, type, value_change,
					  watchpoint - watchpoints,
					  old, new, access_mask);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */
	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should have been created;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}

	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
	    IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
	    IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
	    IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
	} else {
		pr_info("strict mode configured\n");
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * comments.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if calls are unbalanced; negative nest counts should
		 * not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
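
/*
 * Illustrative sketch (not part of this file; 'flags' is hypothetical): with a
 * non-zero access mask, only changes to the masked bits count as a
 * value-change. To watch only bit 0 of a flags word, a caller could do:
 *
 *	kcsan_set_access_mask(0x1);
 *	__kcsan_check_access(&flags, sizeof(flags), 0);
 *	kcsan_set_access_mask(0);
 *
 * This is the mechanism behind ASSERT_EXCLUSIVE_BITS() in
 * include/linux/kcsan-checks.h.
 */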

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
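
/*
 * Usage sketch for the scoped-access API (illustrative only; 'obj' is
 * hypothetical): the access is checked once on begin, re-checked on every
 * entry into the runtime while the scope is active, and checked a final time
 * on end.
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_ASSERT, &sa);
 *	... critical section ...
 *	kcsan_end_scoped_access(&sa);
 */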

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that, not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * accesses, which works for all versions that have the unaligned entry points.
 */

#define DEFINE_TSAN_READ_WRITE(size)					\
	void __tsan_read##size(void *ptr);				\
	void __tsan_read##size(void *ptr)				\
	{								\
		check_access(ptr, size, 0);				\
	}								\
	EXPORT_SYMBOL(__tsan_read##size);				\
	void __tsan_unaligned_read##size(void *ptr)			\
		__alias(__tsan_read##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_read##size);			\
	void __tsan_write##size(void *ptr);				\
	void __tsan_write##size(void *ptr)				\
	{								\
		check_access(ptr, size, KCSAN_ACCESS_WRITE);		\
	}								\
	EXPORT_SYMBOL(__tsan_write##size);				\
	void __tsan_unaligned_write##size(void *ptr)			\
		__alias(__tsan_write##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_write##size);			\
	void __tsan_read_write##size(void *ptr);			\
	void __tsan_read_write##size(void *ptr)				\
	{								\
		check_access(ptr, size,					\
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
	}								\
	EXPORT_SYMBOL(__tsan_read_write##size);				\
	void __tsan_unaligned_read_write##size(void *ptr)		\
		__alias(__tsan_read_write##size);			\
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
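
/*
 * For example (illustrative): for a plain 4-byte load such as "x = flag;", an
 * instrumented compilation unit emits a call to __tsan_read4(&flag)
 * immediately before the load itself, which is how check_access() above ends
 * up running for every instrumented plain access.
 */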

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * architecture code or simply legacy code.
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 *
 * [1] https://lwn.net/Articles/233479/
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)				\
	void __tsan_volatile_read##size(void *ptr);			\
	void __tsan_volatile_read##size(void *ptr)			\
	{								\
		const bool is_atomic = size <= sizeof(long long) &&	\
				       IS_ALIGNED((unsigned long)ptr, size); \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
			return;						\
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
	}								\
	EXPORT_SYMBOL(__tsan_volatile_read##size);			\
	void __tsan_unaligned_volatile_read##size(void *ptr)		\
		__alias(__tsan_volatile_read##size);			\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);		\
	void __tsan_volatile_write##size(void *ptr);			\
	void __tsan_volatile_write##size(void *ptr)			\
	{								\
		const bool is_atomic = size <= sizeof(long long) &&	\
				       IS_ALIGNED((unsigned long)ptr, size); \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
			return;						\
		check_access(ptr, size,					\
			     KCSAN_ACCESS_WRITE |			\
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
	}								\
	EXPORT_SYMBOL(__tsan_volatile_write##size);			\
	void __tsan_unaligned_volatile_write##size(void *ptr)		\
		__alias(__tsan_volatile_write##size);			\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
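
/*
 * For example (illustrative): READ_ONCE() and WRITE_ONCE() compile to volatile
 * accesses, so "WRITE_ONCE(x, 1)" on an aligned 4-byte x emits a call to
 * __tsan_volatile_write4(&x), which is treated as atomic above -- matching the
 * kernel's convention that marked accesses do not constitute data races.
 */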

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation of atomic builtins (__atomic_*): the compiler emits calls to
 * the __tsan_atomic* functions below for each use of an atomic builtin. Each
 * entry point reports the access as atomic (unless
 * CONFIG_KCSAN_IGNORE_ATOMICS is selected, in which case the check is skipped
 * entirely) and then performs the operation via the corresponding __atomic
 * builtin.
 */

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)				\
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
	{								\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {		\
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
		}							\
		return __atomic_load_n(ptr, memorder);			\
	}								\
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);			\
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{								\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {		\
			check_access(ptr, bits / BITS_PER_BYTE,		\
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
		}							\
		__atomic_store_n(ptr, v, memorder);			\
	}								\
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)			\
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{								\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {		\
			check_access(ptr, bits / BITS_PER_BYTE,		\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);	\
		}							\
		return __atomic_##op##suffix(ptr, v, memorder);		\
	}								\
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: compare-exchange operations are instrumented as compound read-write
 * accesses even if the exchange fails; this is conservative, given the intent
 * to write.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)		\
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo) \
	{								\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {		\
			check_access(ptr, bits / BITS_PER_BYTE,		\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);	\
		}							\
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}								\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)				\
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{								\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {		\
			check_access(ptr, bits / BITS_PER_BYTE,		\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC);	\
		}							\
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
		return exp;						\
	}								\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)					\
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);				\
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );			\
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );			\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);			\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);			\
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
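
/*
 * For illustration, DEFINE_TSAN_ATOMIC_OPS(32) expands to (among others):
 *
 *	u32 __tsan_atomic32_load(const u32 *ptr, int memorder);
 *	void __tsan_atomic32_store(u32 *ptr, u32 v, int memorder);
 *	u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder);
 *	int __tsan_atomic32_compare_exchange_strong(u32 *ptr, u32 *exp,
 *						    u32 val, int mo, int fail_mo);
 *
 * each reporting the access as atomic and forwarding to the corresponding
 * __atomic builtin.
 */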

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);