// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core software tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

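/* Per-CPU state for the PRNG that produces random pointer tags. */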
static DEFINE_PER_CPU(u32, prng_state);

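/* Seed each CPU's tag PRNG from the cycle counter at early boot. */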
void __init kasan_init_sw_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();

	pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that a few objects allocated in different contexts may get
 * the same tag. Since tag-based KASAN is meant to be used as a probabilistic
 * bug-detection debug feature, this doesn't have significant impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempts to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, and we made an intentional trade-off to use a PRNG. This
 * non-atomic RMW sequence is not significantly slower than an equivalent
 * per-CPU atomic RMW.
 */
u8 kasan_random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	/* Advance the per-CPU linear congruential generator. */
	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	/* Reduce to [0x00, KASAN_TAG_MAX]; 0xFE and 0xFF remain reserved. */
	return (u8)(state % (KASAN_TAG_MAX + 1));
}

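/*
 * Check that the tag embedded in the pointer matches the shadow memory tag
 * of every granule covered by [addr, addr + size): return true if the
 * access is valid, false after reporting a bug.
 */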
bool kasan_check_range(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	/* Reject ranges that wrap around the top of the address space. */
	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (which keeps high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 *
	 * The issue is that with the page_address + virt_to_page sequence the
	 * top byte value of the original pointer gets lost (it gets set to
	 * KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;

	untagged_addr = kasan_reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}
	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag) {
			return !kasan_report(addr, size, write, ret_ip);
		}
	}

	return true;
}

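/*
 * Return whether the single byte at addr may be accessed: pointers carrying
 * the native kernel tag (0xFF) are always allowed, otherwise the pointer tag
 * must match the shadow byte.
 */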
bool kasan_byte_accessible(const void *addr)
{
	u8 tag = get_tag(addr);
	void *untagged_addr = kasan_reset_tag(addr);
	u8 shadow_byte;

	if (untagged_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START))
		return false;

	shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));
	return tag == KASAN_TAG_KERNEL || tag == shadow_byte;
}

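/*
 * With outline instrumentation the compiler replaces each checked memory
 * access with a call to one of these __hwasan_* hooks; they simply forward
 * to kasan_check_range() with the access size and direction.
 */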
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
		kasan_check_range(addr, size, false, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(unsigned long addr)	\
	{								\
		kasan_check_range(addr, size, true, _RET_IP_);		\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

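/* Variants for accesses whose size is not one of the fixed sizes above. */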
void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

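/* Called by instrumented code to set the shadow tag for a memory region. */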
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
	kasan_poison((void *)addr, size, tag, false);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

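/*
 * Report a tag mismatch detected by inline instrumentation: access_info
 * packs log2 of the access size in bits 0-3 and the write flag in bit 4,
 * matching the decoding below.
 */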
void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
			unsigned long ret_ip)
{
	kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
			ret_ip);
}