1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77#include <linux/kernel.h>
78#include <linux/module.h>
79#include <linux/sched.h>
80#include <linux/slab.h>
81#include <linux/stat.h>
82#include <linux/workqueue.h>
83
84MODULE_LICENSE("GPL");
85MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
86MODULE_DESCRIPTION("Buggy module for shadow variable demo");
87
88
/* Seconds between successive dummy allocations */
#define ALLOC_PERIOD 1
/* Seconds between cleanup passes over the dummy list */
#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
/* Lifetime (seconds) of a dummy before a cleanup pass may free it */
#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
94
95
96
97
98
/*
 * Global list of live dummies, guarded by dummy_list_mutex.
 * Entries are added by the alloc worker and removed by the cleanup
 * worker (and drained at module exit).
 */
LIST_HEAD(dummy_list);
DEFINE_MUTEX(dummy_list_mutex);

struct dummy {
	struct list_head list;		/* linkage on dummy_list */
	unsigned long jiffies_expire;	/* jiffies time after which this entry may be freed */
};
106
/*
 * Allocate a new dummy that expires EXPIRE_PERIOD seconds from now.
 *
 * Returns the new dummy, or NULL on allocation failure.
 *
 * NOTE: this module is intentionally buggy (see MODULE_DESCRIPTION);
 * do NOT "fix" the leak below — it is the defect a livepatch shadow
 * variable is meant to repair at runtime.
 */
noinline struct dummy *dummy_alloc(void)
{
	struct dummy *d;
	void *leak;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	d->jiffies_expire = jiffies +
		msecs_to_jiffies(1000 * EXPIRE_PERIOD);

	/*
	 * Oops: the address of this allocation is never saved anywhere,
	 * so it is leaked on every call — deliberately, for the demo.
	 */
	leak = kzalloc(sizeof(int), GFP_KERNEL);

	pr_info("%s: dummy @ %p, expires @ %lx\n",
		__func__, d, d->jiffies_expire);

	return d;
}
127
128noinline void dummy_free(struct dummy *d)
129{
130 pr_info("%s: dummy @ %p, expired = %lx\n",
131 __func__, d, d->jiffies_expire);
132
133 kfree(d);
134}
135
136noinline bool dummy_check(struct dummy *d, unsigned long jiffies)
137{
138 return time_after(jiffies, d->jiffies_expire);
139}
140
141
142
143
144
145
146
147static void alloc_work_func(struct work_struct *work);
148static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
149
150static void alloc_work_func(struct work_struct *work)
151{
152 struct dummy *d;
153
154 d = dummy_alloc();
155 if (!d)
156 return;
157
158 mutex_lock(&dummy_list_mutex);
159 list_add(&d->list, &dummy_list);
160 mutex_unlock(&dummy_list_mutex);
161
162 schedule_delayed_work(&alloc_dwork,
163 msecs_to_jiffies(1000 * ALLOC_PERIOD));
164}
165
166
167
168
169
170
171
172static void cleanup_work_func(struct work_struct *work);
173static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
174
175static void cleanup_work_func(struct work_struct *work)
176{
177 struct dummy *d, *tmp;
178 unsigned long j;
179
180 j = jiffies;
181 pr_info("%s: jiffies = %lx\n", __func__, j);
182
183 mutex_lock(&dummy_list_mutex);
184 list_for_each_entry_safe(d, tmp, &dummy_list, list) {
185
186
187 if (dummy_check(d, j)) {
188 list_del(&d->list);
189 dummy_free(d);
190 }
191 }
192 mutex_unlock(&dummy_list_mutex);
193
194 schedule_delayed_work(&cleanup_dwork,
195 msecs_to_jiffies(1000 * CLEANUP_PERIOD));
196}
197
198static int livepatch_shadow_mod_init(void)
199{
200 schedule_delayed_work(&alloc_dwork,
201 msecs_to_jiffies(1000 * ALLOC_PERIOD));
202 schedule_delayed_work(&cleanup_dwork,
203 msecs_to_jiffies(1000 * CLEANUP_PERIOD));
204
205 return 0;
206}
207
208static void livepatch_shadow_mod_exit(void)
209{
210 struct dummy *d, *tmp;
211
212
213 cancel_delayed_work_sync(&alloc_dwork);
214 cancel_delayed_work_sync(&cleanup_dwork);
215
216
217 list_for_each_entry_safe(d, tmp, &dummy_list, list) {
218 list_del(&d->list);
219 dummy_free(d);
220 }
221}
222
223module_init(livepatch_shadow_mod_init);
224module_exit(livepatch_shadow_mod_exit);
225