/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"

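/*
 * List of 'struct klp_ops' entries, one per patched function.  Each entry
 * tracks the stack of klp_func structs (possibly from different patches)
 * that replace the same original function.
 */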
static LIST_HEAD(klp_ops);

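/*
 * Find the klp_ops struct for a given original function address.  All
 * klp_func structs on an ops' func_stack patch the same original function,
 * so checking the first entry is sufficient.
 */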
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

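/*
 * The ftrace handler for patched functions: redirect execution to the new
 * function by rewriting the instruction pointer in the saved registers.
 * It must be notrace so it doesn't recurse into itself.
 */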
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * Enforce the order of the ops->func_stack and func->transition
	 * reads.  The corresponding write barrier is in __klp_enable_patch().
	 */
	smp_rmb();

	if (unlikely(func->transition)) {
		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

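/*
 * Remove a patched function from its ops' func_stack.  If it was the last
 * function on the stack, the ftrace handler is unregistered and the ops
 * struct is freed.
 */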
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

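/*
 * Register a new ftrace handler for the function (if this is the first
 * patch for old_addr) or push the new function on top of an existing ops'
 * func_stack, where the handler will pick it up as the most recent version.
 */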
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

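/* Revert all patched functions in the object and mark it unpatched. */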
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

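/*
 * Patch every function in the object.  On failure, roll back any functions
 * that were already patched.
 */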
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

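/* Revert every patched object in the patch. */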
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}