// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

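/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */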
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

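/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */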
static void klp_sync(struct work_struct *work)
{
}

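/*
 * We allow to patch also functions where RCU is not watching,
 * e.g. before user_exit(). We can not rely on the RCU infrastructure
 * to do the synchronization.
 *
 * Instead hard force the sched synchronization.
 *
 * This approach allows to use RCU functions for manipulating func_stack
 * safely.
 */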
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

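/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */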
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

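/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */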
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

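/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */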
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

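/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */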
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

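/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */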
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

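/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */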
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

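/*
 * Sends a fake signal to all non-kthread tasks which have TIF_PATCH_PENDING
 * set. Kthreads with TIF_PATCH_PENDING set are woken up.
 */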
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptedly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

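/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */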
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_init_transition() error path. More complex
	 * solution would be required to trigger the free here.
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

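/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */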
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

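/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */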
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

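/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */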
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

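/* Called from copy_process() during fork */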
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

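/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * violated. Administrator, who is the only one to execute the
 * klp_force_transition(), has to be aware of this.
 */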
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}