/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128

/* The patch being applied or reverted, NULL when no transition is running. */
struct klp_patch *klp_transition_patch;

/* KLP_PATCHED or KLP_UNPATCHED during a transition, KLP_UNDEFINED otherwise. */
static int klp_target_state = KLP_UNDEFINED;

/* Set (and never cleared) if the admin has ever forced a transition. */
static bool klp_forced = false;

/*
 * This work can be performed periodically to finish patching or unpatching
 * any "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force of
 * synchronize_sched().  It is needed to synchronize with tasks for which
 * RCU is not watching, i.e. in userspace or idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * Functions may be patched even where RCU is not watching, e.g. before
 * user_exit(), so the RCU infrastructure alone cannot be relied on for
 * synchronization.  Instead, hard-force a scheduler round trip on every
 * CPU: once this returns, each CPU has passed through a context switch and
 * no task can still be running inside an ftrace handler invocation that
 * started before the call.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler might choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * A forced transition means some tasks may still be running the old
	 * code, so the patch module's reference is never dropped in that
	 * case.  Otherwise, a completed unpatching releases the reference
	 * taken when the patch was enabled.
	 */
	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
		module_put(klp_transition_patch->mod);

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
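
/*
 * For context, a sketch of how an arch is expected to call the function
 * above (illustrative only, modeled on the x86 exit-to-usermode path of
 * this era; the exact call site is arch-specific):
 *
 *	if (cached_flags & _TIF_PATCH_PENDING)
 *		klp_update_patch_state(current);
 *
 * Tasks that never sleep inside a patched function get switched this way,
 * on their next return to userspace, without any stack checking.
 */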

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}
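
/*
 * A worked example of the func_stack logic above, with hypothetical names
 * for illustration only.  Suppose a function has already been patched once,
 * so ops->func_stack is (newest first): p2_func -> p1_func.
 *
 *  - Patching p2 (target KLP_PATCHED): an unswitched task may be sleeping in
 *    the code it currently runs, i.e. list_next_entry(p2_func)->new_func ==
 *    p1_func, so that is what the stack is checked for.  Had p1 not existed
 *    (the singular case), the original function would be checked via
 *    old_addr/old_size instead.
 *
 *  - Unpatching p2 (target KLP_UNPATCHED): tasks must not be sleeping in the
 *    code being removed, so the stack is checked for p2_func itself
 *    (func->new_func/new_size).
 */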

/*
 * Determine whether it's safe to transition the task to the target patch
 * state by looking for any to-be-patched or to-be-unpatched functions on its
 * stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}
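
/*
 * Note on the static 'entries' array above: MAX_STACK_ENTRIES longs (800
 * bytes on 64-bit) would be a large on-stack footprint, and a static buffer
 * is safe here only because every caller of klp_check_stack() in this file
 * runs under klp_mutex.
 */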

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function,
 * or if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.  The rq lock keeps the task pinned while its stack is
	 * examined.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
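
/*
 * For orientation, the enable path in core.c drives this file roughly as
 * follows (a sketch, not verbatim; see __klp_enable_patch()):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_for_each_object(patch, obj)
 *		if (klp_is_object_loaded(obj))
 *			klp_patch_object(obj);
 *	klp_start_transition();
 *	klp_try_complete_transition();
 *
 * Disabling runs the mirror image with KLP_UNPATCHED.
 */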

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up instead.
 */
void klp_send_signals(void)
{
	struct task_struct *g, *task;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}
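
/*
 * Note: in this era of the code, klp_send_signals() is expected to be
 * triggered from sysfs, i.e. an administrator writing 1 to
 * /sys/kernel/livepatch/<patch>/signal (an assumption about the caller; see
 * the 'signal' attribute handling in core.c).
 */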

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  An administrator who really wants to complete the transition has
 * to be aware of this.
 */
void klp_force_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_forced = true;
}
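
/*
 * Note: klp_force_transition() is likewise expected to be invoked from sysfs
 * (writing 1 to /sys/kernel/livepatch/<patch>/force, an assumption about the
 * caller).  Because it bypasses the stack checks, klp_forced is set so the
 * patch module's reference is never dropped; see klp_complete_transition().
 */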