// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/tracehook.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

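/*
 * A "fake signal" round is run every SIGNALS_TIMEOUT retry periods (the
 * retry work is rescheduled roughly once per second) to nudge tasks that
 * are still stuck in the old patch state; see klp_try_complete_transition().
 */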
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching
 * any "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow to patch also functions where RCU is not watching,
 * e.g. idle loop code.  A plain synchronize_rcu() is therefore not
 * sufficient here.  klp_synchronize_transition() instead schedules the
 * empty klp_sync() stub on every CPU, which forces a context switch
 * everywhere, including the idle loop, before returning.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

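	/*
	 * stack_trace_save_tsk_reliable() returns the number of entries
	 * saved, or a negative errno when a reliable stack trace cannot be
	 * obtained for the task (e.g. the architecture cannot vouch for the
	 * stack's consistency).
	 */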
	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

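	/*
	 * A task currently running on another CPU can't be switched: its
	 * stack is changing underneath us, so it can't be checked reliably.
	 * It will be retried later, or caught at a safe switching point such
	 * as kernel exit.
	 */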
	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
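		/*
		 * Nudge the remaining tasks once every SIGNALS_TIMEOUT
		 * attempts: a fake signal (or a wakeup for kthreads) gives a
		 * task parked in an interruptible sleep a chance to pass
		 * through the kernel-exit switching point.
		 */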
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is called also
	 * from klp_init_transition() and klp_reverse_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
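	/*
	 * Tasks start out in the inverse of the target patch state;
	 * KLP_UNPATCHED is 0 and KLP_PATCHED is 1, so !state flips between
	 * them.
	 */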
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

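	/*
	 * Flip both the patch's enabled flag and the global target state;
	 * KLP_PATCHED and KLP_UNPATCHED are 1 and 0, so logical negation
	 * switches between them.
	 */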
	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * currently broken. The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}