/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

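/*
 * MAX_STACK_ENTRIES bounds the depth of the stack traces saved in
 * klp_check_stack(); STACK_ERR_BUF_SIZE sizes the temporary buffer used
 * to defer debug messages until the task rq lock has been released.
 */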
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

struct klp_patch *klp_transition_patch;

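/*
 * The global target patch state: KLP_PATCHED when enabling a patch,
 * KLP_UNPATCHED when disabling one, and KLP_UNDEFINED (see
 * <linux/livepatch.h>) whenever no transition is in progress.
 */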
static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
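/*
 * In practice the work is rescheduled from klp_try_complete_transition()
 * with roughly a one second delay (round_jiffies_relative(HZ)) for as
 * long as any task remains stuck in the initial patch state.
 */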
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}

static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow patching even of functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization; instead we hard-force the sched
 * synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
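/*
 * schedule_on_each_cpu() queues the (empty) klp_sync() work on every
 * online CPU and waits for all of them to finish.  By the time it
 * returns, every CPU has therefore passed through the scheduler, even
 * CPUs that were idle or running in userspace where RCU is not watching.
 */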
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
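/*
 * This (like the other transition steps below) is assumed to run with
 * klp_mutex held: klp_transition_work_fn() above takes the mutex
 * explicitly before calling klp_try_complete_transition(), which is what
 * ends up here.
 */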
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;
        bool immediate_func = false;

        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

        if (klp_transition_patch->immediate)
                goto done;

        klp_for_each_object(klp_transition_patch, obj) {
                klp_for_each_func(obj, func) {
                        func->transition = false;
                        if (func->immediate)
                                immediate_func = true;
                }
        }

        if (klp_target_state == KLP_UNPATCHED && !immediate_func)
                module_put(klp_transition_patch->mod);

        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
                klp_synchronize_transition();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }

done:
        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
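/*
 * Only an enable operation (target state KLP_PATCHED) can be cancelled
 * this way, hence the WARN_ON_ONCE() below.  Flipping the target state
 * to KLP_UNPATCHED makes klp_complete_transition() tear the patch back
 * down as if it had been fully unpatched.
 */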
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;

        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
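/*
 * A sketch of a typical call site (the actual callers live in the arch
 * exit-to-usermode and idle-loop code, not in this file) would be:
 *
 *      if (unlikely(test_thread_flag(TIF_PATCH_PENDING)))
 *              klp_update_patch_state(current);
 */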
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_sched() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barrier is in
         *    klp_init_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
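/*
 * Note on the KLP_PATCHED branch below: klp_patch_func() (in patch.c)
 * pushes new funcs onto the head of ops->func_stack, so the list is
 * ordered newest-first and list_next_entry(func, stack_node) yields the
 * function that would become active again once this patch is removed:
 *
 *      func_stack: func (this patch) -> prev patch -> ... -> original
 */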
static int klp_check_stack_func(struct klp_func *func,
                                struct stack_trace *trace)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        if (func->immediate)
                return 0;

        for (i = 0; i < trace->nr_entries; i++) {
                address = trace->entries[i];

                if (klp_target_state == KLP_UNPATCHED) {
                         /*
                          * Check for the to-be-unpatched function
                          * (the func itself).
                          */
                        func_addr = (unsigned long)func->new_func;
                        func_size = func->new_size;
                } else {
                        /*
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
                        ops = klp_find_ops(func->old_addr);

                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
                                func_addr = func->old_addr;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
                                struct klp_func *prev;

                                prev = list_next_entry(func, stack_node);
                                func_addr = (unsigned long)prev->new_func;
                                func_size = prev->new_size;
                        }
                }

                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
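/*
 * The entries[] buffer below is static and therefore shared between
 * calls; this is presumably safe only because stack checking is
 * serialized by klp_mutex in the callers.
 */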
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct stack_trace trace;
        struct klp_object *obj;
        struct klp_func *func;
        int ret;

        trace.skip = 0;
        trace.nr_entries = 0;
        trace.max_entries = MAX_STACK_ENTRIES;
        trace.entries = entries;
        ret = save_stack_trace_tsk_reliable(task, &trace);
        WARN_ON_ONCE(ret == -ENOSYS);
        if (ret) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, &trace);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
                                         __func__, task->comm, task->pid,
                                         func->old_name);
                                return ret;
                        }
                }
        }

        return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
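/*
 * The task rq lock taken below pins the task's running state: a task that
 * is neither current nor running on a CPU cannot start executing (and
 * thus mutating its own stack) while the lock is held, which is what
 * makes the stack check trustworthy.
 */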
static bool klp_try_switch_task(struct task_struct *task)
{
        struct rq *rq;
        struct rq_flags flags;
        int ret;
        bool success = false;
        char err_buf[STACK_ERR_BUF_SIZE];

        err_buf[0] = '\0';

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * For arches which don't have reliable stack traces, we have to rely
         * on other methods (e.g., switching tasks at kernel exit).
         */
        if (!klp_have_reliable_stack())
                return false;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
        rq = task_rq_lock(task, &flags);

        if (task_running(rq, task) && task != current) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d is running\n", __func__, task->comm,
                         task->pid);
                goto done;
        }

        ret = klp_check_stack(task, err_buf);
        if (ret)
                goto done;

        success = true;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;

done:
        task_rq_unlock(rq, task, &flags);

        /*
         * Due to console deadlock issues, pr_debug() can't be used while
         * holding the task rq lock.  Instead we have to use a temporary buffer
         * and print the debug message after releasing the lock.
         */
        if (err_buf[0] != '\0')
                pr_debug("%s", err_buf);

        return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
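/*
 * klp_transition_work_fn() above takes klp_mutex explicitly before
 * calling here, and the enable/disable paths in core.c are expected to
 * do the same, so only one transition attempt is in flight at a time.
 */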
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (klp_transition_patch->immediate)
                goto success;

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        get_online_cpus();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task))
                                complete = false;
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        put_online_cpus();

        if (!complete) {
                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

success:
        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /* we're done, now cleanup the data structures */
        klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
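/*
 * TIF_PATCH_PENDING is only set for tasks still in the initial patch
 * state.  The flag is consumed either by klp_try_switch_task() (stack
 * check) or by klp_update_patch_state() at the kernel-exit / idle-loop
 * switch points.
 */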
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (klp_transition_patch->immediate)
                return;

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
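/*
 * For example, when enabling a patch, state is KLP_PATCHED, so every task
 * starts out in initial_state = !state = KLP_UNPATCHED and is later
 * flipped to KLP_PATCHED; disabling a patch works the same way with the
 * two states swapped.
 */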
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (patch->immediate)
                return;

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_UNDEFINED.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
         * set a task->patch_state to KLP_UNDEFINED.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
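/*
 * The TIF_PATCH_PENDING flags are cleared, and klp_synchronize_transition()
 * is called, before restarting so that any klp_update_patch_state() call
 * still in flight finishes before the flags are re-set for the reversed
 * direction; otherwise such a call could consume a stale flag and write
 * the old target state after the reversal.
 */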
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        klp_transition_patch->enabled = !klp_transition_patch->enabled;

        klp_target_state = !klp_target_state;

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() running in parallel with
         * klp_start_transition().
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /* Let any remaining calls to klp_update_patch_state() complete */
        klp_synchronize_transition();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        child->patch_state = current->patch_state;

        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}