/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

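/*
 * klp_ops is the list of klp_ops structs, one per patched function
 * location.  Each klp_ops owns the ftrace_ops registered for that
 * location and a func_stack: a stack of klp_func structs from all
 * (possibly stacked) patches which replace the same function, with the
 * most recently enabled patch at the head.
 */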
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(unsigned long old_addr)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_addr == old_addr)
                        return ops;
        }

        return NULL;
}

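/*
 * klp_ftrace_handler() - the livepatch ftrace handler
 *
 * Called by ftrace on entry to a patched function.  It picks the
 * klp_func at the head of the func_stack (i.e. from the most recently
 * enabled patch), consults the per-task patch state while a transition
 * is in progress, and redirects execution to the selected new function
 * by updating the saved instruction pointer in *regs.
 */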
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct pt_regs *regs)
{
        struct klp_ops *ops;
        struct klp_func *func;
        int patch_state;

        ops = container_of(fops, struct klp_ops, fops);

        /*
         * A variant of synchronize_sched() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);

        /*
         * func should never be NULL because preemption should be disabled here
         * and unregister_ftrace_function() does the equivalent of a
         * synchronize_sched() before the func_stack removal.
         */
        if (WARN_ON_ONCE(!func))
                goto unlock;

        /*
         * In the enable path, enforce the order of the ops->func_stack and
         * func->transition reads.  The corresponding write barrier is in
         * __klp_enable_patch().
         *
         * (Note that this barrier technically isn't needed in the disable
         * path.  In the rare case where klp_update_patch_state() runs before
         * this handler, its TIF_PATCH_PENDING read and this func->transition
         * read need to be ordered.  But klp_update_patch_state() already
         * enforces that.)
         */
        smp_rmb();

        if (unlikely(func->transition)) {

                /*
                 * Enforce the order of the func->transition and
                 * current->patch_state reads.  Otherwise we could read an
                 * out-of-date task state and pick the wrong function.  The
                 * corresponding write barrier is in klp_init_transition().
                 */
                smp_rmb();

                patch_state = current->patch_state;

                WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

                if (patch_state == KLP_UNPATCHED) {
                        /*
                         * Use the previously patched version of the function.
                         * If no previous patches exist, continue with the
                         * original function.
                         */
                        func = list_entry_rcu(func->stack_node.next,
                                              struct klp_func, stack_node);

                        if (&func->stack_node == &ops->func_stack)
                                goto unlock;
                }
        }

        klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
        preempt_enable_notrace();
}
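
/*
 * An illustrative example of the func_stack walk above: if patch P1 and
 * then patch P2 both replace the same function, the func_stack for that
 * function's klp_ops is P2 -> P1.  Outside a transition the handler
 * always jumps to P2's new_func.  During a transition, a task still in
 * the KLP_UNPATCHED state falls back one entry to P1's new_func, or
 * runs the original function if the fallback walks off the end of the
 * stack.
 */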

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
        return faddr;
}
#endif
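
/*
 * As a sketch of an architecture override (assuming powerpc built with
 * -mprofile-kernel, where the ftrace call site sits within the first
 * few instructions of the function rather than at its entry address),
 * the arch header would define something along these lines:
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		// the ftrace location is within the first 16 bytes
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */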
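/*
 * Undo klp_patch_func(): pop the func from its klp_ops func_stack.  If
 * it was the only entry, also unregister the ftrace handler, clear the
 * ftrace filter and free the klp_ops.
 */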
static void klp_unpatch_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(!func->patched))
                return;
        if (WARN_ON(!func->old_addr))
                return;

        ops = klp_find_ops(func->old_addr);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                unsigned long ftrace_loc;

                ftrace_loc = klp_get_ftrace_location(func->old_addr);
                if (WARN_ON(!ftrace_loc))
                        return;

                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->patched = false;
}

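/*
 * Register the old_addr -> new_func redirection for a single function.
 * The first patch for a given function location allocates a klp_ops,
 * sets the ftrace filter to the function's ftrace location and
 * registers klp_ftrace_handler() for it.  Subsequent (stacked) patches
 * for the same location simply push their klp_func onto the existing
 * func_stack.
 */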
static int klp_patch_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_addr))
                return -EINVAL;

        if (WARN_ON(func->patched))
                return -EINVAL;

        ops = klp_find_ops(func->old_addr);
        if (!ops) {
                unsigned long ftrace_loc;

                ftrace_loc = klp_get_ftrace_location(func->old_addr);
                if (!ftrace_loc) {
                        pr_err("failed to find location for function '%s'\n",
                               func->old_name);
                        return -EINVAL;
                }

                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
                                  FTRACE_OPS_FL_DYNAMIC |
                                  FTRACE_OPS_FL_IPMODIFY;

                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
                        goto err;
                }
        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->patched = true;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}
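
/*
 * For context, klp_patch_func() receives its klp_func structs from a
 * livepatch module.  A minimal sketch, modeled on
 * samples/livepatch/livepatch-sample.c (the function and symbol names
 * here are illustrative):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// a NULL name means the object is vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * The module then registers and enables the patch (klp_register_patch()
 * and klp_enable_patch() in this kernel's API), which ultimately walks
 * the objects and funcs through klp_patch_object() below and
 * klp_patch_func() above.
 */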

void klp_unpatch_object(struct klp_object *obj)
{
        struct klp_func *func;

        klp_for_each_func(obj, func)
                if (func->patched)
                        klp_unpatch_func(func);

        obj->patched = false;
}

int klp_patch_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->patched))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_patch_func(func);
                if (ret) {
                        klp_unpatch_object(obj);
                        return ret;
                }
        }
        obj->patched = true;

        return 0;
}

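/*
 * Revert every patched object of a patch.  Used by the transition code
 * once all tasks have been migrated to the unpatched state.
 */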
void klp_unpatch_objects(struct klp_patch *patch)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        klp_unpatch_object(obj);
}