linux/kernel/livepatch/core.c
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
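
/*
 * Illustrative sketch, not executed anywhere: if patch A is enabled and
 * patch B (enabled later) replaces the same old_addr, both klp_func's
 * share one klp_ops and its func_stack reads
 *
 *	B's klp_func -> A's klp_func
 *
 * klp_ftrace_handler() below always redirects to the first entry, so B
 * "wins" until it is disabled, at which point A becomes active again.
 */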

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname ? objname : "vmlinux");
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
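
/*
 * Sketch of the sympos convention used above; the symbol and object names
 * are hypothetical, chosen only for illustration:
 *
 *	- klp_find_object_symbol(NULL, "uniq_sym", 0, &addr) resolves a
 *	  vmlinux symbol and fails unless "uniq_sym" is unique in kallsyms;
 *	- klp_find_object_symbol("ext4", "dup_sym", 2, &addr) resolves the
 *	  second occurrence of "dup_sym" within the ext4 module.
 */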

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/*
	 * Check if it's in another .o within the patch module. This also
	 * checks that the external symbol is unique.
	 */
	return klp_find_object_symbol(pmod->name, name, 0, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret = 0;
	unsigned long val;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	module_disable_ro(pmod);

	for (reloc = obj->relocs; reloc->name; reloc++) {
		/* discover the address of the referenced symbol */
		if (reloc->external) {
			if (reloc->sympos > 0) {
				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
				       reloc->name);
				ret = -EINVAL;
				goto out;
			}
			ret = klp_find_external_symbol(pmod, reloc->name, &val);
		} else
			ret = klp_find_object_symbol(obj->name,
						     reloc->name,
						     reloc->sympos,
						     &val);
		if (ret)
			goto out;

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, val, ret);
			goto out;
		}
	}

out:
	module_enable_ro(pmod);
	return ret;
}
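
/*
 * Illustrative klp_reloc entry as a patch module might supply it in
 * obj->relocs; every value below is a hypothetical example, not data
 * taken from a real patch:
 *
 *	{
 *		.loc      = (unsigned long)&call_site_in_new_func,
 *		.name     = "some_static_symbol",
 *		.type     = R_X86_64_PC32,
 *		.addend   = -4,
 *		.sympos   = 0,
 *		.external = 0,
 *	}
 *
 * The array is terminated by an entry with a NULL .name, which is what
 * the loop above keys on.
 */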

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
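
/*
 * Stacking sketch implied by the two checks above: with patches registered
 * in order P1, P2, P3 and all of them enabled, only P3 may be disabled.
 * Conversely, a disabled patch may only be enabled if every patch
 * registered before it is already enabled.
 */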

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
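
/*
 * Example interaction from userspace, assuming a loaded patch module
 * named livepatch_sample (the name is illustrative):
 *
 *	$ cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	$ echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * Writing 0 or 1 lands in enabled_store() below, which calls
 * __klp_disable_patch() or __klp_enable_patch() respectively.
 */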

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
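
/*
 * For example (patch and function names here are purely illustrative), a
 * klp_func with old_name "cmdline_proc_show" and old_sympos 0 in the
 * vmlinux object of patch "livepatch_sample" shows up as
 *
 *	/sys/kernel/livepatch/livepatch_sample/vmlinux/cmdline_proc_show,1
 */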

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
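
/*
 * A minimal sketch of a patch module built on this API, modeled loosely on
 * samples/livepatch/livepatch-sample.c; the patched function and all names
 * below are illustrative assumptions, not part of this file:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 * (A NULL klp_object name means the object is vmlinux, per klp_is_module()
 * above.)
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret)
 *			WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 */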

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know which module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);