linux/kernel/livepatch/core.c
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
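
/*
 * Illustrative sketch (comment only, not compiled): if two patches replace
 * the same function, their klp_func structs share one klp_ops and stack on
 * its func_stack.  With hypothetical patches A and B, enabled in that order:
 *
 *	func_stack: B's klp_func -> A's klp_func
 *
 * klp_ftrace_handler() always picks the first entry, so B's version runs;
 * disabling B pops it and A's version instantly takes over again.
 */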

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block the removal of patched modules, so we do
	 * not take a reference here.  The patches are removed by the
	 * going-module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess with the work of the module coming and going
	 * notifiers.  Note that the patch might still be needed before the
	 * going handler is called.  Module functions can be called even in
	 * the GOING state until mod->exit() finishes.  This is especially
	 * important for patches that modify the semantics of the patched
	 * functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set.  If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the same
	 * name in the same object.
	 */
	unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	kallsyms_on_each_symbol(klp_find_callback, &args);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};

	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
		return 0;

	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
		name, addr);
	return -EINVAL;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* KASLR is enabled, disregard old_addr from user */
	func->old_addr = 0;
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}
		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

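	/*
	 * Redirect execution to the new function by rewriting the saved
	 * instruction pointer (regs->ip on x86) via the arch-specific
	 * klp_arch_set_pc(); when the handler returns, the thread resumes
	 * in new_func instead of the patched function.
	 */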
	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	WARN_ON(func->state != KLP_ENABLED);
	WARN_ON(!func->old_addr);

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name; func++)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
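
/*
 * For example, a patch built as a (hypothetical) module named
 * livepatch_sample can be toggled from user space:
 *
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * Writing anything other than 0 or 1, or rewriting the current state,
 * fails with -EINVAL (see enabled_store() below).
 */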

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	for (func = obj->funcs; func->old_name; func++)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	obj->kobj = kobject_create_and_add(name, &patch->kobj);
	if (!obj->kobj)
		return -ENOMEM;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	for (obj = patch->objs; obj->funcs; obj++) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
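
/*
 * Typical usage, as a minimal sketch in the spirit of
 * samples/livepatch/livepatch-sample.c; the patched symbol
 * (cmdline_proc_show) and the replacement function are illustrative:
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/seq_file.h>
 *	#include <linux/livepatch.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,	// name == NULL means vmlinux
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret)
 *			WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *		WARN_ON(klp_disable_patch(&patch));
 *		WARN_ON(klp_unregister_patch(&patch));
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 *	MODULE_LICENSE("GPL");
 */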

static void klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret)
		goto err;

	if (patch->state == KLP_DISABLED)
		return;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (!ret)
		return;

err:
	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
		pmod->name, mod->name, ret);
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * klp_alive must be tracked for every module, since we never know
	 * which module a future patch will want to patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		for (obj = patch->objs; obj->funcs; obj++) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				klp_module_notify_coming(patch, obj);
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);