linux/kernel/livepatch/core.c
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
        return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
        struct module *mod;

        if (!klp_is_module(obj))
                return;

        mutex_lock(&module_mutex);
        /*
         * We do not want to block removal of patched modules and therefore
         * we do not take a reference here. The patches are removed by
         * klp_module_going() instead.
         */
        mod = find_module(obj->name);
        /*
         * Do not interfere with the work of klp_module_coming() and
         * klp_module_going(). Note that the patch might still be needed
         * before klp_module_going() is called. Module functions can be
         * called even in the GOING state until mod->exit() finishes. This
         * is especially important for patches that modify the semantics of
         * the patched functions.
         */
        if (mod && mod->klp_alive)
                obj->mod = mod;

        mutex_unlock(&module_mutex);
}

static bool klp_is_patch_registered(struct klp_patch *patch)
{
        struct klp_patch *mypatch;

        list_for_each_entry(mypatch, &klp_patches, list)
                if (mypatch == patch)
                        return true;

        return false;
}

static bool klp_initialized(void)
{
        return !!klp_root_kobj;
}

struct klp_find_arg {
        const char *objname;
        const char *name;
        unsigned long addr;
        unsigned long count;
        unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
                             struct module *mod, unsigned long addr)
{
        struct klp_find_arg *args = data;

        if ((mod && !args->objname) || (!mod && args->objname))
                return 0;

        if (strcmp(args->name, name))
                return 0;

        if (args->objname && strcmp(args->objname, mod->name))
                return 0;

        args->addr = addr;
        args->count++;

        /*
         * Finish the search when the symbol is found for the desired position
         * or the position is not defined for a non-unique symbol.
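         * For example, pos == 2 stops the search at the second matching
         * symbol, while pos == 0 stops it as soon as a second match shows
         * the symbol is not unique.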
         */
        if ((args->pos && (args->count == args->pos)) ||
            (!args->pos && (args->count > 1)))
                return 1;

        return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
                                  unsigned long sympos, unsigned long *addr)
{
        struct klp_find_arg args = {
                .objname = objname,
                .name = name,
                .addr = 0,
                .count = 0,
                .pos = sympos,
        };

        mutex_lock(&module_mutex);
        if (objname)
                module_kallsyms_on_each_symbol(klp_find_callback, &args);
        else
                kallsyms_on_each_symbol(klp_find_callback, &args);
        mutex_unlock(&module_mutex);

        /*
         * Ensure an address was found. If sympos is 0, ensure symbol is unique;
         * otherwise ensure the symbol position count matches sympos.
         */
        if (args.addr == 0)
                pr_err("symbol '%s' not found in symbol table\n", name);
        else if (args.count > 1 && sympos == 0) {
                pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
                       name, objname);
        } else if (sympos != args.count && sympos > 0) {
                pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
                       sympos, name, objname ? objname : "vmlinux");
        } else {
                *addr = args.addr;
                return 0;
        }

        *addr = 0;
        return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
        int i, cnt, vmlinux, ret;
        char objname[MODULE_NAME_LEN];
        char symname[KSYM_NAME_LEN];
        char *strtab = pmod->core_kallsyms.strtab;
        Elf_Rela *relas;
        Elf_Sym *sym;
        unsigned long sympos, addr;

        /*
         * Since the field widths for objname and symname in the sscanf()
         * call are hard-coded and correspond to MODULE_NAME_LEN and
         * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
         * and KSYM_NAME_LEN have the values we expect them to have.
         *
         * Because the value of MODULE_NAME_LEN can differ among architectures,
         * we use the smallest/strictest upper bound possible (56, based on
         * the current definition of MODULE_NAME_LEN) to prevent overflows.
         */
        BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

        relas = (Elf_Rela *) relasec->sh_addr;
        /* For each rela in this klp relocation section */
        for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
                sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
                if (sym->st_shndx != SHN_LIVEPATCH) {
                        pr_err("symbol %s is not marked as a livepatch symbol\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                /* Format: .klp.sym.objname.symname,sympos */
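                /*
                 * e.g. ".klp.sym.vmlinux.printk,0", where a sympos of 0
                 * requires the symbol to be unique within the object.
                 */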
                cnt = sscanf(strtab + sym->st_name,
                             ".klp.sym.%55[^.].%127[^,],%lu",
                             objname, symname, &sympos);
                if (cnt != 3) {
                        pr_err("symbol %s has an incorrectly formatted name\n",
                               strtab + sym->st_name);
                        return -EINVAL;
                }

                /* klp_find_object_symbol() treats a NULL objname as vmlinux */
                vmlinux = !strcmp(objname, "vmlinux");
                ret = klp_find_object_symbol(vmlinux ? NULL : objname,
                                             symname, sympos, &addr);
                if (ret)
                        return ret;

                sym->st_value = addr;
        }

        return 0;
}

static int klp_write_object_relocations(struct module *pmod,
                                        struct klp_object *obj)
{
        int i, cnt, ret = 0;
        const char *objname, *secname;
        char sec_objname[MODULE_NAME_LEN];
        Elf_Shdr *sec;

        if (WARN_ON(!klp_is_object_loaded(obj)))
                return -EINVAL;

        objname = klp_is_module(obj) ? obj->name : "vmlinux";

        /* For each klp relocation section */
        for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
                sec = pmod->klp_info->sechdrs + i;
                secname = pmod->klp_info->secstrings + sec->sh_name;
                if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
                        continue;

                /*
                 * Format: .klp.rela.sec_objname.section_name
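                 * (e.g. ".klp.rela.ext4.text.unlikely")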
                 * See comment in klp_resolve_symbols() for an explanation
                 * of the selected field width value.
                 */
                cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
                if (cnt != 1) {
                        pr_err("section %s has an incorrectly formatted name\n",
                               secname);
                        ret = -EINVAL;
                        break;
                }

                if (strcmp(objname, sec_objname))
                        continue;

                ret = klp_resolve_symbols(sec, pmod);
                if (ret)
                        break;

                ret = apply_relocate_add(pmod->klp_info->sechdrs,
                                         pmod->core_kallsyms.strtab,
                                         pmod->klp_info->symndx, i, pmod);
                if (ret)
                        break;
        }

        return ret;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;

        if (WARN_ON(!patch->enabled))
                return -EINVAL;

        if (klp_transition_patch)
                return -EBUSY;

        /* enforce stacking: only the last enabled patch can be disabled */
        if (!list_is_last(&patch->list, &klp_patches) &&
            list_next_entry(patch, list)->enabled)
                return -EBUSY;

        klp_init_transition(patch, KLP_UNPATCHED);

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        klp_pre_unpatch_callback(obj);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the TIF_PATCH_PENDING writes in
         * klp_start_transition().  In the rare case where klp_ftrace_handler()
         * is called shortly after klp_update_patch_state() switches the task,
         * this ensures the handler sees that func->transition is set.
         */
        smp_wmb();

        klp_start_transition();
        klp_try_complete_transition();
        patch->enabled = false;

        return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:      The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
        int ret;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto err;
        }

        if (!patch->enabled) {
                ret = -EINVAL;
                goto err;
        }

        ret = __klp_disable_patch(patch);

err:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (klp_transition_patch)
                return -EBUSY;

        if (WARN_ON(patch->enabled))
                return -EINVAL;

        /* enforce stacking: only the first disabled patch can be enabled */
        if (patch->list.prev != &klp_patches &&
            !list_prev_entry(patch, list)->enabled)
                return -EBUSY;

        /*
         * A reference is taken on the patch module to prevent it from being
         * unloaded.
         */
        if (!try_module_get(patch->mod))
                return -ENODEV;

        pr_notice("enabling patch '%s'\n", patch->mod->name);

        klp_init_transition(patch, KLP_PATCHED);

        /*
         * Enforce the order of the func->transition writes in
         * klp_init_transition() and the ops->func_stack writes in
         * klp_patch_object(), so that klp_ftrace_handler() will see the
         * func->transition updates before the handler is registered and the
         * new funcs become visible to the handler.
         */
        smp_wmb();

        klp_for_each_object(patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;

                ret = klp_pre_patch_callback(obj);
                if (ret) {
                        pr_warn("pre-patch callback failed for object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }

                ret = klp_patch_object(obj);
                if (ret) {
                        pr_warn("failed to patch object '%s'\n",
                                klp_is_module(obj) ? obj->name : "vmlinux");
                        goto err;
                }
        }

        klp_start_transition();
        klp_try_complete_transition();
        patch->enabled = true;

        return 0;
err:
        pr_warn("failed to enable patch '%s'\n", patch->mod->name);

        klp_cancel_transition();
        return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:      The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
        int ret;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto err;
        }

        ret = __klp_enable_patch(patch);

err:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/signal
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
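
/*
 * For example (the patch name below is illustrative), a loaded patch whose
 * module is called "livepatch_sample" can be disabled and re-enabled from
 * user space with
 *
 *      echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *      echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * whereas a write of "1" to "signal" or "force" acts only on the patch
 * currently in transition (see signal_store() and force_store() below).
 */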

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        bool enabled;

        ret = kstrtobool(buf, &enabled);
        if (ret)
                return ret;

        patch = container_of(kobj, struct klp_patch, kobj);

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                /*
                 * The patch module either disappeared meanwhile or has not
                 * been properly initialized yet.
                 */
                ret = -EINVAL;
                goto err;
        }

        if (patch->enabled == enabled) {
                /* already in requested state */
                ret = -EINVAL;
                goto err;
        }

        if (patch == klp_transition_patch) {
                klp_reverse_transition();
        } else if (enabled) {
                ret = __klp_enable_patch(patch);
                if (ret)
                        goto err;
        } else {
                ret = __klp_disable_patch(patch);
                if (ret)
                        goto err;
        }

        mutex_unlock(&klp_mutex);

        return count;

err:
        mutex_unlock(&klp_mutex);
        return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        return snprintf(buf, PAGE_SIZE-1, "%d\n",
                        patch == klp_transition_patch);
}

static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        bool val;

        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;

        if (!val)
                return count;

        mutex_lock(&klp_mutex);

        patch = container_of(kobj, struct klp_patch, kobj);
        if (patch != klp_transition_patch) {
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        klp_send_signals();

        mutex_unlock(&klp_mutex);

        return count;
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        struct klp_patch *patch;
        int ret;
        bool val;

        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;

        if (!val)
                return count;

        mutex_lock(&klp_mutex);

        patch = container_of(kobj, struct klp_patch, kobj);
        if (patch != klp_transition_patch) {
                mutex_unlock(&klp_mutex);
                return -EINVAL;
        }

        klp_force_transition();

        mutex_unlock(&klp_mutex);

        return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
        &enabled_kobj_attr.attr,
        &transition_kobj_attr.attr,
        &signal_kobj_attr.attr,
        &force_kobj_attr.attr,
        NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
        struct klp_patch *patch;

        patch = container_of(kobj, struct klp_patch, kobj);
        complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
        .release = klp_kobj_release_patch,
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
        .release = klp_kobj_release_object,
        .sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
        .release = klp_kobj_release_func,
        .sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
                                   struct klp_func *limit)
{
        struct klp_func *func;

        for (func = obj->funcs; func->old_name && func != limit; func++)
                kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
        struct klp_func *func;

        obj->mod = NULL;

        klp_for_each_func(obj, func)
                func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
                                     struct klp_object *limit)
{
        struct klp_object *obj;

        for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
                klp_free_funcs_limited(obj, NULL);
                kobject_put(&obj->kobj);
        }
}

static void klp_free_patch(struct klp_patch *patch)
{
        klp_free_objects_limited(patch, NULL);
        if (!list_empty(&patch->list))
                list_del(&patch->list);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
        if (!func->old_name || !func->new_func)
                return -EINVAL;

        INIT_LIST_HEAD(&func->stack_node);
        func->patched = false;
        func->transition = false;

        /* The format for the sysfs directory is <function,sympos> where sympos
         * is the nth occurrence of this symbol in kallsyms for the patched
         * object. If the user selects 0 for old_sympos, then 1 will be used
         * since a unique symbol will be the first occurrence.
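         * For example, a unique symbol "meminfo_proc_show" (name chosen for
         * illustration) gets a "meminfo_proc_show,1" directory.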
         */
        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
                                    &obj->kobj, "%s,%lu", func->old_name,
                                    func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
                                        struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
                                  struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        module_disable_ro(patch->mod);
        ret = klp_write_object_relocations(patch->mod, obj);
        if (ret) {
                module_enable_ro(patch->mod, true);
                return ret;
        }

        arch_klp_init_object_loaded(patch, obj);
        module_enable_ro(patch->mod, true);

        klp_for_each_func(obj, func) {
                ret = klp_find_object_symbol(obj->name, func->old_name,
                                             func->old_sympos,
                                             &func->old_addr);
                if (ret)
                        return ret;

                ret = kallsyms_lookup_size_offset(func->old_addr,
                                                  &func->old_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s'\n",
                               func->old_name);
                        return -ENOENT;
                }

                ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
                                                  &func->new_size, NULL);
                if (!ret) {
                        pr_err("kallsyms size lookup failed for '%s' replacement\n",
                               func->old_name);
                        return -ENOENT;
                }
        }

        return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
        struct klp_func *func;
        int ret;
        const char *name;

        if (!obj->funcs)
                return -EINVAL;

        obj->patched = false;
        obj->mod = NULL;

        klp_find_object_module(obj);

        name = klp_is_module(obj) ? obj->name : "vmlinux";
        ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
                                   &patch->kobj, "%s", name);
        if (ret)
                return ret;

        klp_for_each_func(obj, func) {
                ret = klp_init_func(obj, func);
                if (ret)
                        goto free;
        }

        if (klp_is_object_loaded(obj)) {
                ret = klp_init_object_loaded(patch, obj);
                if (ret)
                        goto free;
        }

        return 0;

free:
        klp_free_funcs_limited(obj, func);
        kobject_put(&obj->kobj);
        return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
        struct klp_object *obj;
        int ret;

        if (!patch->objs)
                return -EINVAL;

        mutex_lock(&klp_mutex);

        patch->enabled = false;
        init_completion(&patch->finish);

        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
                                   klp_root_kobj, "%s", patch->mod->name);
        if (ret) {
                mutex_unlock(&klp_mutex);
                return ret;
        }

        klp_for_each_object(patch, obj) {
                ret = klp_init_object(patch, obj);
                if (ret)
                        goto free;
        }

        list_add_tail(&patch->list, &klp_patches);

        mutex_unlock(&klp_mutex);

        return 0;

free:
        klp_free_objects_limited(patch, obj);

        mutex_unlock(&klp_mutex);

        kobject_put(&patch->kobj);
        wait_for_completion(&patch->finish);

        return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:      Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
        int ret;

        mutex_lock(&klp_mutex);

        if (!klp_is_patch_registered(patch)) {
                ret = -EINVAL;
                goto err;
        }

        if (patch->enabled) {
                ret = -EBUSY;
                goto err;
        }

        klp_free_patch(patch);

        mutex_unlock(&klp_mutex);

        kobject_put(&patch->kobj);
        wait_for_completion(&patch->finish);

        return 0;
err:
        mutex_unlock(&klp_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:      Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
        if (!patch || !patch->mod)
                return -EINVAL;

        if (!is_livepatch_module(patch->mod)) {
                pr_err("module %s is not marked as a livepatch module\n",
                       patch->mod->name);
                return -EINVAL;
        }

        if (!klp_initialized())
                return -ENODEV;

        if (!klp_have_reliable_stack()) {
                pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
                return -ENOSYS;
        }

        return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
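
/*
 * Typical usage, as seen from a patch module (a minimal sketch modeled on
 * samples/livepatch/livepatch-sample.c; the patched function and all names
 * below are illustrative, and a NULL object name means vmlinux):
 *
 *      #include <linux/kernel.h>
 *      #include <linux/module.h>
 *      #include <linux/livepatch.h>
 *      #include <linux/seq_file.h>
 *
 *      static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *      {
 *              seq_printf(m, "%s\n", "this has been live patched");
 *              return 0;
 *      }
 *
 *      static struct klp_func funcs[] = {
 *              {
 *                      .old_name = "cmdline_proc_show",
 *                      .new_func = livepatch_cmdline_proc_show,
 *              }, { }
 *      };
 *
 *      static struct klp_object objs[] = {
 *              {
 *                      .funcs = funcs,
 *              }, { }
 *      };
 *
 *      static struct klp_patch patch = {
 *              .mod = THIS_MODULE,
 *              .objs = objs,
 *      };
 *
 *      static int livepatch_init(void)
 *      {
 *              int ret;
 *
 *              ret = klp_register_patch(&patch);
 *              if (ret)
 *                      return ret;
 *              ret = klp_enable_patch(&patch);
 *              if (ret) {
 *                      WARN_ON(klp_unregister_patch(&patch));
 *                      return ret;
 *              }
 *              return 0;
 *      }
 *
 *      static void livepatch_exit(void)
 *      {
 *              WARN_ON(klp_unregister_patch(&patch));
 *      }
 *
 *      module_init(livepatch_init);
 *      module_exit(livepatch_exit);
 *      MODULE_LICENSE("GPL");
 *      MODULE_INFO(livepatch, "Y");
 */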

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
                                               struct klp_patch *limit)
{
        struct klp_patch *patch;
        struct klp_object *obj;

        list_for_each_entry(patch, &klp_patches, list) {
                if (patch == limit)
                        break;

                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        /*
                         * Only unpatch the module if the patch is enabled or
                         * is in transition.
                         */
                        if (patch->enabled || patch == klp_transition_patch) {

                                if (patch != klp_transition_patch)
                                        klp_pre_unpatch_callback(obj);

                                pr_notice("reverting patch '%s' on unloading module '%s'\n",
                                          patch->mod->name, obj->mod->name);
                                klp_unpatch_object(obj);

                                klp_post_unpatch_callback(obj);
                        }

                        klp_free_object_loaded(obj);
                        break;
                }
        }
}

int klp_module_coming(struct module *mod)
{
        int ret;
        struct klp_patch *patch;
        struct klp_object *obj;

        if (WARN_ON(mod->state != MODULE_STATE_COMING))
                return -EINVAL;

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_coming()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = true;

        list_for_each_entry(patch, &klp_patches, list) {
                klp_for_each_object(patch, obj) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                continue;

                        obj->mod = mod;

                        ret = klp_init_object_loaded(patch, obj);
                        if (ret) {
                                pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);
                                goto err;
                        }

                        /*
                         * Only patch the module if the patch is enabled or is
                         * in transition.
                         */
                        if (!patch->enabled && patch != klp_transition_patch)
                                break;

                        pr_notice("applying patch '%s' to loading module '%s'\n",
                                  patch->mod->name, obj->mod->name);

                        ret = klp_pre_patch_callback(obj);
                        if (ret) {
                                pr_warn("pre-patch callback failed for object '%s'\n",
                                        obj->name);
                                goto err;
                        }

                        ret = klp_patch_object(obj);
                        if (ret) {
                                pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
                                        patch->mod->name, obj->mod->name, ret);

                                klp_post_unpatch_callback(obj);
                                goto err;
                        }

                        if (patch != klp_transition_patch)
                                klp_post_patch_callback(obj);

                        break;
                }
        }

        mutex_unlock(&klp_mutex);

        return 0;

err:
        /*
         * If a patch is unsuccessfully applied, return
         * error to the module loader.
         */
        pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
                patch->mod->name, obj->mod->name, obj->mod->name);
        mod->klp_alive = false;
        klp_cleanup_module_patches_limited(mod, patch);
        mutex_unlock(&klp_mutex);

        return ret;
}

void klp_module_going(struct module *mod)
{
        if (WARN_ON(mod->state != MODULE_STATE_GOING &&
                    mod->state != MODULE_STATE_COMING))
                return;

        mutex_lock(&klp_mutex);
        /*
         * Each module has to know that klp_module_going()
         * has been called. We never know what module will
         * get patched by a new patch.
         */
        mod->klp_alive = false;

        klp_cleanup_module_patches_limited(mod, NULL);

        mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
        int ret;

        ret = klp_check_compiler_support();
        if (ret) {
                pr_info("Your compiler is too old; turning off.\n");
                return -EINVAL;
        }

        klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
        if (!klp_root_kobj)
                return -ENOMEM;

        return 0;
}

module_init(klp_init);