linux/include/linux/livepatch.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * livepatch.h - Kernel Live Patching Core
   4 *
   5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
   6 * Copyright (C) 2014 SUSE
   7 */
   8
   9#ifndef _LINUX_LIVEPATCH_H_
  10#define _LINUX_LIVEPATCH_H_
  11
  12#include <linux/module.h>
  13#include <linux/ftrace.h>
  14#include <linux/completion.h>
  15#include <linux/list.h>
  16
  17#if IS_ENABLED(CONFIG_LIVEPATCH)
  18
  19#include <asm/livepatch.h>
  20
  21/* task patch states */
  22#define KLP_UNDEFINED   -1
  23#define KLP_UNPATCHED    0
  24#define KLP_PATCHED      1
  25
  26/**
  27 * struct klp_func - function structure for live patching
  28 * @old_name:   name of the function to be patched
  29 * @new_func:   pointer to the patched function code
  30 * @old_sympos: a hint indicating which symbol position the old function
  31 *              can be found (optional)
  32 * @old_func:   pointer to the function being patched
  33 * @kobj:       kobject for sysfs resources
  34 * @node:       list node for klp_object func_list
  35 * @stack_node: list node for klp_ops func_stack list
  36 * @old_size:   size of the old function
  37 * @new_size:   size of the new function
  38 * @kobj_added: @kobj has been added and needs freeing
  39 * @nop:        temporary patch to use the original code again; dyn. allocated
  40 * @patched:    the func has been added to the klp_ops list
  41 * @transition: the func is currently being applied or reverted
  42 *
  43 * The patched and transition variables define the func's patching state.  When
  44 * patching, a func is always in one of the following states:
  45 *
  46 *   patched=0 transition=0: unpatched
  47 *   patched=0 transition=1: unpatched, temporary starting state
  48 *   patched=1 transition=1: patched, may be visible to some tasks
  49 *   patched=1 transition=0: patched, visible to all tasks
  50 *
  51 * And when unpatching, it goes in the reverse order:
  52 *
  53 *   patched=1 transition=0: patched, visible to all tasks
  54 *   patched=1 transition=1: patched, may be visible to some tasks
  55 *   patched=0 transition=1: unpatched, temporary ending state
  56 *   patched=0 transition=0: unpatched
  57 */
  58struct klp_func {
  59        /* external */
  60        const char *old_name;
  61        void *new_func;
  62        /*
  63         * The old_sympos field is optional and can be used to resolve
  64         * duplicate symbol names in livepatch objects. If this field is zero,
  65         * it is expected the symbol is unique, otherwise patching fails. If
  66         * this value is greater than zero then that occurrence of the symbol
  67         * in kallsyms for the given object is used.
  68         */
  69        unsigned long old_sympos;
  70
  71        /* internal */
  72        void *old_func;
  73        struct kobject kobj;
  74        struct list_head node;
  75        struct list_head stack_node;
  76        unsigned long old_size, new_size;
  77        bool nop;
  78        bool patched;
  79        bool transition;
  80};
  81
  82struct klp_object;
  83
  84/**
  85 * struct klp_callbacks - pre/post live-(un)patch callback structure
  86 * @pre_patch:          executed before code patching
  87 * @post_patch:         executed after code patching
  88 * @pre_unpatch:        executed before code unpatching
  89 * @post_unpatch:       executed after code unpatching
  90 * @post_unpatch_enabled:       flag indicating if post-unpatch callback
  91 *                              should run
  92 *
  93 * All callbacks are optional.  Only the pre-patch callback, if provided,
  94 * will be unconditionally executed.  If the parent klp_object fails to
  95 * patch for any reason, including a non-zero error status returned from
  96 * the pre-patch callback, no further callbacks will be executed.
  97 */
  98struct klp_callbacks {
  99        int (*pre_patch)(struct klp_object *obj);
 100        void (*post_patch)(struct klp_object *obj);
 101        void (*pre_unpatch)(struct klp_object *obj);
 102        void (*post_unpatch)(struct klp_object *obj);
 103        bool post_unpatch_enabled;
 104};
 105
 106/**
 107 * struct klp_object - kernel object structure for live patching
 108 * @name:       module name (or NULL for vmlinux)
 109 * @funcs:      function entries for functions to be patched in the object
 110 * @callbacks:  functions to be executed pre/post (un)patching
 111 * @kobj:       kobject for sysfs resources
 112 * @func_list:  dynamic list of the function entries
 113 * @node:       list node for klp_patch obj_list
 114 * @mod:        kernel module associated with the patched object
 115 *              (NULL for vmlinux)
 116 * @kobj_added: @kobj has been added and needs freeing
 117 * @dynamic:    temporary object for nop functions; dynamically allocated
 118 * @patched:    the object's funcs have been added to the klp_ops list
 119 */
 120struct klp_object {
 121        /* external */
 122        const char *name;
 123        struct klp_func *funcs;
 124        struct klp_callbacks callbacks;
 125
 126        /* internal */
 127        struct kobject kobj;
 128        struct list_head func_list;
 129        struct list_head node;
 130        struct module *mod;
 131        bool dynamic;
 132        bool patched;
 133};
 134
 135/**
 136 * struct klp_patch - patch structure for live patching
 137 * @mod:        reference to the live patch module
 138 * @objs:       object entries for kernel objects to be patched
 139 * @replace:    replace all actively used patches
 140 * @list:       list node for global list of actively used patches
 141 * @kobj:       kobject for sysfs resources
 142 * @obj_list:   dynamic list of the object entries
 143 * @kobj_added: @kobj has been added and needs freeing
 144 * @enabled:    the patch is enabled (but operation may be incomplete)
 145 * @forced:     was involved in a forced transition
 146 * @free_work:  patch cleanup from workqueue-context
 147 * @finish:     for waiting till it is safe to remove the patch module
 148 */
 149struct klp_patch {
 150        /* external */
 151        struct module *mod;
 152        struct klp_object *objs;
 153        bool replace;
 154
 155        /* internal */
 156        struct list_head list;
 157        struct kobject kobj;
 158        struct list_head obj_list;
 159        bool enabled;
 160        bool forced;
 161        struct work_struct free_work;
 162        struct completion finish;
 163};
 164
 165#define klp_for_each_object_static(patch, obj) \
 166        for (obj = patch->objs; obj->funcs || obj->name; obj++)
 167
 168#define klp_for_each_object_safe(patch, obj, tmp_obj)           \
 169        list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
 170
 171#define klp_for_each_object(patch, obj) \
 172        list_for_each_entry(obj, &patch->obj_list, node)
 173
 174#define klp_for_each_func_static(obj, func) \
 175        for (func = obj->funcs; \
 176             func->old_name || func->new_func || func->old_sympos; \
 177             func++)
 178
 179#define klp_for_each_func_safe(obj, func, tmp_func)                     \
 180        list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
 181
 182#define klp_for_each_func(obj, func)    \
 183        list_for_each_entry(func, &obj->func_list, node)
 184
 185int klp_enable_patch(struct klp_patch *);
 186
 187void arch_klp_init_object_loaded(struct klp_patch *patch,
 188                                 struct klp_object *obj);
 189
 190/* Called from the module loader during module coming/going states */
 191int klp_module_coming(struct module *mod);
 192void klp_module_going(struct module *mod);
 193
 194void klp_copy_process(struct task_struct *child);
 195void klp_update_patch_state(struct task_struct *task);
 196
 197static inline bool klp_patch_pending(struct task_struct *task)
 198{
 199        return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
 200}
 201
 202static inline bool klp_have_reliable_stack(void)
 203{
 204        return IS_ENABLED(CONFIG_STACKTRACE) &&
 205               IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
 206}
 207
 208typedef int (*klp_shadow_ctor_t)(void *obj,
 209                                 void *shadow_data,
 210                                 void *ctor_data);
 211typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);
 212
 213void *klp_shadow_get(void *obj, unsigned long id);
 214void *klp_shadow_alloc(void *obj, unsigned long id,
 215                       size_t size, gfp_t gfp_flags,
 216                       klp_shadow_ctor_t ctor, void *ctor_data);
 217void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
 218                              size_t size, gfp_t gfp_flags,
 219                              klp_shadow_ctor_t ctor, void *ctor_data);
 220void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
 221void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
 222
 223#else /* !CONFIG_LIVEPATCH */
 224
 225static inline int klp_module_coming(struct module *mod) { return 0; }
 226static inline void klp_module_going(struct module *mod) {}
 227static inline bool klp_patch_pending(struct task_struct *task) { return false; }
 228static inline void klp_update_patch_state(struct task_struct *task) {}
 229static inline void klp_copy_process(struct task_struct *child) {}
 230
 231#endif /* CONFIG_LIVEPATCH */
 232
 233#endif /* _LINUX_LIVEPATCH_H_ */
 234