blob: 2734008140208c94d9060a056a3440c9cb2cbc48 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */
8
9#ifndef _LINUX_LIVEPATCH_H_
10#define _LINUX_LIVEPATCH_H_
11
12#include <linux/module.h>
13#include <linux/ftrace.h>
14#include <linux/completion.h>
David Brazdil0f672f62019-12-10 10:32:29 +000015#include <linux/list.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000016
17#if IS_ENABLED(CONFIG_LIVEPATCH)
18
19#include <asm/livepatch.h>
20
/*
 * Task patch states: per-task progress through a live-patch transition.
 * KLP_UNDEFINED means no transition is in progress for the task.
 */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1
25
/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos: a hint indicating which symbol position the old function
 *		can be found (optional)
 * @old_func:	pointer to the function being patched
 * @kobj:	kobject for sysfs resources
 * @node:	list node for klp_object func_list
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @nop:	temporary patch to use the original code again; dyn. allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state.  When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * it is expected the symbol is unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	void *old_func;
	struct kobject kobj;
	struct list_head node;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool nop;
	bool patched;
	bool transition;
};
80
/* Forward declaration: callbacks below take the enclosing object. */
struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:	executed before code patching
 * @post_patch:	executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);	/* non-zero return aborts patching */
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
104
/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @func_list:	dynamic list of the function entries
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @dynamic:	temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct list_head func_list;
	struct list_head node;
	struct module *mod;
	bool dynamic;
	bool patched;
};
132
/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @replace:	replace all actively used patches
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue-context
 * @finish:	for waiting till it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	bool replace;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	struct list_head obj_list;
	bool enabled;
	bool forced;
	struct work_struct free_work;
	struct completion finish;
};
161
/*
 * Object iterators.  The _static variant walks the patch's statically
 * declared klp_object array (terminated by an entry with neither funcs
 * nor a name); the plain and _safe variants walk the dynamic obj_list,
 * which also includes dynamically allocated (nop) objects.
 */
#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_object_safe(patch, obj, tmp_obj)		\
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

#define klp_for_each_object(patch, obj)	\
	list_for_each_entry(obj, &patch->obj_list, node)

/*
 * Function iterators, mirroring the object iterators above: _static walks
 * the object's klp_func array (terminated by an all-zero entry), the plain
 * and _safe variants walk the dynamic func_list.
 */
#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

#define klp_for_each_func_safe(obj, func, tmp_func)			\
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

#define klp_for_each_func(obj, func)	\
	list_for_each_entry(func, &obj->func_list, node)
181
/* Register and enable @patch; the core takes it from there. */
int klp_enable_patch(struct klp_patch *patch);

/*
 * Arch-specific initialization for an object whose target module has been
 * loaded — NOTE(review): exact duties are arch-defined; see each
 * arch/<arch> implementation.
 */
void arch_klp_init_object_loaded(struct klp_patch *patch,
				 struct klp_object *obj);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

/* Per-task transition helpers used by fork and the patching core. */
void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);
193
194static inline bool klp_patch_pending(struct task_struct *task)
195{
196 return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
197}
198
199static inline bool klp_have_reliable_stack(void)
200{
201 return IS_ENABLED(CONFIG_STACKTRACE) &&
202 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
203}
204
/*
 * Shadow variables: attach extra data to an existing object, keyed by the
 * <obj, id> pair.  The ctor runs on allocation (non-zero return fails the
 * allocation — NOTE(review): inferred from the int return; confirm against
 * kernel/livepatch/shadow.c), the dtor runs on free.
 */
typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
219
220#else /* !CONFIG_LIVEPATCH */
221
/* No-op stubs so callers build unchanged when CONFIG_LIVEPATCH is off. */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
227
228#endif /* CONFIG_LIVEPATCH */
229
230#endif /* _LINUX_LIVEPATCH_H_ */