/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#ifndef _LINUX_LIVEPATCH_H_
#define _LINUX_LIVEPATCH_H_

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

/* task patch states */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1

/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos: a hint indicating at which symbol position the old function
 *		can be found (optional)
 * @old_func:	pointer to the function being patched
 * @kobj:	kobject for sysfs resources
 * @node:	list node for klp_object func_list
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @nop:	temporary patch to use the original code again; dynamically allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state. When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * it is expected the symbol is unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	void *old_func;
	struct kobject kobj;
	struct list_head node;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool nop;
	bool patched;
	bool transition;
};
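
/*
 * Example (illustrative sketch, not part of the API itself): a minimal
 * klp_func array replacing one vmlinux function, following the pattern of
 * samples/livepatch/livepatch-sample.c.  The function names are examples
 * only; the array is terminated by an empty entry, which the static
 * iterators further below rely on.
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 */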

struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:		executed before code patching
 * @post_patch:		executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
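
/*
 * Example (illustrative sketch): callbacks can prepare or tear down data
 * used by the patched functions.  The names below are placeholders chosen
 * for this example; they are hooked up through klp_object::callbacks, as
 * shown in the klp_object sketch further below.
 *
 *	static int demo_pre_patch(struct klp_object *obj)
 *	{
 *		pr_info("%s: patching %s\n", __func__,
 *			obj->name ? obj->name : "vmlinux");
 *		return 0;	// non-zero aborts patching of this object
 *	}
 *
 *	static void demo_post_unpatch(struct klp_object *obj)
 *	{
 *		pr_info("%s: %s unpatched\n", __func__,
 *			obj->name ? obj->name : "vmlinux");
 *	}
 */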

/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @func_list:	dynamic list of the function entries
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @dynamic:	temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct list_head func_list;
	struct list_head node;
	struct module *mod;
	bool dynamic;
	bool patched;
};
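
/*
 * Example (illustrative sketch): one object entry patches vmlinux (name is
 * NULL) and another patches a module, which may be loaded later.  funcs[]
 * and the demo_* callbacks are the sketches above; "ext4" and ext4_funcs
 * are placeholders for a patched module and its own klp_func array.
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = NULL,		// vmlinux
 *			.funcs = funcs,
 *			.callbacks = {
 *				.pre_patch = demo_pre_patch,
 *				.post_unpatch = demo_post_unpatch,
 *			},
 *		},
 *		{
 *			.name = "ext4",
 *			.funcs = ext4_funcs,
 *		}, { }
 *	};
 */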

/**
 * struct klp_state - state of the system modified by the livepatch
 * @id:		system state identifier (non-zero)
 * @version:	version of the change
 * @data:	custom data
 */
struct klp_state {
	unsigned long id;
	unsigned int version;
	void *data;
};
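
/*
 * Example (illustrative sketch): a states[] array advertises which system
 * states a cumulative patch knows how to handle, in the spirit of
 * Documentation/livepatch/system-state.rst.  The identifier and version
 * values are arbitrary placeholders; the array is terminated by an entry
 * with a zero id.
 *
 *	#define DEMO_CONSOLE_LOGLEVEL_STATE		1
 *	#define DEMO_CONSOLE_LOGLEVEL_STATE_VERSION	1
 *
 *	static struct klp_state states[] = {
 *		{
 *			.id = DEMO_CONSOLE_LOGLEVEL_STATE,
 *			.version = DEMO_CONSOLE_LOGLEVEL_STATE_VERSION,
 *		}, { }
 *	};
 */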

/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @states:	system states that can get modified
 * @replace:	replace all actively used patches
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue context
 * @finish:	for waiting until it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	struct klp_state *states;
	bool replace;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	struct list_head obj_list;
	bool enabled;
	bool forced;
	struct work_struct free_work;
	struct completion finish;
};
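
/*
 * Example (illustrative sketch): the patch module fills in only the
 * external fields; everything below the "internal" marker is managed by
 * the livepatch core.  objs[] and states[] refer to the sketches above,
 * and .replace = true would make this a cumulative patch that replaces any
 * previously applied patches.
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *		.states = states,
 *		.replace = true,
 *	};
 */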

#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_object_safe(patch, obj, tmp_obj)		\
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

#define klp_for_each_object(patch, obj)	\
	list_for_each_entry(obj, &patch->obj_list, node)

#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

#define klp_for_each_func_safe(obj, func, tmp_func)			\
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

#define klp_for_each_func(obj, func)	\
	list_for_each_entry(func, &obj->func_list, node)
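
/*
 * Example (illustrative sketch): the _static iterators walk the
 * NULL-terminated arrays supplied by the patch module, while the
 * list-based iterators walk the dynamically built obj_list/func_list and
 * are meant for code running after the patch has been initialized, e.g.
 * inside a callback.  demo_dump_patch() is a placeholder name.
 *
 *	static void demo_dump_patch(struct klp_patch *patch)
 *	{
 *		struct klp_object *obj;
 *		struct klp_func *func;
 *
 *		klp_for_each_object(patch, obj)
 *			klp_for_each_func(obj, func)
 *				pr_info("%s: %s\n",
 *					obj->name ? obj->name : "vmlinux",
 *					func->old_name);
 *	}
 */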

int klp_enable_patch(struct klp_patch *patch);
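
/*
 * Example (illustrative sketch): a livepatch module typically calls
 * klp_enable_patch() from its init function and marks itself as a
 * livepatch module, following the pattern used by samples/livepatch/.
 * The function names are placeholders.
 *
 *	static int demo_livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 *	static void demo_livepatch_exit(void)
 *	{
 *	}
 *
 *	module_init(demo_livepatch_init);
 *	module_exit(demo_livepatch_exit);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */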

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);

static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}

static inline bool klp_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
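
/*
 * Example (illustrative sketch): shadow variables attach extra state to an
 * existing object without changing its layout, in the spirit of
 * Documentation/livepatch/shadow-vars.rst.  The id value, the
 * demo_lock_ctor() helper, the shadowed spinlock and the "sta" object are
 * placeholders for this example.
 *
 *	#define DEMO_LOCK_ID	1
 *
 *	static int demo_lock_ctor(void *obj, void *shadow_data, void *ctor_data)
 *	{
 *		spin_lock_init((spinlock_t *)shadow_data);
 *		return 0;
 *	}
 *
 *	// attach (or look up) a shadow spinlock for "sta"
 *	lock = klp_shadow_get_or_alloc(sta, DEMO_LOCK_ID, sizeof(*lock),
 *				       GFP_ATOMIC, demo_lock_ctor, NULL);
 *
 *	// detach it again when the object goes away
 *	klp_shadow_free(sta, DEMO_LOCK_ID, NULL);
 */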

struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
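
/*
 * Example (illustrative sketch): a cumulative patch can take over state
 * from the patch it replaces.  klp_get_prev_state() looks the state up in
 * the patch being replaced, klp_get_state() in the given patch; the
 * identifier is the placeholder from the states[] sketch above, and
 * demo_take_over_state() could be called from a pre-patch callback.
 *
 *	static int demo_take_over_state(void)
 *	{
 *		struct klp_state *state, *prev;
 *
 *		state = klp_get_state(&patch, DEMO_CONSOLE_LOGLEVEL_STATE);
 *		prev = klp_get_prev_state(DEMO_CONSOLE_LOGLEVEL_STATE);
 *		if (!state)
 *			return -EINVAL;
 *		if (prev)
 *			state->data = prev->data;	// inherit existing data
 *		return 0;
 *	}
 */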

int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname);

#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

static inline
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname)
{
	return 0;
}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */