• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1  /* SPDX-License-Identifier: GPL-2.0-or-later */
2  /*
3   * livepatch.h - Kernel Live Patching Core
4   *
5   * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
6   * Copyright (C) 2014 SUSE
7   */
8  
9  #ifndef _LINUX_LIVEPATCH_H_
10  #define _LINUX_LIVEPATCH_H_
11  
12  #include <linux/module.h>
13  #include <linux/ftrace.h>
14  #include <linux/completion.h>
15  #include <linux/list.h>
16  
17  #if IS_ENABLED(CONFIG_LIVEPATCH)
18  
19  #include <asm/livepatch.h>
20  
/*
 * Per-task patch states (see task_struct::patch_state):
 * KLP_UNDEFINED - no transition in progress for this task
 * KLP_UNPATCHED - task is presumably still running original code
 * KLP_PATCHED   - task is presumably running the patched code
 * NOTE(review): exact per-state semantics live in kernel/livepatch/ — confirm there.
 */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1
25  
26  /**
27   * struct klp_func - function structure for live patching
28   * @old_name:	name of the function to be patched
29   * @new_func:	pointer to the patched function code
30   * @old_sympos: a hint indicating which symbol position the old function
31   *		can be found (optional)
32   * @old_func:	pointer to the function being patched
33   * @kobj:	kobject for sysfs resources
34   * @node:	list node for klp_object func_list
35   * @stack_node:	list node for klp_ops func_stack list
36   * @old_size:	size of the old function
37   * @new_size:	size of the new function
38   * @nop:        temporary patch to use the original code again; dyn. allocated
39   * @patched:	the func has been added to the klp_ops list
40   * @transition:	the func is currently being applied or reverted
41   *
42   * The patched and transition variables define the func's patching state.  When
43   * patching, a func is always in one of the following states:
44   *
45   *   patched=0 transition=0: unpatched
46   *   patched=0 transition=1: unpatched, temporary starting state
47   *   patched=1 transition=1: patched, may be visible to some tasks
48   *   patched=1 transition=0: patched, visible to all tasks
49   *
50   * And when unpatching, it goes in the reverse order:
51   *
52   *   patched=1 transition=0: patched, visible to all tasks
53   *   patched=1 transition=1: patched, may be visible to some tasks
54   *   patched=0 transition=1: unpatched, temporary ending state
55   *   patched=0 transition=0: unpatched
56   */
struct klp_func {
	/* external: filled in by the patch module */
	const char *old_name;	/* name of the function to be patched */
	void *new_func;		/* pointer to the replacement function code */
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * it is expected the symbol is unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal: managed by the livepatch core */
	void *old_func;			/* pointer to the function being patched */
	struct kobject kobj;		/* sysfs representation of this func */
	struct list_head node;		/* entry in klp_object::func_list */
	struct list_head stack_node;	/* entry in the klp_ops func_stack list */
	unsigned long old_size, new_size;	/* sizes of the old/new functions */
	bool nop;		/* temporary nop func to restore original code; dyn. allocated */
	bool patched;		/* func has been added to the klp_ops list */
	bool transition;	/* func is currently being applied or reverted */
};
80  
81  struct klp_object;
82  
83  /**
84   * struct klp_callbacks - pre/post live-(un)patch callback structure
85   * @pre_patch:		executed before code patching
86   * @post_patch:		executed after code patching
87   * @pre_unpatch:	executed before code unpatching
88   * @post_unpatch:	executed after code unpatching
89   * @post_unpatch_enabled:	flag indicating if post-unpatch callback
90   * 				should run
91   *
92   * All callbacks are optional.  Only the pre-patch callback, if provided,
93   * will be unconditionally executed.  If the parent klp_object fails to
94   * patch for any reason, including a non-zero error status returned from
95   * the pre-patch callback, no further callbacks will be executed.
96   */
struct klp_callbacks {
	/* Runs before code patching; a nonzero return aborts the patch. */
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);	/* after code patching */
	void (*pre_unpatch)(struct klp_object *obj);	/* before code unpatching */
	void (*post_unpatch)(struct klp_object *obj);	/* after code unpatching */
	bool post_unpatch_enabled;	/* should the post-unpatch callback run? */
};
104  
105  /**
106   * struct klp_object - kernel object structure for live patching
107   * @name:	module name (or NULL for vmlinux)
108   * @funcs:	function entries for functions to be patched in the object
109   * @callbacks:	functions to be executed pre/post (un)patching
110   * @kobj:	kobject for sysfs resources
111   * @func_list:	dynamic list of the function entries
112   * @node:	list node for klp_patch obj_list
113   * @mod:	kernel module associated with the patched object
114   *		(NULL for vmlinux)
115   * @dynamic:    temporary object for nop functions; dynamically allocated
116   * @patched:	the object's funcs have been added to the klp_ops list
117   */
struct klp_object {
	/* external: filled in by the patch module */
	const char *name;		/* module name, or NULL for vmlinux */
	struct klp_func *funcs;		/* array of function entries to patch */
	struct klp_callbacks callbacks;	/* pre/post (un)patching hooks */

	/* internal: managed by the livepatch core */
	struct kobject kobj;		/* sysfs representation of this object */
	struct list_head func_list;	/* dynamic list of klp_func entries */
	struct list_head node;		/* entry in klp_patch::obj_list */
	struct module *mod;		/* patched module (NULL for vmlinux) */
	bool dynamic;	/* temporary object for nop functions; dyn. allocated */
	bool patched;	/* the object's funcs have been added to klp_ops */
};
132  
133  /**
134   * struct klp_patch - patch structure for live patching
135   * @mod:	reference to the live patch module
136   * @objs:	object entries for kernel objects to be patched
137   * @replace:	replace all actively used patches
138   * @list:	list node for global list of actively used patches
139   * @kobj:	kobject for sysfs resources
140   * @obj_list:	dynamic list of the object entries
141   * @enabled:	the patch is enabled (but operation may be incomplete)
142   * @forced:	was involved in a forced transition
143   * @free_work:	patch cleanup from workqueue-context
144   * @finish:	for waiting till it is safe to remove the patch module
145   */
struct klp_patch {
	/* external: filled in by the patch module */
	struct module *mod;		/* the live patch module itself */
	struct klp_object *objs;	/* array of kernel objects to patch */
	bool replace;			/* replace all actively used patches */

	/* internal: managed by the livepatch core */
	struct list_head list;		/* entry in the global list of patches */
	struct kobject kobj;		/* sysfs representation of this patch */
	struct list_head obj_list;	/* dynamic list of klp_object entries */
	bool enabled;	/* patch is enabled (operation may be incomplete) */
	bool forced;	/* was involved in a forced transition */
	struct work_struct free_work;	/* patch cleanup from workqueue context */
	struct completion finish;	/* wait until it is safe to remove the module */
};
161  
/*
 * Iterate over the module-supplied static object array, which is
 * terminated by an entry that has neither funcs nor a name.
 */
#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

/* Iterate the dynamic object list; safe against removal of the current entry. */
#define klp_for_each_object_safe(patch, obj, tmp_obj)		\
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

/* Iterate the dynamic object list (no removal while iterating). */
#define klp_for_each_object(patch, obj)	\
	list_for_each_entry(obj, &patch->obj_list, node)

/*
 * Iterate over the module-supplied static func array, which is
 * terminated by an entry with no old_name, new_func or old_sympos.
 */
#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

/* Iterate the dynamic func list; safe against removal of the current entry. */
#define klp_for_each_func_safe(obj, func, tmp_func)			\
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

/* Iterate the dynamic func list (no removal while iterating). */
#define klp_for_each_func(obj, func)	\
	list_for_each_entry(func, &obj->func_list, node)
181  
/* Enable the given patch; returns 0 on success or a negative errno. */
int klp_enable_patch(struct klp_patch *);

/* Arch hook run once the to-be-patched object's module is loaded. */
void arch_klp_init_object_loaded(struct klp_patch *patch,
				 struct klp_object *obj);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

/*
 * Per-task patch-state handling; presumably invoked from the fork and
 * scheduler/transition paths — confirm against kernel/livepatch/transition.c.
 */
void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);
193  
/*
 * klp_patch_pending - does @task still need its patch state transitioned?
 *
 * Simply tests the per-task TIF_PATCH_PENDING thread flag.
 */
static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}
198  
klp_have_reliable_stack(void)199  static inline bool klp_have_reliable_stack(void)
200  {
201  	return IS_ENABLED(CONFIG_STACKTRACE) &&
202  	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
203  }
204  
/*
 * Shadow variables: attach extra data to an existing object, keyed by
 * the <obj, id> pair.
 *
 * ctor initializes freshly allocated shadow_data using ctor_data;
 * NOTE(review): a nonzero ctor return presumably fails the allocation —
 * confirm against the klp_shadow_alloc implementation.
 * dtor is given the object and its shadow_data for teardown on free.
 */
typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

/* Look up the shadow variable attached to <obj, id>. */
void *klp_shadow_get(void *obj, unsigned long id);
/* Allocate and attach a new shadow variable to <obj, id>. */
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
/* Look up <obj, id>, allocating and attaching it if not present. */
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
/* Detach and free the shadow variable for <obj, id>, running dtor if set. */
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
/* Detach and free every shadow variable with the given id. */
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
219  
220  #else /* !CONFIG_LIVEPATCH */
221  
/* No-op stubs so callers build unchanged when livepatch is disabled. */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
227  
228  #endif /* CONFIG_LIVEPATCH */
229  
230  #endif /* _LINUX_LIVEPATCH_H_ */
231