// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

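/*
 * klp_ops holds one entry per patched function entry point.  Each entry owns
 * the ftrace_ops registered on that location and a func_stack listing every
 * klp_func (from any patch) that replaces the function, most recent first.
 */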
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

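/*
 * klp_ftrace_handler() is the ftrace callback installed on every patched
 * function.  It selects the klp_func at the head of ops->func_stack (or the
 * next older entry while a transition is in progress and the current task is
 * still KLP_UNPATCHED) and redirects execution to it by updating the saved
 * instruction pointer in @regs.
 */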
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
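
/*
 * Illustrative sketch only (not part of this file): an architecture whose
 * ftrace call site does not sit at the function entry could override the
 * default above from its asm/livepatch.h, along these lines:
 *
 *	#define klp_get_ftrace_location klp_get_ftrace_location
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 *
 * The +16 offset is an assumed example of a patchable call site placed a few
 * instructions past the function entry; ftrace_location_range() then returns
 * the address ftrace actually knows about within that window, or 0 if none.
 */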

/*
 * Remove a klp_func from its ops->func_stack.  If it is the last entry, the
 * ftrace handler is unregistered, the filter is cleared and the klp_ops
 * container is freed.
 */
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

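/*
 * Hook a single klp_func into the redirection machinery.  The first patch of
 * a given function allocates the klp_ops, sets the ftrace filter on the
 * function's location and registers klp_ftrace_handler(); subsequent patches
 * of the same function are simply pushed onto the existing func_stack.
 */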
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}


	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

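/*
 * Undo the patching of an object's functions.  With nops_only set, only the
 * dynamically allocated nop entries are removed (e.g. when cleaning up after
 * an atomic replace); otherwise every patched function is restored.
 */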
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

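/*
 * Patch every function of an object.  On the first failure the object is
 * rolled back to its unpatched state and the error is returned.
 */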
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

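/*
 * Remove only the dynamically allocated nop functions from the patch's
 * objects; the livepatch core uses this once an atomic replace patch has
 * taken over and the nops are no longer needed.
 */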
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}