diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 8f56490a4bb6..2a70f43166f6 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -15,8 +15,9 @@ Table of Contents:
 5. Livepatch life-cycle
    5.1. Loading
    5.2. Enabling
-   5.3. Disabling
-   5.4. Removing
+   5.3. Replacing
+   5.4. Disabling
+   5.5. Removing
 6. Sysfs
 7. Limitations
 
@@ -300,8 +301,12 @@ into three levels:
 5. Livepatch life-cycle
 =======================
 
-Livepatching can be described by four basic operations:
-loading, enabling, disabling, removing.
+Livepatching can be described by five basic operations:
+loading, enabling, replacing, disabling, removing.
+
+The replacing and the disabling operations are mutually
+exclusive. They have the same result for the given patch
+but not for the system.
 
 
 5.1. Loading
@@ -347,7 +352,21 @@ to '0'.
 the "Consistency model" section.
 
 
-5.3. Disabling
+5.3. Replacing
+--------------
+
+All enabled patches might get replaced by a cumulative patch that
+has the .replace flag set.
+
+Once the new patch is enabled and the 'transition' finishes,
+all the functions (struct klp_func) associated with the replaced
+patches are removed from the corresponding struct klp_ops. Also
+the ftrace handler is unregistered and the struct klp_ops is
+freed when the related function is not modified by the new patch
+and the func_stack list becomes empty.
+
+
+5.4. Disabling
 --------------
 
 Enabled patches might get disabled by writing '0' to
@@ -372,7 +391,7 @@ Note that patches must be disabled in exactly the reverse order in which
 they were enabled. It makes the problem and the implementation much easier.
 
 
-5.4. Removing
+5.5. Removing
 -------------
 
 Module removal is only safe when there are no users of functions provided
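[ For illustration: a cumulative patch module is built like any other
  livepatch module, except that it sets the new .replace flag. A minimal
  sketch in the style of samples/livepatch/livepatch-sample.c, assuming
  the klp_enable_patch() API of this series; the symbol "do_something"
  and its replacement are placeholders, not part of this patch:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/livepatch.h>

	/* hypothetical replacement for a vmlinux function */
	static void livepatch_do_something(void)
	{
	}

	static struct klp_func funcs[] = {
		{
			.old_name = "do_something",	/* placeholder */
			.new_func = livepatch_do_something,
		}, { }
	};

	static struct klp_object objs[] = {
		{
			/* name being NULL means vmlinux */
			.funcs = funcs,
		}, { }
	};

	static struct klp_patch patch = {
		.mod = THIS_MODULE,
		.objs = objs,
		/* the new flag: replace all actively used patches */
		.replace = true,
	};

	static int livepatch_init(void)
	{
		return klp_enable_patch(&patch);
	}

	static void livepatch_exit(void)
	{
	}

	module_init(livepatch_init);
	module_exit(livepatch_exit);
	MODULE_LICENSE("GPL");
	MODULE_INFO(livepatch, "Y");
]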
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index e117e20ff771..53551f470722 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -48,6 +48,7 @@
  * @old_size:	size of the old function
  * @new_size:	size of the new function
  * @kobj_added:	@kobj has been added and needs freeing
+ * @nop:	temporary patch to use the original code again; dyn. allocated
  * @patched:	the func has been added to the klp_ops list
  * @transition:	the func is currently being applied or reverted
  *
@@ -86,6 +87,7 @@ struct klp_func {
 	struct list_head stack_node;
 	unsigned long old_size, new_size;
 	bool kobj_added;
+	bool nop;
 	bool patched;
 	bool transition;
 };
@@ -125,6 +127,7 @@ struct klp_callbacks {
  * @mod:	kernel module associated with the patched object
  *		(NULL for vmlinux)
  * @kobj_added:	@kobj has been added and needs freeing
+ * @dynamic:	temporary object for nop functions; dynamically allocated
  * @patched:	the object's funcs have been added to the klp_ops list
  */
 struct klp_object {
@@ -139,6 +142,7 @@ struct klp_object {
 	struct list_head node;
 	struct module *mod;
 	bool kobj_added;
+	bool dynamic;
 	bool patched;
 };
 
@@ -146,6 +150,7 @@ struct klp_object {
  * struct klp_patch - patch structure for live patching
  * @mod:	reference to the live patch module
  * @objs:	object entries for kernel objects to be patched
+ * @replace:	replace all actively used patches
  * @list:	list node for global list of actively used patches
  * @kobj:	kobject for sysfs resources
  * @obj_list:	dynamic list of the object entries
@@ -159,6 +164,7 @@ struct klp_patch {
 	/* external */
 	struct module *mod;
 	struct klp_object *objs;
+	bool replace;
 
 	/* internal */
 	struct list_head list;
@@ -174,6 +180,9 @@ struct klp_patch {
 #define klp_for_each_object_static(patch, obj) \
 	for (obj = patch->objs; obj->funcs || obj->name; obj++)
 
+#define klp_for_each_object_safe(patch, obj, tmp_obj) \
+	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
+
 #define klp_for_each_object(patch, obj) \
 	list_for_each_entry(obj, &patch->obj_list, node)
 
@@ -182,6 +191,9 @@ struct klp_patch {
 	     func->old_name || func->new_func || func->old_sympos; \
 	     func++)
 
+#define klp_for_each_func_safe(obj, func, tmp_func) \
+	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
+
 #define klp_for_each_func(obj, func) \
 	list_for_each_entry(func, &obj->func_list, node)
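[ Why the new _safe iterators: with .replace, dynamically allocated nop
  entries get freed while the lists are being walked, and the plain
  iterators would dereference a freed node to advance. A sketch of the
  pattern, using the klp_free_object_dynamic() helper added below in
  core.c:

	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		/*
		 * tmp_obj already points at the next node, so freeing
		 * obj here cannot break the iteration.
		 */
		if (obj->dynamic)
			klp_free_object_dynamic(obj);
	}
]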
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 37d0d3645fa6..ecb7660f1d8b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -92,6 +92,40 @@ static bool klp_initialized(void)
 	return !!klp_root_kobj;
 }
 
+static struct klp_func *klp_find_func(struct klp_object *obj,
+				      struct klp_func *old_func)
+{
+	struct klp_func *func;
+
+	klp_for_each_func(obj, func) {
+		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
+		    (old_func->old_sympos == func->old_sympos)) {
+			return func;
+		}
+	}
+
+	return NULL;
+}
+
+static struct klp_object *klp_find_object(struct klp_patch *patch,
+					  struct klp_object *old_obj)
+{
+	struct klp_object *obj;
+
+	klp_for_each_object(patch, obj) {
+		if (klp_is_module(old_obj)) {
+			if (klp_is_module(obj) &&
+			    strcmp(old_obj->name, obj->name) == 0) {
+				return obj;
+			}
+		} else if (!klp_is_module(obj)) {
+			return obj;
+		}
+	}
+
+	return NULL;
+}
+
 struct klp_find_arg {
 	const char *objname;
 	const char *name;
@@ -418,6 +452,121 @@ static struct attribute *klp_patch_attrs[] = {
 	NULL
 };
 
+static void klp_free_object_dynamic(struct klp_object *obj)
+{
+	kfree(obj->name);
+	kfree(obj);
+}
+
+static struct klp_object *klp_alloc_object_dynamic(const char *name)
+{
+	struct klp_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	if (name) {
+		obj->name = kstrdup(name, GFP_KERNEL);
+		if (!obj->name) {
+			kfree(obj);
+			return NULL;
+		}
+	}
+
+	INIT_LIST_HEAD(&obj->func_list);
+	obj->dynamic = true;
+
+	return obj;
+}
+
+static void klp_free_func_nop(struct klp_func *func)
+{
+	kfree(func->old_name);
+	kfree(func);
+}
+
+static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
+					   struct klp_object *obj)
+{
+	struct klp_func *func;
+
+	func = kzalloc(sizeof(*func), GFP_KERNEL);
+	if (!func)
+		return NULL;
+
+	if (old_func->old_name) {
+		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
+		if (!func->old_name) {
+			kfree(func);
+			return NULL;
+		}
+	}
+
+	/*
+	 * func->new_func is the same as func->old_func. These addresses are
+	 * set when the object is loaded, see klp_init_object_loaded().
+	 */
+	func->old_sympos = old_func->old_sympos;
+	func->nop = true;
+
+	return func;
+}
+
+static int klp_add_object_nops(struct klp_patch *patch,
+			       struct klp_object *old_obj)
+{
+	struct klp_object *obj;
+	struct klp_func *func, *old_func;
+
+	obj = klp_find_object(patch, old_obj);
+
+	if (!obj) {
+		obj = klp_alloc_object_dynamic(old_obj->name);
+		if (!obj)
+			return -ENOMEM;
+
+		list_add_tail(&obj->node, &patch->obj_list);
+	}
+
+	klp_for_each_func(old_obj, old_func) {
+		func = klp_find_func(obj, old_func);
+		if (func)
+			continue;
+
+		func = klp_alloc_func_nop(old_func, obj);
+		if (!func)
+			return -ENOMEM;
+
+		list_add_tail(&func->node, &obj->func_list);
+	}
+
+	return 0;
+}
+
+/*
+ * Add 'nop' functions which simply return to the caller to run
+ * the original function. The 'nop' functions are added to a
+ * patch to facilitate a 'replace' mode.
+ */
+static int klp_add_nops(struct klp_patch *patch)
+{
+	struct klp_patch *old_patch;
+	struct klp_object *old_obj;
+
+	list_for_each_entry(old_patch, &klp_patches, list) {
+		klp_for_each_object(old_patch, old_obj) {
+			int err;
+
+			err = klp_add_object_nops(patch, old_obj);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 static void klp_kobj_release_patch(struct kobject *kobj)
 {
 	struct klp_patch *patch;
@@ -434,6 +583,12 @@ static struct kobj_type klp_ktype_patch = {
 
 static void klp_kobj_release_object(struct kobject *kobj)
 {
+	struct klp_object *obj;
+
+	obj = container_of(kobj, struct klp_object, kobj);
+
+	if (obj->dynamic)
+		klp_free_object_dynamic(obj);
 }
 
 static struct kobj_type klp_ktype_object = {
@@ -443,6 +598,12 @@ static struct kobj_type klp_ktype_object = {
 
 static void klp_kobj_release_func(struct kobject *kobj)
 {
+	struct klp_func *func;
+
+	func = container_of(kobj, struct klp_func, kobj);
+
+	if (func->nop)
+		klp_free_func_nop(func);
 }
 
 static struct kobj_type klp_ktype_func = {
@@ -452,12 +613,15 @@ static struct kobj_type klp_ktype_func = {
 
 static void klp_free_funcs(struct klp_object *obj)
 {
-	struct klp_func *func;
+	struct klp_func *func, *tmp_func;
 
-	klp_for_each_func(obj, func) {
+	klp_for_each_func_safe(obj, func, tmp_func) {
 		/* Might be called from klp_init_patch() error path. */
-		if (func->kobj_added)
+		if (func->kobj_added) {
 			kobject_put(&func->kobj);
+		} else if (func->nop) {
+			klp_free_func_nop(func);
+		}
 	}
 }
 
@@ -468,20 +632,27 @@ static void klp_free_object_loaded(struct klp_object *obj)
 
 	obj->mod = NULL;
 
-	klp_for_each_func(obj, func)
+	klp_for_each_func(obj, func) {
 		func->old_func = NULL;
+
+		if (func->nop)
+			func->new_func = NULL;
+	}
 }
 
 static void klp_free_objects(struct klp_patch *patch)
 {
-	struct klp_object *obj;
+	struct klp_object *obj, *tmp_obj;
 
-	klp_for_each_object(patch, obj) {
+	klp_for_each_object_safe(patch, obj, tmp_obj) {
 		klp_free_funcs(obj);
 
 		/* Might be called from klp_init_patch() error path. */
-		if (obj->kobj_added)
+		if (obj->kobj_added) {
 			kobject_put(&obj->kobj);
+		} else if (obj->dynamic) {
+			klp_free_object_dynamic(obj);
+		}
 	}
 }
 
@@ -543,7 +714,14 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
 	int ret;
 
-	if (!func->old_name || !func->new_func)
+	if (!func->old_name)
+		return -EINVAL;
+
+	/*
+	 * NOPs get the address later. The patched module must be loaded,
+	 * see klp_init_object_loaded().
+	 */
+	if (!func->new_func && !func->nop)
 		return -EINVAL;
 
 	if (strlen(func->old_name) >= KSYM_NAME_LEN)
@@ -605,6 +783,9 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 			return -ENOENT;
 		}
 
+		if (func->nop)
+			func->new_func = func->old_func;
+
 		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
 						  &func->new_size, NULL);
 		if (!ret) {
@@ -697,6 +878,12 @@ static int klp_init_patch(struct klp_patch *patch)
 		return ret;
 	patch->kobj_added = true;
 
+	if (patch->replace) {
+		ret = klp_add_nops(patch);
+		if (ret)
+			return ret;
+	}
+
 	klp_for_each_object(patch, obj) {
 		ret = klp_init_object(patch, obj);
 		if (ret)
@@ -868,6 +1055,35 @@ err:
 }
 EXPORT_SYMBOL_GPL(klp_enable_patch);
 
+/*
+ * This function removes replaced patches.
+ *
+ * We could be pretty aggressive here. It is called in the situation where
+ * these structures are no longer accessible. All functions are redirected
+ * by the klp_transition_patch: they either use the new code or run the
+ * original code thanks to the special nop function entries.
+ *
+ * The only exception is when the transition was forced. In this case,
+ * klp_ftrace_handler() might still see the replaced patch on the stack.
+ * Fortunately, it is carefully designed to work with removed functions
+ * thanks to RCU. We only have to keep the patches on the system. Also
+ * this is handled transparently by patch->module_put.
+ */
+void klp_discard_replaced_patches(struct klp_patch *new_patch)
+{
+	struct klp_patch *old_patch, *tmp_patch;
+
+	list_for_each_entry_safe(old_patch, tmp_patch, &klp_patches, list) {
+		if (old_patch == new_patch)
+			return;
+
+		old_patch->enabled = false;
+		klp_unpatch_objects(old_patch);
+		klp_free_patch_start(old_patch);
+		schedule_work(&old_patch->free_work);
+	}
+}
+
 /*
  * Remove parts of patches that touch a given kernel module. The list of
  * patches processed might be limited. When limit is NULL, all patches
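[ A worked example of klp_add_nops(): suppose an enabled patch modifies
  func_a() and func_b() (hypothetical names) and the new cumulative
  patch only provides a replacement for func_a(). After klp_add_nops(),
  the new patch's function list conceptually contains:

	/* entry written by the patch author */
	{ .old_name = "func_a", .new_func = livepatch_func_a, .nop = false },

	/*
	 * Entry generated by klp_add_nops(). klp_init_object_loaded()
	 * later sets new_func = old_func, so once the transition
	 * finishes, callers run the original func_b() again.
	 */
	{ .old_name = "func_b", .new_func = NULL, .nop = true },
]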
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index d4eefc520c08..f6a853adcc00 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -8,6 +8,7 @@ extern struct mutex klp_mutex;
 extern struct list_head klp_patches;
 
 void klp_free_patch_start(struct klp_patch *patch);
+void klp_discard_replaced_patches(struct klp_patch *new_patch);
 
 static inline bool klp_is_object_loaded(struct klp_object *obj)
 {
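[ Putting the pieces together, a successful replace operation is roughly
  the following call sequence (a sketch, not literal code; the
  transition hook is in the transition.c hunk below):

	klp_enable_patch(new_patch)
	  klp_init_patch(new_patch)
	    klp_add_nops(new_patch)		/* only if new_patch->replace */
	  /* ... all tasks transition to KLP_PATCHED ... */
	klp_complete_transition()
	  klp_discard_replaced_patches(new_patch)
	    /* for each replaced old_patch: */
	    klp_unpatch_objects(old_patch)
	    klp_free_patch_start(old_patch)
	    schedule_work(&old_patch->free_work)
]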
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 825022d70912..0ff466ab4b5a 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -118,7 +118,15 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 		}
 	}
 
+	/*
+	 * NOPs are used to replace existing patches with the original code.
+	 * Do nothing! Setting the pc would cause an infinite loop.
+	 */
+	if (func->nop)
+		goto unlock;
+
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+
 unlock:
 	preempt_enable_notrace();
 }
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index c9917a24b3a4..f4c5908a9731 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -85,6 +85,9 @@ static void klp_complete_transition(void)
 		 klp_transition_patch->mod->name,
 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
+	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED)
+		klp_discard_replaced_patches(klp_transition_patch);
+
 	if (klp_target_state == KLP_UNPATCHED) {
 		/*
 		 * All tasks have transitioned to KLP_UNPATCHED so we can now
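[ Two details worth spelling out. First, the guard in
  klp_complete_transition() fires only when the replace transition
  completed in the patching direction; if the transition is reverted,
  klp_target_state is KLP_UNPATCHED, the condition stays false, and the
  previously enabled patches are left untouched, so the system returns
  to its former state. Second, the handler change condenses to the
  following per-function decision:

	if (func->nop)
		;	/* leave the pc alone: the original code runs */
	else
		klp_arch_set_pc(regs, (unsigned long)func->new_func);
]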