Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching

Pull livepatching updates from Jiri Kosina:

 - support for something we call 'atomic replace', which allows for much
   better handling of cumulative patches (something very useful for
   distros); from Jason Baron, with help from Petr Mladek and Joe
   Lawrence (see the usage sketch below this list)

 - improved handling of tasks that block patching finalization, from
   Miroslav Benes

 - update of the MAINTAINERS file to reflect the move towards group
   maintainership
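
A minimal sketch of such a cumulative patch module, modeled on
samples/livepatch/livepatch-sample.c (the patched function is the
sample's illustrative cmdline_proc_show): setting .replace makes
enabling the patch atomically replace all previously applied
livepatches, and klp_enable_patch() is now the single entry point,
since the separate registration step was removed by this series.

  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/livepatch.h>
  #include <linux/seq_file.h>

  /* Replacement body for the sample's patched function. */
  static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
  {
          seq_printf(m, "%s\n", "this has been live patched");
          return 0;
  }

  static struct klp_func funcs[] = {
          {
                  .old_name = "cmdline_proc_show",
                  .new_func = livepatch_cmdline_proc_show,
          }, { }
  };

  static struct klp_object objs[] = {
          {
                  /* a NULL name means the function lives in vmlinux */
                  .funcs = funcs,
          }, { }
  };

  static struct klp_patch patch = {
          .mod = THIS_MODULE,
          .objs = objs,
          /*
           * Cumulative patch: enabling it atomically replaces every
           * previously applied livepatch; functions patched only by
           * the older patches are reverted via dynamically generated
           * nop entries.
           */
          .replace = true,
  };

  static int livepatch_init(void)
  {
          /* no separate klp_register_patch() step anymore */
          return klp_enable_patch(&patch);
  }

  static void livepatch_exit(void)
  {
  }

  module_init(livepatch_init);
  module_exit(livepatch_exit);
  MODULE_LICENSE("GPL");
  MODULE_INFO(livepatch, "Y");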

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching: (22 commits)
  livepatch/selftests: use "$@" to preserve argument list
  livepatch: Module coming and going callbacks can proceed with all listed patches
  livepatch: Proper error handling in the shadow variables selftest
  livepatch: return -ENOMEM on ptr_id() allocation failure
  livepatch: Introduce klp_for_each_patch macro
  livepatch: core: Return EOPNOTSUPP instead of ENOSYS
  selftests/livepatch: add DYNAMIC_DEBUG config dependency
  livepatch: samples: non static warnings fix
  livepatch: update MAINTAINERS
  livepatch: Remove signal sysfs attribute
  livepatch: Send a fake signal periodically
  selftests/livepatch: introduce tests
  livepatch: Remove ordering (stacking) of the livepatches
  livepatch: Atomic replace and cumulative patches documentation
  livepatch: Remove Nop structures when unused
  livepatch: Add atomic replace
  livepatch: Use lists to manage patches, objects and functions
  livepatch: Simplify API by removing registration step
  livepatch: Don't block the removal of patches loaded after a forced transition
  livepatch: Consolidate klp_free functions
  ...
Committed by Linus Torvalds, 2019-03-08 08:58:25 -08:00
35 changed files with 2856 additions and 1278 deletions

File diff suppressed because it is too large

kernel/livepatch/core.h

@@ -5,6 +5,17 @@
#include <linux/livepatch.h>
extern struct mutex klp_mutex;
extern struct list_head klp_patches;
#define klp_for_each_patch_safe(patch, tmp_patch) \
list_for_each_entry_safe(patch, tmp_patch, &klp_patches, list)
#define klp_for_each_patch(patch) \
list_for_each_entry(patch, &klp_patches, list)
void klp_free_patch_start(struct klp_patch *patch);
void klp_discard_replaced_patches(struct klp_patch *new_patch);
void klp_discard_nops(struct klp_patch *new_patch);
static inline bool klp_is_object_loaded(struct klp_object *obj)
{

kernel/livepatch/patch.c

@@ -34,7 +34,7 @@
static LIST_HEAD(klp_ops);
struct klp_ops *klp_find_ops(unsigned long old_addr)
struct klp_ops *klp_find_ops(void *old_func)
{
struct klp_ops *ops;
struct klp_func *func;
@@ -42,7 +42,7 @@ struct klp_ops *klp_find_ops(unsigned long old_addr)
list_for_each_entry(ops, &klp_ops, node) {
func = list_first_entry(&ops->func_stack, struct klp_func,
stack_node);
if (func->old_addr == old_addr)
if (func->old_func == old_func)
return ops;
}
@@ -118,7 +118,15 @@ static void notrace klp_ftrace_handler(unsigned long ip,
}
}
/*
* NOPs are used to replace existing patches with original code.
* Do nothing! Setting pc would cause an infinite loop.
*/
if (func->nop)
goto unlock;
klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
preempt_enable_notrace();
}
@@ -142,17 +150,18 @@ static void klp_unpatch_func(struct klp_func *func)
if (WARN_ON(!func->patched))
return;
if (WARN_ON(!func->old_addr))
if (WARN_ON(!func->old_func))
return;
ops = klp_find_ops(func->old_addr);
ops = klp_find_ops(func->old_func);
if (WARN_ON(!ops))
return;
if (list_is_singular(&ops->func_stack)) {
unsigned long ftrace_loc;
ftrace_loc = klp_get_ftrace_location(func->old_addr);
ftrace_loc =
klp_get_ftrace_location((unsigned long)func->old_func);
if (WARN_ON(!ftrace_loc))
return;
@@ -174,17 +183,18 @@ static int klp_patch_func(struct klp_func *func)
struct klp_ops *ops;
int ret;
if (WARN_ON(!func->old_addr))
if (WARN_ON(!func->old_func))
return -EINVAL;
if (WARN_ON(func->patched))
return -EINVAL;
ops = klp_find_ops(func->old_addr);
ops = klp_find_ops(func->old_func);
if (!ops) {
unsigned long ftrace_loc;
ftrace_loc = klp_get_ftrace_location(func->old_addr);
ftrace_loc =
klp_get_ftrace_location((unsigned long)func->old_func);
if (!ftrace_loc) {
pr_err("failed to find location for function '%s'\n",
func->old_name);
@@ -236,15 +246,26 @@ err:
return ret;
}
void klp_unpatch_object(struct klp_object *obj)
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
struct klp_func *func;
klp_for_each_func(obj, func)
klp_for_each_func(obj, func) {
if (nops_only && !func->nop)
continue;
if (func->patched)
klp_unpatch_func(func);
}
obj->patched = false;
if (obj->dynamic || !nops_only)
obj->patched = false;
}
void klp_unpatch_object(struct klp_object *obj)
{
__klp_unpatch_object(obj, false);
}
int klp_patch_object(struct klp_object *obj)
@@ -267,11 +288,21 @@ int klp_patch_object(struct klp_object *obj)
return 0;
}
void klp_unpatch_objects(struct klp_patch *patch)
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
struct klp_object *obj;
klp_for_each_object(patch, obj)
if (obj->patched)
klp_unpatch_object(obj);
__klp_unpatch_object(obj, nops_only);
}
void klp_unpatch_objects(struct klp_patch *patch)
{
__klp_unpatch_objects(patch, false);
}
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
__klp_unpatch_objects(patch, true);
}

kernel/livepatch/patch.h

@@ -10,7 +10,7 @@
* struct klp_ops - structure for tracking registered ftrace ops structs
*
* A single ftrace_ops is shared between all enabled replacement functions
* (klp_func structs) which have the same old_addr. This allows the switch
* (klp_func structs) which have the same old_func. This allows the switch
* between function versions to happen instantaneously by updating the klp_ops
* struct's func_stack list. The winner is the klp_func at the top of the
* func_stack (front of the list).
@@ -25,10 +25,11 @@ struct klp_ops {
struct ftrace_ops fops;
};
struct klp_ops *klp_find_ops(unsigned long old_addr);
struct klp_ops *klp_find_ops(void *old_func);
int klp_patch_object(struct klp_object *obj);
void klp_unpatch_object(struct klp_object *obj);
void klp_unpatch_objects(struct klp_patch *patch);
void klp_unpatch_objects_dynamic(struct klp_patch *patch);
#endif /* _LIVEPATCH_PATCH_H */

kernel/livepatch/transition.c

@@ -29,11 +29,13 @@
#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128
#define SIGNALS_TIMEOUT 15
struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED;
static bool klp_forced = false;
static unsigned int klp_signals_cnt;
/*
* This work can be performed periodically to finish patching or unpatching any
@@ -87,6 +89,11 @@ static void klp_complete_transition(void)
klp_transition_patch->mod->name,
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
klp_discard_replaced_patches(klp_transition_patch);
klp_discard_nops(klp_transition_patch);
}
if (klp_target_state == KLP_UNPATCHED) {
/*
* All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -136,13 +143,6 @@ static void klp_complete_transition(void)
pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
* klp_forced set implies unbounded increase of module's ref count if
* the module is disabled/enabled in a loop.
*/
if (!klp_forced && klp_target_state == KLP_UNPATCHED)
module_put(klp_transition_patch->mod);
klp_target_state = KLP_UNDEFINED;
klp_transition_patch = NULL;
}
@@ -224,11 +224,11 @@ static int klp_check_stack_func(struct klp_func *func,
* Check for the to-be-patched function
* (the previous func).
*/
ops = klp_find_ops(func->old_addr);
ops = klp_find_ops(func->old_func);
if (list_is_singular(&ops->func_stack)) {
/* original function */
func_addr = func->old_addr;
func_addr = (unsigned long)func->old_func;
func_size = func->old_size;
} else {
/* previously patched function */
@@ -347,6 +347,47 @@ done:
}
/*
* Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
* Kthreads with TIF_PATCH_PENDING set are woken up.
*/
static void klp_send_signals(void)
{
struct task_struct *g, *task;
if (klp_signals_cnt == SIGNALS_TIMEOUT)
pr_notice("signaling remaining tasks\n");
read_lock(&tasklist_lock);
for_each_process_thread(g, task) {
if (!klp_patch_pending(task))
continue;
/*
* There is a small race here. We could see TIF_PATCH_PENDING
* set and decide to wake up a kthread or send a fake signal.
* Meanwhile the task could migrate itself and the action
* would be meaningless. It is not serious though.
*/
if (task->flags & PF_KTHREAD) {
/*
* Wake up a kthread which sleeps interruptedly and
* still has not been migrated.
*/
wake_up_state(task, TASK_INTERRUPTIBLE);
} else {
/*
* Send fake signal to all non-kthread tasks which are
* still not migrated.
*/
spin_lock_irq(&task->sighand->siglock);
signal_wake_up(task, 0);
spin_unlock_irq(&task->sighand->siglock);
}
}
read_unlock(&tasklist_lock);
}
/*
* Try to switch all remaining tasks to the target patch state by walking the
* stacks of sleeping tasks and looking for any to-be-patched or
@@ -359,6 +400,7 @@ void klp_try_complete_transition(void)
{
unsigned int cpu;
struct task_struct *g, *task;
struct klp_patch *patch;
bool complete = true;
WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
@@ -396,6 +438,10 @@ void klp_try_complete_transition(void)
put_online_cpus();
if (!complete) {
if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
klp_send_signals();
klp_signals_cnt++;
/*
* Some tasks weren't able to be switched over. Try again
* later and/or wait for other methods like kernel exit
@@ -407,7 +453,18 @@ void klp_try_complete_transition(void)
}
/* we're done, now cleanup the data structures */
patch = klp_transition_patch;
klp_complete_transition();
/*
* It would make more sense to free the patch in
* klp_complete_transition() but it is called also
* from klp_cancel_transition().
*/
if (!patch->enabled) {
klp_free_patch_start(patch);
schedule_work(&patch->free_work);
}
}
/*
@@ -446,6 +503,8 @@ void klp_start_transition(void)
if (task->patch_state != klp_target_state)
set_tsk_thread_flag(task, TIF_PATCH_PENDING);
}
klp_signals_cnt = 0;
}
/*
@@ -568,47 +627,6 @@ void klp_copy_process(struct task_struct *child)
/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
/*
* Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
* Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
* action currently.
*/
void klp_send_signals(void)
{
struct task_struct *g, *task;
pr_notice("signaling remaining tasks\n");
read_lock(&tasklist_lock);
for_each_process_thread(g, task) {
if (!klp_patch_pending(task))
continue;
/*
* There is a small race here. We could see TIF_PATCH_PENDING
* set and decide to wake up a kthread or send a fake signal.
* Meanwhile the task could migrate itself and the action
* would be meaningless. It is not serious though.
*/
if (task->flags & PF_KTHREAD) {
/*
* Wake up a kthread which sleeps interruptedly and
* still has not been migrated.
*/
wake_up_state(task, TASK_INTERRUPTIBLE);
} else {
/*
* Send fake signal to all non-kthread tasks which are
* still not migrated.
*/
spin_lock_irq(&task->sighand->siglock);
signal_wake_up(task, 0);
spin_unlock_irq(&task->sighand->siglock);
}
}
read_unlock(&tasklist_lock);
}
/*
* Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
* existing transition to finish.
@@ -620,6 +638,7 @@ void klp_send_signals(void)
*/
void klp_force_transition(void)
{
struct klp_patch *patch;
struct task_struct *g, *task;
unsigned int cpu;
@@ -633,5 +652,6 @@ void klp_force_transition(void)
for_each_possible_cpu(cpu)
klp_update_patch_state(idle_task(cpu));
klp_forced = true;
klp_for_each_patch(patch)
patch->forced = true;
}

kernel/livepatch/transition.h

@@ -11,7 +11,6 @@ void klp_cancel_transition(void);
void klp_start_transition(void);
void klp_try_complete_transition(void);
void klp_reverse_transition(void);
void klp_send_signals(void);
void klp_force_transition(void);
#endif /* _LIVEPATCH_TRANSITION_H */