Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

drivers/net/ethernet/sfc/tc.c
  622ab65634 ("sfc: fix error unwinds in TC offload")
  b6583d5e9e ("sfc: support TC decap rules matching on enc_src_port")

net/mptcp/protocol.c
  5b825727d0 ("mptcp: add annotations around msk->subflow accesses")
  e76c8ef5cc ("mptcp: refactor mptcp_stream_accept()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski, 2023-06-01 15:33:03 -07:00
429 changed files with 2479 additions and 1380 deletions

kernel/exit.c

@@ -411,7 +411,10 @@ static void coredump_task_exit(struct task_struct *tsk)
tsk->flags |= PF_POSTCOREDUMP;
core_state = tsk->signal->core_state;
spin_unlock_irq(&tsk->sighand->siglock);
if (core_state) {
/* The vhost_worker does not participate in coredumps */
if (core_state &&
((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
struct core_thread self;
self.task = current;
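
The new condition boils down to one flag test: only a task that is a user worker without also being an io worker (i.e. a vhost worker thread) is excluded from the coredump. A minimal userspace sketch of that test follows; the PF_* values here are illustrative placeholders rather than the kernel's real flag values, and participates_in_coredump() is a made-up name for the expression used above.

#include <stdio.h>

#define PF_IO_WORKER	0x1	/* placeholder value, assumption */
#define PF_USER_WORKER	0x2	/* placeholder value, assumption */

static int participates_in_coredump(unsigned long flags)
{
	/* everything except a pure user worker (e.g. vhost) keeps participating */
	return (flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER;
}

int main(void)
{
	printf("regular task   : %d\n", participates_in_coredump(0));
	printf("io worker      : %d\n", participates_in_coredump(PF_IO_WORKER));
	printf("vhost worker   : %d\n", participates_in_coredump(PF_USER_WORKER));
	printf("io+user worker : %d\n", participates_in_coredump(PF_IO_WORKER | PF_USER_WORKER));
	return 0;
}

Only the third case prints 0, matching the intent spelled out in the comment above.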

kernel/fork.c

@@ -2336,16 +2336,16 @@ __latent_entropy struct task_struct *copy_process(
p->flags &= ~PF_KTHREAD;
if (args->kthread)
p->flags |= PF_KTHREAD;
if (args->user_worker)
p->flags |= PF_USER_WORKER;
if (args->io_thread) {
if (args->user_worker) {
/*
* Mark us an IO worker, and block any signal that isn't
* Mark us a user worker, and block any signal that isn't
* fatal or STOP
*/
p->flags |= PF_IO_WORKER;
p->flags |= PF_USER_WORKER;
siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
}
if (args->io_thread)
p->flags |= PF_IO_WORKER;
if (args->name)
strscpy_pad(p->comm, args->name, sizeof(p->comm));
@@ -2517,9 +2517,6 @@ __latent_entropy struct task_struct *copy_process(
if (retval)
goto bad_fork_cleanup_io;
if (args->ignore_signals)
ignore_signals(p);
stackleak_task_init(p);
if (pid != &init_struct_pid) {

kernel/irq/msi.c

@@ -542,7 +542,7 @@ fail:
return ret;
}
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
/**
* msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
* @dev: The device (PCI, platform etc) which will get sysfs entries
@@ -574,7 +574,7 @@ void msi_device_destroy_sysfs(struct device *dev)
msi_for_each_desc(desc, dev, MSI_DESC_ALL)
msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS || CONFIG_PCI_XEN */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }

kernel/locking/lockdep.c

@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
if (entry->class->lock_type == LD_LOCK_NORMAL)
return false;
/*
* Skip local_lock() for irq inversion detection.
*
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
* As a result, we will skip local_lock(), when we search for irq
* inversion bugs.
*/
if (entry->class->lock_type == LD_LOCK_PERCPU) {
if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
return false;
if (entry->class->lock_type == LD_LOCK_PERCPU &&
DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
return false;
return true;
}
/*
* Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
* a lock and only used to override the wait_type.
*/
return false;
return true;
}
/*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
for (; depth < curr->lockdep_depth; depth++) {
struct held_lock *prev = curr->held_locks + depth;
u8 prev_inner = hlock_class(prev)->wait_type_inner;
struct lock_class *class = hlock_class(prev);
u8 prev_inner = class->wait_type_inner;
if (prev_inner) {
/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
* Also due to trylocks.
*/
curr_inner = min(curr_inner, prev_inner);
/*
* Allow override for annotations -- this is typically
* only valid/needed for code that only exists when
* CONFIG_PREEMPT_RT=n.
*/
if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
curr_inner = prev_inner;
}
}
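
Because old and new lines are interleaved above, the net effect of the usage_skip() rewrite is easier to see as a standalone decision function. The sketch below is an illustration only: the lockdep lock types are mocked as a plain enum and the DEBUG_LOCKS_WARN_ON() check is reduced to a boolean flag.

#include <stdbool.h>
#include <stdio.h>

enum lock_type { LD_LOCK_NORMAL, LD_LOCK_PERCPU, LD_LOCK_WAIT_OVERRIDE };

/* stand-in for DEBUG_LOCKS_WARN_ON() firing on a misconfigured percpu lock */
static bool percpu_wait_type_too_weak;

static bool usage_skip_sketch(enum lock_type type)
{
	if (type == LD_LOCK_NORMAL)
		return false;			/* never skip normal locks */
	if (type == LD_LOCK_PERCPU && percpu_wait_type_too_weak)
		return false;			/* warned: do not skip */
	return true;				/* skip local_lock and WAIT_OVERRIDE */
}

int main(void)
{
	printf("normal=%d percpu=%d wait_override=%d\n",
	       usage_skip_sketch(LD_LOCK_NORMAL),
	       usage_skip_sketch(LD_LOCK_PERCPU),
	       usage_skip_sketch(LD_LOCK_WAIT_OVERRIDE));
	return 0;
}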

kernel/module/main.c

@@ -1521,14 +1521,14 @@ static void __layout_sections(struct module *mod, struct load_info *info, bool i
MOD_RODATA,
MOD_RO_AFTER_INIT,
MOD_DATA,
MOD_INVALID, /* This is needed to match the masks array */
MOD_DATA,
};
static const int init_m_to_mem_type[] = {
MOD_INIT_TEXT,
MOD_INIT_RODATA,
MOD_INVALID,
MOD_INIT_DATA,
MOD_INVALID, /* This is needed to match the masks array */
MOD_INIT_DATA,
};
for (m = 0; m < ARRAY_SIZE(masks); ++m) {

kernel/signal.c

@@ -1368,7 +1368,9 @@ int zap_other_threads(struct task_struct *p)
while_each_thread(p, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
count++;
/* Don't require de_thread to wait for the vhost_worker */
if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
count++;
/* Don't bother with already dead threads */
if (t->exit_state)
@@ -2861,11 +2863,11 @@ relock:
}
/*
* PF_IO_WORKER threads will catch and exit on fatal signals
* PF_USER_WORKER threads will catch and exit on fatal signals
* themselves. They have cleanup that must be performed, so
* we cannot call do_exit() on their behalf.
*/
if (current->flags & PF_IO_WORKER)
if (current->flags & PF_USER_WORKER)
goto out;
/*

kernel/trace/trace.c

@@ -60,6 +60,7 @@
*/
bool ring_buffer_expanded;
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
@@ -75,7 +76,6 @@ static bool __read_mostly tracing_selftest_running;
*/
bool __read_mostly tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
if (!tracing_selftest_disabled) {
@@ -83,6 +83,9 @@ void __init disable_tracing_selftest(const char *reason)
pr_info("Ftrace startup test is disabled due to %s\n", reason);
}
}
#else
#define tracing_selftest_running 0
#define tracing_selftest_disabled 0
#endif
/* Pipe tracepoints to printk */
@@ -1051,7 +1054,10 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;
if (unlikely(tracing_selftest_running || tracing_disabled))
if (unlikely(tracing_selftest_running && tr == &global_trace))
return 0;
if (unlikely(tracing_disabled))
return 0;
alloc = sizeof(*entry) + size + 2; /* possible \n added */
@@ -2041,6 +2047,24 @@ static int run_tracer_selftest(struct tracer *type)
return 0;
}
static int do_run_tracer_selftest(struct tracer *type)
{
int ret;
/*
* Tests can take a long time, especially if they are run one after the
* other, as does happen during bootup when all the tracers are
* registered. This could cause the soft lockup watchdog to trigger.
*/
cond_resched();
tracing_selftest_running = true;
ret = run_tracer_selftest(type);
tracing_selftest_running = false;
return ret;
}
static __init int init_trace_selftests(void)
{
struct trace_selftests *p, *n;
@@ -2092,6 +2116,10 @@ static inline int run_tracer_selftest(struct tracer *type)
{
return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);
@@ -2127,8 +2155,6 @@ int __init register_tracer(struct tracer *type)
mutex_lock(&trace_types_lock);
tracing_selftest_running = true;
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
@@ -2157,7 +2183,7 @@ int __init register_tracer(struct tracer *type)
/* store the tracer for __set_tracer_option */
type->flags->trace = type;
ret = run_tracer_selftest(type);
ret = do_run_tracer_selftest(type);
if (ret < 0)
goto out;
@@ -2166,7 +2192,6 @@ int __init register_tracer(struct tracer *type)
add_tracer_options(&global_trace, type);
out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
@@ -3490,7 +3515,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
unsigned int trace_ctx;
char *tbuffer;
if (tracing_disabled || tracing_selftest_running)
if (tracing_disabled)
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
@@ -3538,6 +3563,9 @@ __printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
if (tracing_selftest_running && tr == &global_trace)
return 0;
return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
@@ -5752,7 +5780,7 @@ static const char readme_msg[] =
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"
"\t correspond to fields in the event's format description. Keys\n"
"\t can be any field, or the special string 'stacktrace'.\n"
"\t can be any field, or the special string 'common_stacktrace'.\n"
"\t Compound keys consisting of up to two fields can be specified\n"
"\t by the 'keys' keyword. Values must correspond to numeric\n"
"\t fields. Sort keys consisting of up to two fields can be\n"

kernel/trace/trace_events.c

@@ -194,6 +194,8 @@ static int trace_define_generic_fields(void)
__generic_field(int, common_cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
__generic_field(char *, stacktrace, FILTER_STACKTRACE);
__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);
return ret;
}

kernel/trace/trace_events_hist.c

@@ -1364,7 +1364,7 @@ static const char *hist_field_name(struct hist_field *field,
if (field->field)
field_name = field->field->name;
else
field_name = "stacktrace";
field_name = "common_stacktrace";
} else if (field->flags & HIST_FIELD_FL_HITCOUNT)
field_name = "hitcount";
@@ -2367,7 +2367,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
} else if (strcmp(field_name, "stacktrace") == 0) {
} else if (strcmp(field_name, "common_stacktrace") == 0) {
*flags |= HIST_FIELD_FL_STACKTRACE;
} else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
@@ -2378,11 +2378,15 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
if (!field || !field->size) {
/*
* For backward compatibility, if field_name
* was "cpu", then we treat this the same as
* common_cpu. This also works for "CPU".
* was "cpu" or "stacktrace", then we treat this
* the same as common_cpu and common_stacktrace
* respectively. This also works for "CPU", and
* "STACKTRACE".
*/
if (field && field->filter_type == FILTER_CPU) {
*flags |= HIST_FIELD_FL_CPU;
} else if (field && field->filter_type == FILTER_STACKTRACE) {
*flags |= HIST_FIELD_FL_STACKTRACE;
} else {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
errpos(field_name));
@@ -4238,13 +4242,19 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
goto out;
}
/* Some types cannot be a value */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
ret = -EINVAL;
/* values and variables should not have some modifiers */
if (hist_field->flags & HIST_FIELD_FL_VAR) {
/* Variable */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
goto err;
} else {
/* Value */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
goto err;
}
hist_data->fields[val_idx] = hist_field;
@@ -4256,6 +4266,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
ret = -EINVAL;
out:
return ret;
err:
hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
return -EINVAL;
}
static int create_val_field(struct hist_trigger_data *hist_data,
@@ -5385,7 +5398,7 @@ static void hist_trigger_print_key(struct seq_file *m,
if (key_field->field)
seq_printf(m, "%s.stacktrace", key_field->field->name);
else
seq_puts(m, "stacktrace:\n");
seq_puts(m, "common_stacktrace:\n");
hist_trigger_stacktrace_print(m,
key + key_field->offset,
HIST_STACKTRACE_DEPTH);
@@ -5968,7 +5981,7 @@ static int event_hist_trigger_print(struct seq_file *m,
if (field->field)
seq_printf(m, "%s.stacktrace", field->field->name);
else
seq_puts(m, "stacktrace");
seq_puts(m, "common_stacktrace");
} else
hist_field_print(m, field);
}
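
With the rename above, a histogram can be keyed on 'common_stacktrace'; the old 'stacktrace' spelling keeps working through the backward-compatibility branch added to parse_field(). A small sketch of arming such a trigger from userspace: it assumes tracefs is mounted at /sys/kernel/tracing, CONFIG_HIST_TRIGGERS is enabled, and the program runs as root; sched_switch is just an example event.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_switch/trigger";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* key each bucket on the kernel stack trace at the time of the event */
	fputs("hist:keys=common_stacktrace\n", f);
	fclose(f);
	return 0;
}

The resulting histogram can then be read back from the event's "hist" file.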

kernel/trace/trace_events_user.c

@@ -96,12 +96,12 @@ struct user_event {
* these to track enablement sites that are tied to an event.
*/
struct user_event_enabler {
struct list_head link;
struct list_head mm_enablers_link;
struct user_event *event;
unsigned long addr;
/* Track enable bit, flags, etc. Aligned for bitops. */
unsigned int values;
unsigned long values;
};
/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
@@ -116,7 +116,9 @@ struct user_event_enabler {
/* Only duplicate the bit value */
#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
#define ENABLE_BITOPS(e) ((unsigned long *)&(e)->values)
#define ENABLE_BITOPS(e) (&(e)->values)
#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
@@ -153,7 +155,7 @@ struct user_event_file_info {
#define VALIDATOR_REL (1 << 1)
struct user_event_validator {
struct list_head link;
struct list_head user_event_link;
int offset;
int flags;
};
@@ -259,7 +261,7 @@ error:
static void user_event_enabler_destroy(struct user_event_enabler *enabler)
{
list_del_rcu(&enabler->link);
list_del_rcu(&enabler->mm_enablers_link);
/* No longer tracking the event via the enabler */
refcount_dec(&enabler->event->refcnt);
@@ -423,9 +425,9 @@ static int user_event_enabler_write(struct user_event_mm *mm,
/* Update bit atomically, user tracers must be atomic as well */
if (enabler->event && enabler->event->status)
set_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
set_bit(ENABLE_BIT(enabler), ptr);
else
clear_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
clear_bit(ENABLE_BIT(enabler), ptr);
kunmap_local(kaddr);
unpin_user_pages_dirty_lock(&page, 1, true);
@@ -437,11 +439,9 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
unsigned long uaddr, unsigned char bit)
{
struct user_event_enabler *enabler;
struct user_event_enabler *next;
list_for_each_entry_safe(enabler, next, &mm->enablers, link) {
if (enabler->addr == uaddr &&
(enabler->values & ENABLE_VAL_BIT_MASK) == bit)
list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
return true;
}
@@ -451,23 +451,36 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
static void user_event_enabler_update(struct user_event *user)
{
struct user_event_enabler *enabler;
struct user_event_mm *mm = user_event_mm_get_all(user);
struct user_event_mm *next;
struct user_event_mm *mm;
int attempt;
lockdep_assert_held(&event_mutex);
/*
* We need to build a one-shot list of all the mms that have an
* enabler for the user_event passed in. This list is only valid
* while holding the event_mutex. The only reason for this is due
* to the global mm list being RCU protected and we use methods
* which can wait (mmap_read_lock and pin_user_pages_remote).
*
* NOTE: user_event_mm_get_all() increments the ref count of each
* mm that is added to the list to prevent removal timing windows.
* We must always put each mm after they are used, which may wait.
*/
mm = user_event_mm_get_all(user);
while (mm) {
next = mm->next;
mmap_read_lock(mm->mm);
rcu_read_lock();
list_for_each_entry_rcu(enabler, &mm->enablers, link) {
list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->event == user) {
attempt = 0;
user_event_enabler_write(mm, enabler, true, &attempt);
}
}
rcu_read_unlock();
mmap_read_unlock(mm->mm);
user_event_mm_put(mm);
mm = next;
@@ -495,7 +508,9 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
refcount_inc(&enabler->event->refcnt);
list_add_rcu(&enabler->link, &mm->enablers);
/* Enablers not exposed yet, RCU not required */
list_add(&enabler->mm_enablers_link, &mm->enablers);
return true;
}
@@ -513,6 +528,14 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
struct user_event_enabler *enabler;
struct user_event_mm *mm;
/*
* We use the mm->next field to build a one-shot list from the global
* RCU protected list. To build this list the event_mutex must be held.
* This lets us build a list without requiring allocs that could fail
* when user based events are most wanted for diagnostics.
*/
lockdep_assert_held(&event_mutex);
/*
* We do not want to block fork/exec while enablements are being
* updated, so we use RCU to walk the current tasks that have used
@@ -525,23 +548,24 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
*/
rcu_read_lock();
list_for_each_entry_rcu(mm, &user_event_mms, link)
list_for_each_entry_rcu(enabler, &mm->enablers, link)
list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->event == user) {
mm->next = found;
found = user_event_mm_get(mm);
break;
}
}
}
rcu_read_unlock();
return found;
}
static struct user_event_mm *user_event_mm_create(struct task_struct *t)
static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
struct user_event_mm *user_mm;
unsigned long flags;
user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
@@ -553,12 +577,6 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
refcount_set(&user_mm->refcnt, 1);
refcount_set(&user_mm->tasks, 1);
spin_lock_irqsave(&user_event_mms_lock, flags);
list_add_rcu(&user_mm->link, &user_event_mms);
spin_unlock_irqrestore(&user_event_mms_lock, flags);
t->user_event_mm = user_mm;
/*
* The lifetime of the memory descriptor can slightly outlast
* the task lifetime if a ref to the user_event_mm is taken
@@ -572,6 +590,17 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
return user_mm;
}
static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&user_event_mms_lock, flags);
list_add_rcu(&user_mm->mms_link, &user_event_mms);
spin_unlock_irqrestore(&user_event_mms_lock, flags);
t->user_event_mm = user_mm;
}
static struct user_event_mm *current_user_event_mm(void)
{
struct user_event_mm *user_mm = current->user_event_mm;
@@ -579,10 +608,12 @@ static struct user_event_mm *current_user_event_mm(void)
if (user_mm)
goto inc;
user_mm = user_event_mm_create(current);
user_mm = user_event_mm_alloc(current);
if (!user_mm)
goto error;
user_event_mm_attach(user_mm, current);
inc:
refcount_inc(&user_mm->refcnt);
error:
@@ -593,7 +624,7 @@ static void user_event_mm_destroy(struct user_event_mm *mm)
{
struct user_event_enabler *enabler, *next;
list_for_each_entry_safe(enabler, next, &mm->enablers, link)
list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
user_event_enabler_destroy(enabler);
mmdrop(mm->mm);
@@ -630,7 +661,7 @@ void user_event_mm_remove(struct task_struct *t)
/* Remove the mm from the list, so it can no longer be enabled */
spin_lock_irqsave(&user_event_mms_lock, flags);
list_del_rcu(&mm->link);
list_del_rcu(&mm->mms_link);
spin_unlock_irqrestore(&user_event_mms_lock, flags);
/*
@@ -670,7 +701,7 @@ void user_event_mm_remove(struct task_struct *t)
void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
struct user_event_mm *mm = user_event_mm_create(t);
struct user_event_mm *mm = user_event_mm_alloc(t);
struct user_event_enabler *enabler;
if (!mm)
@@ -678,16 +709,18 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
rcu_read_lock();
list_for_each_entry_rcu(enabler, &old_mm->enablers, link)
list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
if (!user_event_enabler_dup(enabler, mm))
goto error;
}
rcu_read_unlock();
user_event_mm_attach(mm, t);
return;
error:
rcu_read_unlock();
user_event_mm_remove(t);
user_event_mm_destroy(mm);
}
static bool current_user_event_enabler_exists(unsigned long uaddr,
@@ -748,7 +781,7 @@ retry:
*/
if (!*write_result) {
refcount_inc(&enabler->event->refcnt);
list_add_rcu(&enabler->link, &user_mm->enablers);
list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
}
mutex_unlock(&event_mutex);
@@ -904,8 +937,8 @@ static void user_event_destroy_validators(struct user_event *user)
struct user_event_validator *validator, *next;
struct list_head *head = &user->validators;
list_for_each_entry_safe(validator, next, head, link) {
list_del(&validator->link);
list_for_each_entry_safe(validator, next, head, user_event_link) {
list_del(&validator->user_event_link);
kfree(validator);
}
}
@@ -959,7 +992,7 @@ add_validator:
validator->offset = offset;
/* Want sequential access when validating */
list_add_tail(&validator->link, &user->validators);
list_add_tail(&validator->user_event_link, &user->validators);
add_field:
field->type = type;
@@ -1349,7 +1382,7 @@ static int user_event_validate(struct user_event *user, void *data, int len)
void *pos, *end = data + len;
u32 loc, offset, size;
list_for_each_entry(validator, head, link) {
list_for_each_entry(validator, head, user_event_link) {
pos = data + validator->offset;
/* Already done min_size check, no bounds check here */
@@ -2270,9 +2303,9 @@ static long user_events_ioctl_unreg(unsigned long uarg)
*/
mutex_lock(&event_mutex);
list_for_each_entry_safe(enabler, next, &mm->enablers, link)
list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
if (enabler->addr == reg.disable_addr &&
(enabler->values & ENABLE_VAL_BIT_MASK) == reg.disable_bit) {
ENABLE_BIT(enabler) == reg.disable_bit) {
set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
@@ -2281,6 +2314,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
/* Removed at least one */
ret = 0;
}
}
mutex_unlock(&event_mutex);
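
The enabler's values field becomes an unsigned long above so that its address can be handed straight to the atomic bitops (set_bit()/clear_bit() operate on whole unsigned long words), and ENABLE_BIT() extracts the target bit number from its low bits. A small userspace sketch of that encoding; ENABLE_VAL_BIT_MASK is assumed to be 0x3f from the "Bits 0-5" comment, since its definition is not shown in this hunk.

#include <stdio.h>

#define ENABLE_VAL_BIT_MASK	0x3f	/* assumption: low six bits hold the bit number */

struct sketch_enabler {
	unsigned long values;		/* unsigned long so &values is valid for bitops */
};

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))

int main(void)
{
	struct sketch_enabler e = { .values = 37 };	/* enable bit 37, no flag bits */
	unsigned long word = 0;

	/* roughly what set_bit(ENABLE_BIT(enabler), ptr) does to the user's word */
	word |= 1UL << ENABLE_BIT(&e);
	printf("bit=%d word=%#lx\n", ENABLE_BIT(&e), word);
	return 0;
}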

kernel/trace/trace_osnoise.c

@@ -1652,6 +1652,8 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
osnoise_stop_tracing();
notify_new_max_latency(diff);
wake_up_process(tlat->kthread);
return HRTIMER_NORESTART;
}
}

kernel/trace/trace_selftest.c

@@ -848,6 +848,12 @@ trace_selftest_startup_function_graph(struct tracer *trace,
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* These tests can take some time to run. Make sure on non PREEMPT
* kernels, we do not trigger the softlockup detector.
*/
cond_resched();
tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
@@ -869,6 +875,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
if (ret)
goto out;
cond_resched();
ret = register_ftrace_graph(&fgraph_ops);
if (ret) {
warn_failed_init_tracer(trace, ret);
@@ -891,6 +899,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
if (ret)
goto out;
cond_resched();
tracing_start();
if (!ret && !count) {

kernel/vhost_task.c

@@ -12,58 +12,88 @@ enum vhost_task_flags {
VHOST_TASK_FLAGS_STOP,
};
struct vhost_task {
bool (*fn)(void *data);
void *data;
struct completion exited;
unsigned long flags;
struct task_struct *task;
};
static int vhost_task_fn(void *data)
{
struct vhost_task *vtsk = data;
int ret;
bool dead = false;
for (;;) {
bool did_work;
/* mb paired w/ vhost_task_stop */
if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags))
break;
if (!dead && signal_pending(current)) {
struct ksignal ksig;
/*
* Calling get_signal will block in SIGSTOP,
* or clear fatal_signal_pending, but remember
* what was set.
*
* This thread won't actually exit until all
* of the file descriptors are closed, and
* the release function is called.
*/
dead = get_signal(&ksig);
if (dead)
clear_thread_flag(TIF_SIGPENDING);
}
did_work = vtsk->fn(vtsk->data);
if (!did_work) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
}
ret = vtsk->fn(vtsk->data);
complete(&vtsk->exited);
do_exit(ret);
do_exit(0);
}
/**
* vhost_task_wake - wakeup the vhost_task
* @vtsk: vhost_task to wake
*
* wake up the vhost_task worker thread
*/
void vhost_task_wake(struct vhost_task *vtsk)
{
wake_up_process(vtsk->task);
}
EXPORT_SYMBOL_GPL(vhost_task_wake);
/**
* vhost_task_stop - stop a vhost_task
* @vtsk: vhost_task to stop
*
* Callers must call vhost_task_should_stop and return from their worker
* function when it returns true;
* vhost_task_fn ensures the worker thread exits after
* VHOST_TASK_FLAGS_STOP becomes true.
*/
void vhost_task_stop(struct vhost_task *vtsk)
{
pid_t pid = vtsk->task->pid;
set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
wake_up_process(vtsk->task);
vhost_task_wake(vtsk);
/*
* Make sure vhost_task_fn is no longer accessing the vhost_task before
* freeing it below. If userspace crashed or exited without closing,
* then the vhost_task->task could already be marked dead so
* kernel_wait will return early.
* freeing it below.
*/
wait_for_completion(&vtsk->exited);
/*
* If we are just closing/removing a device and the parent process is
* not exiting then reap the task.
*/
kernel_wait4(pid, NULL, __WCLONE, NULL);
kfree(vtsk);
}
EXPORT_SYMBOL_GPL(vhost_task_stop);
/**
* vhost_task_should_stop - should the vhost task return from the work function
* @vtsk: vhost_task to stop
*/
bool vhost_task_should_stop(struct vhost_task *vtsk)
{
return test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
}
EXPORT_SYMBOL_GPL(vhost_task_should_stop);
/**
* vhost_task_create - create a copy of a process to be used by the kernel
* @fn: thread stack
* vhost_task_create - create a copy of a task to be used by the kernel
* @fn: vhost worker function
* @arg: data to be passed to fn
* @name: the thread's name
*
@@ -71,17 +101,17 @@ EXPORT_SYMBOL_GPL(vhost_task_should_stop);
* failure. The returned task is inactive, and the caller must fire it up
* through vhost_task_start().
*/
struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg,
struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
const char *name)
{
struct kernel_clone_args args = {
.flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM,
.flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM |
CLONE_THREAD | CLONE_SIGHAND,
.exit_signal = 0,
.fn = vhost_task_fn,
.name = name,
.user_worker = 1,
.no_files = 1,
.ignore_signals = 1,
};
struct vhost_task *vtsk;
struct task_struct *tsk;
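
For context, here is a hedged sketch of how a caller might drive the reworked API, using only the entry points visible in this file (vhost_task_create(), vhost_task_start(), vhost_task_wake(), vhost_task_stop()). The my_dev/my_work types and the my_dev_* helpers are made-up stand-ins, not part of this commit.

#include <linux/errno.h>
#include <linux/sched/vhost_task.h>

static bool my_dev_worker(void *data)
{
	struct my_dev *dev = data;		/* hypothetical device context */
	struct my_work *work;
	bool did_work = false;

	/* drain the queue; returning false lets vhost_task_fn go to sleep */
	while ((work = my_dev_dequeue(dev))) {	/* hypothetical helper */
		my_dev_handle(dev, work);	/* hypothetical helper */
		did_work = true;
	}
	return did_work;
}

static int my_dev_open(struct my_dev *dev)
{
	dev->worker = vhost_task_create(my_dev_worker, dev, "my-dev-worker");
	if (!dev->worker)
		return -ENOMEM;
	vhost_task_start(dev->worker);
	return 0;
}

static void my_dev_queue(struct my_dev *dev, struct my_work *work)
{
	my_dev_enqueue(dev, work);		/* hypothetical helper */
	vhost_task_wake(dev->worker);		/* kick the worker thread */
}

static void my_dev_release(struct my_dev *dev)
{
	/* blocks until vhost_task_fn observes VHOST_TASK_FLAGS_STOP and exits */
	vhost_task_stop(dev->worker);
}

Because the worker is now created with CLONE_THREAD and CLONE_SIGHAND (visible in the kernel_clone_args above), it appears as a thread of the owning process rather than as a separate kthread-like child, which is the ps/freezer behavior this series is restoring.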