Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bridge/br_mdb.c

The br_mdb.c conflict was a function call being removed to fix a bug in
'net' but whose signature was changed in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
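That conflict shape is worth a sketch. Below is a hypothetical rendering with conflict markers (the function name and arguments are illustrative, not the actual br_mdb.c contents): 'net' deleted a call to fix a bug, while 'net-next' changed that same call's signature.

<<<<<<< net-next
	mdb_notify_sketch(dev, entry, RTM_NEWMDB, state);	/* signature gained an argument */
=======
>>>>>>> net

The resolution takes 'net''s side (the empty one): the buggy call is gone, so the signature change is moot at a call site that no longer exists.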
diff --git a/kernel/cpu.c b/kernel/cpu.c
@@ -527,18 +527,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
-	/*
-	 * Some architectures have to walk the irq descriptors to
-	 * setup the vector space for the cpu which comes online.
-	 * Prevent irq alloc/free across the bringup.
-	 */
-	irq_lock_sparse();
-
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
-
-	irq_unlock_sparse();
 
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
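For context, irq_lock_sparse() and irq_unlock_sparse() bracket the arch-specific bringup so that sparse irq descriptors cannot be allocated or freed while the architecture walks them to set up the vector space. A rough userspace analogy of that bracketing, using a pthread mutex in place of the kernel's sparse-irq lock (illustrative names, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the sparse irq descriptor table and its lock. */
static pthread_mutex_t sparse_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_descs = 16;

/* Concurrent alloc/free of descriptors takes the same lock... */
static void alloc_descs(int n)
{
	pthread_mutex_lock(&sparse_lock);
	nr_descs += n;
	pthread_mutex_unlock(&sparse_lock);
}

/* ...so the walk during bringup sees a stable table. */
static int bring_up_cpu(void)
{
	int i, seen = 0;

	pthread_mutex_lock(&sparse_lock);	/* irq_lock_sparse() */
	for (i = 0; i < nr_descs; i++)		/* arch walks the descriptors */
		seen++;
	pthread_mutex_unlock(&sparse_lock);	/* irq_unlock_sparse() */
	return seen;
}

int main(void)
{
	alloc_descs(4);
	printf("walked %d descriptors\n", bring_up_cpu());
	return 0;
}

(Compile with -pthread.)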
diff --git a/kernel/fork.c b/kernel/fork.c
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
 	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
 }
 
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+/* Initialized by the architecture: */
+int arch_task_struct_size __read_mostly;
+#endif
+
 void __init fork_init(void)
 {
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
 #endif
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
-		kmem_cache_create("task_struct", sizeof(struct task_struct),
+		kmem_cache_create("task_struct", arch_task_struct_size,
 			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
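The two fork.c hunks only make sense as a pair: an architecture that selects CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT sets arch_task_struct_size at boot (x86 uses this to size the FPU state dynamically), so the task_struct slab must be created from that runtime variable rather than from the compile-time sizeof. A minimal userspace sketch of the idiom, assuming hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct task { long state; /* arch-specific state follows, sized at boot */ };

/* Compile-time lower bound; "arch" setup may grow it before any task
 * is allocated, mirroring arch_task_struct_size. */
static size_t task_size = sizeof(struct task);

static void arch_init(size_t extra)
{
	task_size = sizeof(struct task) + extra;
}

static struct task *alloc_task(void)
{
	/* Using sizeof(struct task) here would under-allocate once
	 * arch_init() has run: the bug shape the hunk above fixes. */
	return calloc(1, task_size);
}

int main(void)
{
	arch_init(64);	/* e.g. an extended FPU save area */
	struct task *t = alloc_task();
	printf("task allocation size: %zu\n", task_size);
	free(t);
	return 0;
}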
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 		/*
-		 * If the interrupt has a parent irq and runs
-		 * in the thread context of the parent irq,
-		 * retrigger the parent.
+		 * If the interrupt is running in the thread
+		 * context of the parent irq we need to be
+		 * careful, because we cannot trigger it
+		 * directly.
 		 */
-		if (desc->parent_irq &&
-		    irq_settings_is_nested_thread(desc))
+		if (irq_settings_is_nested_thread(desc)) {
+			/*
+			 * If the parent_irq is valid, we
+			 * retrigger the parent, otherwise we
+			 * do nothing.
+			 */
+			if (!desc->parent_irq)
+				return;
 			irq = desc->parent_irq;
+		}
 		/* Set it pending and activate the softirq: */
 		set_bit(irq, irqs_resend);
 		tasklet_schedule(&resend_tasklet);
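The rewritten branch fixes the case of a nested-thread interrupt with no parent: the old code fell through and set the resend bit for the nested irq itself, which cannot be triggered directly; the new code either redirects to a valid parent or does nothing. Pulled out as a standalone predicate, the control flow reads like this (a sketch with simplified types, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct desc {
	bool nested_thread;		/* irq_settings_is_nested_thread() */
	unsigned int parent_irq;	/* 0 means no parent */
};

/* Returns the irq to mark pending, or 0 to skip the resend. */
static unsigned int resend_target(const struct desc *desc, unsigned int irq)
{
	if (desc->nested_thread) {
		if (!desc->parent_irq)
			return 0;	/* cannot trigger it directly: do nothing */
		irq = desc->parent_irq;	/* retrigger the parent instead */
	}
	return irq;	/* ordinary irq: resend as before */
}

int main(void)
{
	const struct desc orphan = { .nested_thread = true, .parent_irq = 0 };
	const struct desc child  = { .nested_thread = true, .parent_irq = 9 };

	/* Prints "0 9": the orphan is skipped, the child redirects. */
	printf("%u %u\n", resend_target(&orphan, 5), resend_target(&child, 5));
	return 0;
}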
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
 	raw_spin_lock(&cfs_b->lock);
-	empty = list_empty(&cfs_rq->throttled_list);
+	empty = list_empty(&cfs_b->throttled_cfs_rq);
 
 	/*
 	 * Add to the _head_ of the list, so that an already-started
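This one-liner fixes a classic list API slip: list_empty() on the embedded entry (cfs_rq->throttled_list) tests whether that single node is linked anywhere, not whether the bandwidth timer's queue has members, so the "are we the first throttled runqueue" check never meant what it said; testing the list head (cfs_b->throttled_cfs_rq) does. A self-contained demo of the difference, using a minimal kernel-style list:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

int main(void)
{
	struct list_head throttled_cfs_rq;	/* the head, lives in cfs_b */
	struct list_head rq_a, rq_b;		/* two runqueues' entries */

	INIT_LIST_HEAD(&throttled_cfs_rq);
	INIT_LIST_HEAD(&rq_a);
	INIT_LIST_HEAD(&rq_b);

	list_add(&rq_a, &throttled_cfs_rq);	/* rq_a is already throttled */

	/* Now throttle rq_b. The head correctly reports "not empty"
	 * (rq_b is not the first), but rq_b's own entry still reads as
	 * "empty": prints "head empty: 0  entry empty: 1". The pre-fix
	 * test used the entry and so misjudged the first-throttle case. */
	printf("head empty: %d  entry empty: %d\n",
	       list_empty(&throttled_cfs_rq), list_empty(&rq_b));
	return 0;
}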
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
@@ -839,7 +839,6 @@ out:
 	raw_spin_unlock(&tick_broadcast_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
 
 /*
  * Reset the one shot broadcast for a cpu
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
@@ -363,6 +363,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 
 	return __tick_broadcast_oneshot_control(state);
 }
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
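These two hunks are one move: EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control) had been left in tick-broadcast.c next to __tick_broadcast_oneshot_control(), while the function it actually names is defined in tick-common.c, and tick-broadcast.c is only built when broadcast support is configured, so the export could silently disappear. The rule the fix enforces is that an export annotation lives with the definition it names. A userspace sketch of that rule, with a registration macro standing in for EXPORT_SYMBOL_GPL (GCC/Clang constructor attribute, illustrative names):

#include <stdio.h>

/* Records the name of the last "exported" symbol at startup. */
static const char *last_export;
#define EXPORT_SKETCH(sym) \
	static void __attribute__((constructor)) export_##sym(void) \
	{ last_export = #sym; }

static int control_impl(int state) { return state; }	/* the tick-broadcast.c side */

int control(int state) { return control_impl(state); }	/* the tick-common.c side */
EXPORT_SKETCH(control)	/* placed with the definition it names, as the fix does */

int main(void)
{
	printf("exported %s -> %d\n", last_export, control(1));
	return 0;
}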
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
@@ -444,6 +444,7 @@ enum {
 
 	TRACE_CONTROL_BIT,
 
+	TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct trace_branch *entry;
 	struct ring_buffer *buffer;
 	unsigned long flags;
-	int cpu, pc;
+	int pc;
 	const char *p;
 
+	if (current->trace_recursion & TRACE_BRANCH_BIT)
+		return;
+
 	/*
 	 * I would love to save just the ftrace_likely_data pointer, but
 	 * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (unlikely(!tr))
 		return;
 
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-	if (atomic_inc_return(&data->disabled) != 1)
+	raw_local_irq_save(flags);
+	current->trace_recursion |= TRACE_BRANCH_BIT;
+	data = this_cpu_ptr(tr->trace_buffer.data);
+	if (atomic_read(&data->disabled))
 		goto out;
 
 	pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	__buffer_unlock_commit(buffer, event);
 
  out:
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	current->trace_recursion &= ~TRACE_BRANCH_BIT;
+	raw_local_irq_restore(flags);
 }
 
 static inline
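Taken together, the trace.h and trace_branch.c hunks replace the per-cpu disabled counter with a per-task recursion bit: probe_likely_condition() sets TRACE_BRANCH_BIT in current->trace_recursion on entry, bails out if it is already set, and clears it on exit, so a likely()/unlikely() evaluated inside code the probe itself calls cannot recurse into the tracer. The switch to raw_local_irq_save() fits the same theme, presumably because the traced irq-disable path can itself contain annotated branches. A compact userspace rendering of the guard, with thread-local state standing in for current (illustrative names):

#include <stdio.h>

enum { TRACE_BRANCH_BIT = 1 << 0 };	/* one bit of per-task state */

static _Thread_local unsigned int trace_recursion;	/* current->trace_recursion */
static int events;

static void probe(int cond)
{
	/* Already inside the probe on this task: refuse to recurse. */
	if (trace_recursion & TRACE_BRANCH_BIT)
		return;
	trace_recursion |= TRACE_BRANCH_BIT;

	events++;		/* record the branch event */
	if (cond)
		probe(cond);	/* a nested "branch" hits the guard and bails */

	trace_recursion &= ~TRACE_BRANCH_BIT;
}

int main(void)
{
	probe(1);
	printf("events recorded: %d\n", events);	/* 1, not unbounded */
	return 0;
}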