Merge branch 'linus' into sched/core, to resolve conflict
Conflicts:
	arch/sparc/include/asm/topology_64.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
-		}
-	}
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
 
-	ret = -EAGAIN;
-
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+			if (i-- == 0)
+				return cpu;
+		}
+	}
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
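The replacement also changes the calling convention: instead of filling a caller-supplied cpumask and possibly failing with -ENOMEM or -EAGAIN, the helper now returns a CPU number and cannot fail. A minimal sketch of how a multi-queue driver might use it to spread IRQ affinity hints; struct foo_priv and foo_set_irq_affinity() are made-up names, while cpumask_local_spread(), cpumask_of() and irq_set_affinity_hint() are the kernel APIs involved:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct foo_priv {			/* hypothetical driver state */
	int irq[8];			/* one IRQ per RX queue */
	int nqueues;
	int node;			/* NUMA node of the device */
};

static void foo_set_irq_affinity(struct foo_priv *priv)
{
	int q;

	for (q = 0; q < priv->nqueues; q++) {
		/* q'th online CPU; CPUs on priv->node are handed out first */
		unsigned int cpu = cpumask_local_spread(q, priv->node);

		irq_set_affinity_hint(priv->irq[q], cpumask_of(cpu));
	}
}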
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
 	s64 count;
 
 	count = percpu_counter_read(fbc);
 	/* Check to see if rough count will be sufficient for comparison */
-	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+	if (abs(count - rhs) > (batch * num_online_cpus())) {
 		if (count > rhs)
 			return 1;
 		else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 	else
 		return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
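Since the export now carries the batch-aware name, existing percpu_counter_compare() callers presumably keep working through a header wrapper that passes the global default batch; a sketch of that wrapper (the header side is not part of this hunk):

/* Sketch: preserve the old entry point on top of the batch-aware one. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

A caller that adds to the counter in large batches can pass that same batch size to __percpu_counter_compare(), so the cheap percpu_counter_read() path stays accurate enough instead of degrading to a full per-CPU sum.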
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 	if (key && rhashtable_lookup_fast(ht, key, ht->p))
 		goto exit;
 
+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto exit;
+
 	err = -EAGAIN;
 	if (rhashtable_check_elasticity(ht, tbl, hash) ||
 	    rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->max_size)
 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+	if (params->insecure_max_entries)
+		ht->p.insecure_max_entries =
+			rounddown_pow_of_two(params->insecure_max_entries);
+	else
+		ht->p.insecure_max_entries = ht->p.max_size * 2;
+
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
 	/* The maximum (not average) chain length grows with the
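rht_grow_above_max() itself is not shown in this diff, but the new <linux/atomic.h> include and the insecure_max_entries default set up in rhashtable_init() suggest a check along these lines (a sketch, not this commit's code):

/*
 * Sketch of the implied cap check: insertion is refused with -E2BIG
 * once the element count reaches the configured hard limit.
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

Rounding the limit down to a power of two, and defaulting it to twice max_size, keeps the cap consistent with the table-size bounds computed just above it.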