sbitmap: remove swap_lock
map->swap_lock protects map->cleared from concurrent modification; however, sbitmap_deferred_clear() already drains it atomically, so bits cannot be lost even when sbitmap_deferred_clear() runs concurrently.

A single-threaded, tag-heavy test on top of null_blk showed a ~1.5% throughput increase, and the share of cycles spent in sbitmap_get() dropped from 3% to 1% according to perf.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 661d4f55a7
parent b78beea038
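To illustrate the reasoning in the commit message, below is a small user-space sketch assuming nothing beyond C11 atomics and pthreads; it is an illustration, not code from this patch, and the names deferred_clear(), drainer() and the counters are invented. Two threads drain a shared "cleared" word concurrently: atomic_exchange() gives each drainer a private snapshot while zeroing the word in the same step, so every bit that was set is drained exactly once and the final counters match, with no lock involved.

/*
 * User-space sketch (not kernel code) of the lock-free pattern the patch
 * relies on.  All names here are made up for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long word;     /* bits currently allocated          */
static _Atomic unsigned long cleared;  /* bits freed but not yet cleared    */
static _Atomic unsigned long drained;  /* total bits the drainers picked up */

/* Rough analogue of sbitmap_deferred_clear() after this patch. */
static int deferred_clear(void)
{
	unsigned long mask, val;

	if (!atomic_load(&cleared))
		return 0;

	/* Grab a stable snapshot of the cleared bits, zeroing them in one step. */
	mask = atomic_exchange(&cleared, 0);

	/* Drop those bits from "word" with a CAS loop, as the kernel code does. */
	val = atomic_load(&word);
	while (!atomic_compare_exchange_weak(&word, &val, val & ~mask))
		;

	atomic_fetch_add(&drained, (unsigned long)__builtin_popcountl(mask));
	return 1;
}

static void *drainer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		deferred_clear();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	unsigned long set_bits = 0;

	atomic_store(&word, ~0UL);
	pthread_create(&t1, NULL, drainer, NULL);
	pthread_create(&t2, NULL, drainer, NULL);

	/* Keep marking bits as deferred-cleared while the drainers run. */
	for (int i = 0; i < 1000000; i++) {
		unsigned long bit = 1UL << (i % 64);

		if (!(atomic_fetch_or(&cleared, bit) & bit))
			set_bits++;             /* counted only on 0 -> 1 */
	}

	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	deferred_clear();                       /* drain any leftovers */

	printf("set %lu bits, drained %lu bits\n",
	       set_bits, atomic_load(&drained));
	return set_bits != atomic_load(&drained);
}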
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -32,11 +32,6 @@ struct sbitmap_word {
 	 * @cleared: word holding cleared bits
 	 */
 	unsigned long cleared ____cacheline_aligned_in_smp;
-
-	/**
-	 * @swap_lock: Held while swapping word <-> cleared
-	 */
-	spinlock_t swap_lock;
 } ____cacheline_aligned_in_smp;
 
 /**
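For orientation, struct sbitmap_word after this hunk reads roughly as below; the @depth and @word members lie outside the hunk and are reproduced from the surrounding header as context, not as part of the change:

struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;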
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -15,13 +15,9 @@
 static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 {
 	unsigned long mask, val;
-	bool ret = false;
-	unsigned long flags;
 
-	spin_lock_irqsave(&map->swap_lock, flags);
-
-	if (!map->cleared)
-		goto out_unlock;
+	if (!READ_ONCE(map->cleared))
+		return false;
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
@@ -35,10 +31,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 		val = map->word;
 	} while (cmpxchg(&map->word, val, val & ~mask) != val);
 
-	ret = true;
-out_unlock:
-	spin_unlock_irqrestore(&map->swap_lock, flags);
-	return ret;
+	return true;
 }
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -80,7 +73,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 	for (i = 0; i < sb->map_nr; i++) {
 		sb->map[i].depth = min(depth, bits_per_word);
 		depth -= sb->map[i].depth;
-		spin_lock_init(&sb->map[i].swap_lock);
 	}
 	return 0;
 }
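Put together, the two lib/sbitmap.c hunks above leave sbitmap_deferred_clear() looking roughly like this; the xchg() of map->cleared and the comment above the cmpxchg() loop sit between the hunks and are filled in from the surrounding source, so treat them as context. The READ_ONCE() check only keeps the early exit cheap; correctness rests on xchg() handing the cleared mask to exactly one caller.

static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask, val;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = map->word;
	} while (cmpxchg(&map->word, val, val & ~mask) != val);

	return true;
}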