mm/vmalloc: check free space in vmap_block lockless
vb_alloc() unconditionally locks a vmap_block on the free list to check the free space.

This can be done locklessly because vmap_block::free never increases, it's only decreased on allocations. Check the free space locklessly and only if that succeeds, recheck under the lock.

Link: https://lkml.kernel.org/r/20230525124504.750481992@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is 43d7650234, with parent a09fad96ff.
@@ -2168,6 +2168,9 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		unsigned long pages_off;

+		if (READ_ONCE(vb->free) < (1UL << order))
+			continue;
+
 		spin_lock(&vb->lock);
 		if (vb->free < (1UL << order)) {
 			spin_unlock(&vb->lock);
@@ -2176,7 +2179,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)

 		pages_off = VMAP_BBMAP_BITS - vb->free;
 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
-		vb->free -= 1UL << order;
+		WRITE_ONCE(vb->free, vb->free - (1UL << order));
 		bitmap_set(vb->used_map, pages_off, (1UL << order));
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
|
Loading…
Reference in New Issue
Block a user