mm/filemap: inline __wait_on_page_locked_async into caller
The previous patch removed wait_on_page_locked_async(), so inline
__wait_on_page_locked_async() into __lock_page_async().

Link: https://lkml.kernel.org/r/20210122160140.223228-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
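For context: after this change, __lock_page_async() is the slow path behind the
inline lock_page_async() wrapper, which is only entered when a trylock fails. A
rough sketch of that wrapper as it stood around this kernel version (from
include/linux/pagemap.h; simplified from memory, not part of this diff):

	static inline int lock_page_async(struct page *page,
					  struct wait_page_queue *wait)
	{
		/* Fast path: try to take the page lock without sleeping. */
		if (!trylock_page(page))
			return __lock_page_async(page, wait);
		return 0;
	}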
commit f32b5dd721
parent bd8a1f3655
 mm/filemap.c | 53 ++++++++++++++++++++++-------------------------------
 1 file changed, 22 insertions(+), 31 deletions(-)
@@ -1343,36 +1343,6 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
-static int __wait_on_page_locked_async(struct page *page,
-				       struct wait_page_queue *wait, bool set)
-{
-	struct wait_queue_head *q = page_waitqueue(page);
-	int ret = 0;
-
-	wait->page = page;
-	wait->bit_nr = PG_locked;
-
-	spin_lock_irq(&q->lock);
-	__add_wait_queue_entry_tail(q, &wait->wait);
-	SetPageWaiters(page);
-	if (set)
-		ret = !trylock_page(page);
-	else
-		ret = PageLocked(page);
-	/*
-	 * If we were successful now, we know we're still on the
-	 * waitqueue as we're still under the lock. This means it's
-	 * safe to remove and return success, we know the callback
-	 * isn't going to trigger.
-	 */
-	if (!ret)
-		__remove_wait_queue(q, &wait->wait);
-	else
-		ret = -EIOCBQUEUED;
-	spin_unlock_irq(&q->lock);
-	return ret;
-}
-
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -1548,7 +1518,28 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
-	return __wait_on_page_locked_async(page, wait, true);
+	struct wait_queue_head *q = page_waitqueue(page);
+	int ret = 0;
+
+	wait->page = page;
+	wait->bit_nr = PG_locked;
+
+	spin_lock_irq(&q->lock);
+	__add_wait_queue_entry_tail(q, &wait->wait);
+	SetPageWaiters(page);
+	ret = !trylock_page(page);
+	/*
+	 * If we were successful now, we know we're still on the
+	 * waitqueue as we're still under the lock. This means it's
+	 * safe to remove and return success, we know the callback
+	 * isn't going to trigger.
+	 */
+	if (!ret)
+		__remove_wait_queue(q, &wait->wait);
+	else
+		ret = -EIOCBQUEUED;
+	spin_unlock_irq(&q->lock);
+	return ret;
 }
 
 /*
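A return of -EIOCBQUEUED tells the caller that the page lock was not acquired:
the wait entry stays queued, and its wake function fires when the page is
unlocked, re-driving the I/O instead of blocking. A hedged sketch of the
calling pattern (modelled loosely on generic_file_buffered_read() with
IOCB_WAITQ; simplified, not verbatim kernel code):

	/* Caller has populated iocb->ki_waitq with a wake function. */
	error = lock_page_async(page, iocb->ki_waitq);
	if (error)
		return error;	/* usually -EIOCBQUEUED: the wake callback
				 * resubmits the read once the page is
				 * unlocked, so do not sleep here */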