aio: Convert to migrate_folio
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 1d5b9bd656
commit 3648951ceb

fs/aio.c | 36

@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-			struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+			struct folio *src, enum migrate_mode mode)
 {
 	struct kioctx *ctx;
 	unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		goto out;
 	}
 
-	idx = old->index;
+	idx = src->index;
 	if (idx < (pgoff_t)ctx->nr_pages) {
-		/* Make sure the old page hasn't already been changed */
-		if (ctx->ring_pages[idx] != old)
+		/* Make sure the old folio hasn't already been changed */
+		if (ctx->ring_pages[idx] != &src->page)
 			rc = -EAGAIN;
 	} else
 		rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		goto out_unlock;
 
 	/* Writeback must be complete */
-	BUG_ON(PageWriteback(old));
-	get_page(new);
+	BUG_ON(folio_test_writeback(src));
+	folio_get(dst);
 
-	rc = migrate_page_move_mapping(mapping, new, old, 1);
+	rc = folio_migrate_mapping(mapping, dst, src, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
-		put_page(new);
+		folio_put(dst);
 		goto out_unlock;
 	}
 
 	/* Take completion_lock to prevent other writes to the ring buffer
-	 * while the old page is copied to the new. This prevents new
+	 * while the old folio is copied to the new. This prevents new
 	 * events from being lost.
 	 */
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	migrate_page_copy(new, old);
-	BUG_ON(ctx->ring_pages[idx] != old);
-	ctx->ring_pages[idx] = new;
+	folio_migrate_copy(dst, src);
+	BUG_ON(ctx->ring_pages[idx] != &src->page);
+	ctx->ring_pages[idx] = &dst->page;
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	/* The old page is no longer accessible. */
-	put_page(old);
+	/* The old folio is no longer accessible. */
+	folio_put(src);
 
 out_unlock:
 	mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ out:
 	spin_unlock(&mapping->private_lock);
 	return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
 	.dirty_folio	= noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-	.migratepage	= aio_migratepage,
-#endif
+	.migrate_folio	= aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
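
For comparison, an address_space whose folios carry no private state does not need a hand-rolled callback like aio's, which must also swap the pointer stored in ctx->ring_pages[] under completion_lock. The sketch below is not part of this commit; it assumes the generic migrate_folio() helper added elsewhere in the folio conversion series, and example_aops is a made-up name for illustration:

/*
 * Illustrative sketch only (not from this commit): an address_space
 * with no per-folio private data can point .migrate_folio at the
 * generic migrate_folio() helper instead of implementing its own.
 * aio cannot do this because migration must also update
 * ctx->ring_pages[] while holding completion_lock.
 */
#include <linux/fs.h>		/* struct address_space_operations */
#include <linux/migrate.h>	/* migrate_folio() */
#include <linux/pagemap.h>	/* noop_dirty_folio() */

static const struct address_space_operations example_aops = {
	.dirty_folio	= noop_dirty_folio,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migrate_folio	= migrate_folio,
#endif
};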