aio: Convert to migrate_folio

Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Author: Matthew Wilcox (Oracle)
Date:   2022-06-06 10:47:21 -04:00
Parent: 1d5b9bd656
Commit: 3648951ceb
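
The conversion is mechanical: the page-based ->migratepage address_space operation becomes the folio-based ->migrate_folio, and each page call is replaced by its folio equivalent: PageWriteback() by folio_test_writeback(), get_page()/put_page() by folio_get()/folio_put(), migrate_page_move_mapping() by folio_migrate_mapping(), and migrate_page_copy() by folio_migrate_copy(). For orientation, a sketch of the two hook shapes in struct address_space_operations (illustrative, not part of this diff):

/* Sketch only: the old and new migration hooks; parameter
 * names are illustrative.
 */
int (*migratepage)(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode);
int (*migrate_folio)(struct address_space *mapping,
                struct folio *dst, struct folio *src,
                enum migrate_mode mode);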

diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-                        struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+                        struct folio *src, enum migrate_mode mode)
 {
         struct kioctx *ctx;
         unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                 goto out;
         }
 
-        idx = old->index;
+        idx = src->index;
         if (idx < (pgoff_t)ctx->nr_pages) {
-                /* Make sure the old page hasn't already been changed */
-                if (ctx->ring_pages[idx] != old)
+                /* Make sure the old folio hasn't already been changed */
+                if (ctx->ring_pages[idx] != &src->page)
                         rc = -EAGAIN;
         } else
                 rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                 goto out_unlock;
 
         /* Writeback must be complete */
-        BUG_ON(PageWriteback(old));
-        get_page(new);
+        BUG_ON(folio_test_writeback(src));
+        folio_get(dst);
 
-        rc = migrate_page_move_mapping(mapping, new, old, 1);
+        rc = folio_migrate_mapping(mapping, dst, src, 1);
         if (rc != MIGRATEPAGE_SUCCESS) {
-                put_page(new);
+                folio_put(dst);
                 goto out_unlock;
         }
 
         /* Take completion_lock to prevent other writes to the ring buffer
-         * while the old page is copied to the new. This prevents new
+         * while the old folio is copied to the new. This prevents new
          * events from being lost.
          */
         spin_lock_irqsave(&ctx->completion_lock, flags);
-        migrate_page_copy(new, old);
-        BUG_ON(ctx->ring_pages[idx] != old);
-        ctx->ring_pages[idx] = new;
+        folio_migrate_copy(dst, src);
+        BUG_ON(ctx->ring_pages[idx] != &src->page);
+        ctx->ring_pages[idx] = &dst->page;
         spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-        /* The old page is no longer accessible. */
-        put_page(old);
+        /* The old folio is no longer accessible. */
+        folio_put(src);
 
 out_unlock:
         mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ out:
         spin_unlock(&mapping->private_lock);
         return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
         .dirty_folio    = noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-        .migratepage    = aio_migratepage,
-#endif
+        .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
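
The new #else branch defines aio_migrate_folio as NULL when CONFIG_MIGRATION is disabled, which is what lets the aops initializer shed its own #ifdef: a designated initializer set to NULL leaves the field in its default state. A minimal sketch of the idiom, with hypothetical names my_migrate_folio and my_aops (not from this commit):

#if IS_ENABLED(CONFIG_MIGRATION)
static int my_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        /* filesystem-specific migration work would go here */
        return MIGRATEPAGE_SUCCESS;
}
#else
/* With migration compiled out, the initializer below reads
 * .migrate_folio = NULL, so no #ifdef is needed around it.
 */
#define my_migrate_folio NULL
#endif

static const struct address_space_operations my_aops = {
        .migrate_folio  = my_migrate_folio,
};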