NFS: Rename readpage_async_filler to nfs_read_add_folio
Rename readpage_async_filler to nfs_read_add_folio to better reflect what
this function does (add a folio to the nfs_pageio_descriptor), and simplify
arguments to this function by removing struct nfs_readdesc.

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Tested-by: Daire Byrne <daire@dneg.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 01c3a40084
parent 703c6d03f1
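As background (not part of the commit itself): below is a minimal user-space C sketch of the refactoring pattern this change applies, i.e. a helper that used to take a pointer to a small descriptor struct bundling its inputs is changed to take those inputs as explicit parameters, so the wrapper struct can be deleted. All names here (pageio, open_ctx, read_add_folio) are illustrative stand-ins, not the NFS kernel API.

/*
 * Sketch of the refactoring pattern: pass the two objects directly
 * instead of bundling them in a descriptor struct.
 */
#include <stdio.h>

struct pageio {            /* stand-in for nfs_pageio_descriptor */
	int pages_added;
};

struct open_ctx {          /* stand-in for nfs_open_context */
	int error;
};

/* After the rename: the helper receives its inputs as explicit arguments. */
static int read_add_folio(struct pageio *pgio, struct open_ctx *ctx, int folio_id)
{
	if (ctx->error)
		return ctx->error;
	pgio->pages_added++;
	printf("queued folio %d (total %d)\n", folio_id, pgio->pages_added);
	return 0;
}

int main(void)
{
	/* Callers now declare the two objects separately instead of a
	 * wrapper struct, mirroring the nfs_read_folio()/nfs_readahead()
	 * changes in the diff below. */
	struct pageio pgio = { 0 };
	struct open_ctx ctx = { 0 };

	for (int folio_id = 0; folio_id < 3; folio_id++)
		if (read_add_folio(&pgio, &ctx, folio_id))
			break;
	return 0;
}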
@@ -127,11 +127,6 @@ static void nfs_readpage_release(struct nfs_page *req, int error)
 	nfs_release_request(req);
 }
 
-struct nfs_readdesc {
-	struct nfs_pageio_descriptor pgio;
-	struct nfs_open_context *ctx;
-};
-
 static void nfs_page_group_set_uptodate(struct nfs_page *req)
 {
 	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
@@ -153,7 +148,8 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
 
 	if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
 		/* note: regions of the page not covered by a
-		 * request are zeroed in readpage_async_filler */
+		 * request are zeroed in nfs_read_add_folio
+		 */
 		if (bytes > hdr->good_bytes) {
 			/* nothing in this request was good, so zero
 			 * the full extent of the request */
@@ -281,7 +277,9 @@ static void nfs_readpage_result(struct rpc_task *task,
 		nfs_readpage_retry(task, hdr);
 }
 
-static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
+static int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+			      struct nfs_open_context *ctx,
+			      struct folio *folio)
 {
 	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -303,15 +301,15 @@ static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
 		goto out_unlock;
 	}
 
-	new = nfs_page_create_from_folio(desc->ctx, folio, 0, aligned_len);
+	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
 	if (IS_ERR(new))
 		goto out_error;
 
 	if (len < fsize)
 		folio_zero_segment(folio, len, fsize);
-	if (!nfs_pageio_add_request(&desc->pgio, new)) {
+	if (!nfs_pageio_add_request(pgio, new)) {
 		nfs_list_remove_request(new);
-		error = desc->pgio.pg_error;
+		error = pgio->pg_error;
 		nfs_readpage_release(new, error);
 		goto out;
 	}
@@ -332,8 +330,9 @@ out:
  */
 int nfs_read_folio(struct file *file, struct folio *folio)
 {
-	struct nfs_readdesc desc;
 	struct inode *inode = file_inode(file);
+	struct nfs_pageio_descriptor pgio;
+	struct nfs_open_context *ctx;
 	int ret;
 
 	trace_nfs_aop_readpage(inode, folio);
@@ -357,25 +356,25 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 	if (NFS_STALE(inode))
 		goto out_unlock;
 
-	desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+	ctx = get_nfs_open_context(nfs_file_open_context(file));
 
-	xchg(&desc.ctx->error, 0);
-	nfs_pageio_init_read(&desc.pgio, inode, false,
+	xchg(&ctx->error, 0);
+	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
 
-	ret = readpage_async_filler(&desc, folio);
+	ret = nfs_read_add_folio(&pgio, ctx, folio);
 	if (ret)
 		goto out;
 
-	nfs_pageio_complete_read(&desc.pgio);
-	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+	nfs_pageio_complete_read(&pgio);
+	ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
 	if (!ret) {
 		ret = folio_wait_locked_killable(folio);
 		if (!folio_test_uptodate(folio) && !ret)
-			ret = xchg(&desc.ctx->error, 0);
+			ret = xchg(&ctx->error, 0);
 	}
 out:
-	put_nfs_open_context(desc.ctx);
+	put_nfs_open_context(ctx);
 	trace_nfs_aop_readpage_done(inode, folio, ret);
 	return ret;
 out_unlock:
@@ -386,9 +385,10 @@ out_unlock:
 
 void nfs_readahead(struct readahead_control *ractl)
 {
+	struct nfs_pageio_descriptor pgio;
+	struct nfs_open_context *ctx;
 	unsigned int nr_pages = readahead_count(ractl);
 	struct file *file = ractl->file;
-	struct nfs_readdesc desc;
 	struct inode *inode = ractl->mapping->host;
 	struct folio *folio;
 	int ret;
@@ -403,24 +403,24 @@ void nfs_readahead(struct readahead_control *ractl)
 
 	if (file == NULL) {
 		ret = -EBADF;
-		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
-		if (desc.ctx == NULL)
+		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+		if (ctx == NULL)
 			goto out;
 	} else
-		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+		ctx = get_nfs_open_context(nfs_file_open_context(file));
 
-	nfs_pageio_init_read(&desc.pgio, inode, false,
+	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
 
 	while ((folio = readahead_folio(ractl)) != NULL) {
-		ret = readpage_async_filler(&desc, folio);
+		ret = nfs_read_add_folio(&pgio, ctx, folio);
 		if (ret)
 			break;
 	}
 
-	nfs_pageio_complete_read(&desc.pgio);
+	nfs_pageio_complete_read(&pgio);
 
-	put_nfs_open_context(desc.ctx);
+	put_nfs_open_context(ctx);
 out:
 	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
 }