fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c

kmap_atomic() is deprecated in favor of kmap_local_page().

With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
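
For reference, the typical kmap_local_page() usage pattern looks like
this (a generic sketch for illustration, not code taken verbatim from
this patch):

	struct nfs_cache_array *array;

	/* map the (possibly highmem) page into kernel address space */
	array = kmap_local_page(page);
	/* ... access the page contents through 'array' ... */
	kunmap_local(array);	/* release local mappings in reverse order */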

kmap_atomic() is implemented like kmap_local_page() but, in addition,
it disables page-faults and preemption (the latter only for !PREEMPT_RT
kernels; on PREEMPT_RT kernels it only disables migration).
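
Conceptually, the old API is roughly equivalent to the following (a
simplified sketch of the relationship, not the exact in-kernel
implementation):

	/* what kmap_atomic(page) roughly amounts to ... */
	preempt_disable();	/* migrate_disable() on PREEMPT_RT */
	pagefault_disable();
	addr = kmap_local_page(page);

	/* ... and what kunmap_atomic(addr) undoes, in reverse order */
	kunmap_local(addr);
	pagefault_enable();
	preempt_enable();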

The code between the mappings and un-mappings in the functions of
dir.c does not depend on the above-mentioned side effects of
kmap_atomic(), so a mere replacement of the old API with the new one
is all that is required (i.e., there is no need to explicitly add
calls to pagefault_disable() and/or preempt_disable()).
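
Had any of these sections relied on those side effects, the conversion
would have had to make them explicit; for example (a hypothetical
sketch, not needed anywhere in dir.c):

	array = kmap_local_page(page);
	pagefault_disable();	/* only if the code must not take faults */
	/* ... code that relied on kmap_atomic() disabling page-faults ... */
	pagefault_enable();
	kunmap_local(array);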

Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.

Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.

Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
---

diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -203,14 +203,14 @@ static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie,
 {
 	struct nfs_cache_array *array;
 
-	array = kmap_atomic(page);
+	array = kmap_local_page(page);
 	array->change_attr = change_attr;
 	array->last_cookie = last_cookie;
 	array->size = 0;
 	array->page_full = 0;
 	array->page_is_eof = 0;
 	array->cookies_are_ordered = 1;
-	kunmap_atomic(array);
+	kunmap_local(array);
 }
 
 /*
@@ -221,11 +221,11 @@ static void nfs_readdir_clear_array(struct page *page)
 	struct nfs_cache_array *array;
 	unsigned int i;
 
-	array = kmap_atomic(page);
+	array = kmap_local_page(page);
 	for (i = 0; i < array->size; i++)
 		kfree(array->array[i].name);
 	array->size = 0;
-	kunmap_atomic(array);
+	kunmap_local(array);
 }
 
 static void nfs_readdir_free_folio(struct folio *folio)
@@ -371,14 +371,14 @@ static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
 static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
 				      u64 change_attr)
 {
-	struct nfs_cache_array *array = kmap_atomic(page);
+	struct nfs_cache_array *array = kmap_local_page(page);
 	int ret = true;
 
 	if (array->change_attr != change_attr)
 		ret = false;
 	if (nfs_readdir_array_index_cookie(array) != last_cookie)
 		ret = false;
-	kunmap_atomic(array);
+	kunmap_local(array);
 	return ret;
 }
 
@@ -418,9 +418,9 @@ static u64 nfs_readdir_page_last_cookie(struct page *page)
 	struct nfs_cache_array *array;
 	u64 ret;
 
-	array = kmap_atomic(page);
+	array = kmap_local_page(page);
 	ret = array->last_cookie;
-	kunmap_atomic(array);
+	kunmap_local(array);
 	return ret;
 }
 
@@ -429,9 +429,9 @@ static bool nfs_readdir_page_needs_filling(struct page *page)
 	struct nfs_cache_array *array;
 	bool ret;
 
-	array = kmap_atomic(page);
+	array = kmap_local_page(page);
 	ret = !nfs_readdir_array_is_full(array);
-	kunmap_atomic(array);
+	kunmap_local(array);
 	return ret;
 }
 
@@ -439,9 +439,9 @@ static void nfs_readdir_page_set_eof(struct page *page)
 {
 	struct nfs_cache_array *array;
 
-	array = kmap_atomic(page);
+	array = kmap_local_page(page);
 	nfs_readdir_array_set_eof(array);
-	kunmap_atomic(array);
+	kunmap_local(array);
 }
 
 static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
@@ -568,14 +568,14 @@ static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc)
 	struct nfs_cache_array *array;
 	int status;
 
-	array = kmap_atomic(desc->page);
+	array = kmap_local_page(desc->page);
 
 	if (desc->dir_cookie == 0)
 		status = nfs_readdir_search_for_pos(array, desc);
 	else
 		status = nfs_readdir_search_for_cookie(array, desc);
 
-	kunmap_atomic(array);
+	kunmap_local(array);
 	return status;
 }
 