s390/uv: gmap_make_secure() cleanups for further changes
Let's factor out handling of LRU cache draining and convert the if-else chain to a switch-case.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-3-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
This commit is contained in:
parent
3f29f6537f
commit
68ad4743be
@@ -266,6 +266,36 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
|
||||
return atomic_read(&mm->context.protected_count) > 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Drain LRU caches: the local one on first invocation and the ones of all
|
||||
* CPUs on successive invocations. Returns "true" on the first invocation.
|
||||
*/
|
||||
static bool drain_lru(bool *drain_lru_called)
|
||||
{
|
||||
/*
|
||||
* If we have tried a local drain and the folio refcount
|
||||
* still does not match our expected safe value, try with a
|
||||
* system wide drain. This is needed if the pagevecs holding
|
||||
* the page are on a different CPU.
|
||||
*/
|
||||
if (*drain_lru_called) {
|
||||
lru_add_drain_all();
|
||||
/* We give up here, don't retry immediately. */
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* We are here if the folio refcount does not match the
|
||||
* expected safe value. The main culprits are usually
|
||||
* pagevecs. With lru_add_drain() we drain the pagevecs
|
||||
* on the local CPU so that hopefully the refcount will
|
||||
* reach the expected safe value.
|
||||
*/
|
||||
lru_add_drain();
|
||||
*drain_lru_called = true;
|
||||
/* The caller should try again immediately */
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Requests the Ultravisor to make a page accessible to a guest.
|
||||
* If it's brought in the first time, it will be cleared. If
|
||||
@@ -275,7 +305,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
|
||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
bool local_drain = false;
|
||||
bool drain_lru_called = false;
|
||||
spinlock_t *ptelock;
|
||||
unsigned long uaddr;
|
||||
struct folio *folio;
|
||||
@@ -331,37 +361,21 @@ unlock:
|
||||
out:
|
||||
mmap_read_unlock(gmap->mm);
|
||||
|
||||
if (rc == -EAGAIN) {
|
||||
switch (rc) {
|
||||
case -EAGAIN:
|
||||
/*
|
||||
* If we are here because the UVC returned busy or partial
|
||||
* completion, this is just a useless check, but it is safe.
|
||||
*/
|
||||
folio_wait_writeback(folio);
|
||||
folio_put(folio);
|
||||
} else if (rc == -EBUSY) {
|
||||
/*
|
||||
* If we have tried a local drain and the folio refcount
|
||||
* still does not match our expected safe value, try with a
|
||||
* system wide drain. This is needed if the pagevecs holding
|
||||
* the page are on a different CPU.
|
||||
*/
|
||||
if (local_drain) {
|
||||
lru_add_drain_all();
|
||||
/* We give up here, and let the caller try again */
|
||||
return -EAGAIN;
|
||||
}
|
||||
/*
|
||||
* We are here if the folio refcount does not match the
|
||||
* expected safe value. The main culprits are usually
|
||||
* pagevecs. With lru_add_drain() we drain the pagevecs
|
||||
* on the local CPU so that hopefully the refcount will
|
||||
* reach the expected safe value.
|
||||
*/
|
||||
lru_add_drain();
|
||||
local_drain = true;
|
||||
/* And now we try again immediately after draining */
|
||||
goto again;
|
||||
} else if (rc == -ENXIO) {
|
||||
return -EAGAIN;
|
||||
case -EBUSY:
|
||||
/* Additional folio references. */
|
||||
if (drain_lru(&drain_lru_called))
|
||||
goto again;
|
||||
return -EAGAIN;
|
||||
case -ENXIO:
|
||||
if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
|
||||
return -EFAULT;
|
||||
return -EAGAIN;
|
||||
|
Loading…
x
Reference in New Issue
Block a user