// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

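/*
 * Worker callback: fault the page in on behalf of the guest, move the
 * work item to the vCPU's "done" list and wake the vCPU up.  Runs in
 * workqueue context, so the faulting mm is accessed remotely.
 */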
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gpa_t cr2_or_gpa = apf->cr2_or_gpa;
	int locked = 1;
	bool first;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	mmap_read_lock(mm);
	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
			&locked);
	if (locked)
		mmap_read_unlock(mm);

	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
		kvm_arch_async_page_present(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, cr2_or_gpa);

	__kvm_vcpu_wake_up(vcpu);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

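/*
 * Cancel or flush every outstanding work item for the vCPU and free all
 * entries on both the pending queue and the "done" list.
 */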
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * We know it's present in vcpu->async_pf.done, do
		 * nothing here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

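/*
 * Drain completed async page faults: hand each one to the arch "page
 * ready" / "page present" hooks, then free the work item.
 */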
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_dequeue_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
			kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

/*
 * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
 * success, 'false' on failure (page fault has to be handled synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return false;

	/* Arch specific code should not do async PF in this case */
	if (unlikely(kvm_is_error_hva(hva)))
		return false;

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return false;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->cr2_or_gpa = cr2_or_gpa;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	INIT_WORK(&work->work, async_pf_execute);

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

	schedule_work(&work->work);

	return true;
}

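/*
 * Queue a dummy "wakeup all" item on the done list so the arch code can
 * wake up everything that is still waiting on an async page fault.
 */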
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;
	bool first;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	first = list_empty(&vcpu->async_pf.done);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
		kvm_arch_async_page_present_queued(vcpu);

	vcpu->async_pf.queued++;

	return 0;
}