// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
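
/*
 * Depending on CONFIG_KVM_ASYNC_PF_SYNC, the "page present" notification is
 * delivered either directly from the workqueue (sync) or later from
 * kvm_check_async_pf_completion() (async); these helpers pick the right path.
 */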
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
                                               struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
                                                struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}
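
/*
 * Runs from the workqueue: fault the page in on behalf of the guest by
 * touching the host virtual address, move the request onto the done list,
 * and wake the vCPU if it is waiting.
 */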
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns
         * mm and might be done in another context, so we must
         * access remotely.
         */
        down_read(&mm->mmap_sem);
        get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
                              &locked);
        if (locked)
                up_read(&mm->mmap_sem);

        kvm_async_page_present_sync(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, cr2_or_gpa);

        if (swq_has_sleeper(&vcpu->wq))
                swake_up_one(&vcpu->wq);

        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
}
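
/*
 * Drop every async page fault still pending for this vCPU: cancel (or, with
 * CONFIG_KVM_ASYNC_PF_SYNC, flush) queued work items and free entries that
 * completed but were never delivered to the guest.
 */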
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * We know it's present in vcpu->async_pf.done, do
                 * nothing here.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}
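
/*
 * Deliver "page ready" completions: for each finished request, let the
 * architecture code update its state and, when it says an event can be
 * injected, notify the guest, then release the request.
 */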
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
              kvm_arch_can_inject_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                kvm_async_page_present_async(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}
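
/*
 * Queue an asynchronous fault for the guest address that missed: allocate a
 * work item, pin references to the mm and the VM, and hand the actual
 * fault-in to the workqueue. Returns 1 if the work was queued, 0 otherwise.
 */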
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                       unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;

        /* setup delayed work */

        /*
         * do alloc nowait since if we are going to sleep anyway we
         * may as well sleep faulting in page
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);

        /* this can't really happen otherwise gfn_to_pfn_async
           would succeed */
        if (unlikely(kvm_is_error_hva(work->addr)))
                goto retry_sync;

        INIT_WORK(&work->work, async_pf_execute);
        if (!schedule_work(&work->work))
                goto retry_sync;

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
        return 1;
retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
        mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
}
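
/*
 * Queue a special "wakeup all" completion that tells the guest to wake every
 * task still waiting on an async page fault, rather than completing one
 * specific fault.
 */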
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}