/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
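
/*
 * With CONFIG_KVM_ASYNC_PF_SYNC the "page present" notification is
 * delivered synchronously from the worker that resolved the fault;
 * otherwise it is deferred until the vcpu picks the completion up in
 * kvm_check_async_pf_completion().  These two helpers select the right
 * variant at compile time.
 */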
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
                                               struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
                                                struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}
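
/* slab cache for struct kvm_async_pf work items */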
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}
void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}
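
/*
 * Workqueue handler: fault the page in on behalf of the guest, move the
 * work item to the vcpu's "done" list and wake the vcpu if it is waiting.
 * Runs in process context on the system workqueue, so it may sleep.
 */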
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gva_t gva = apf->gva;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns
         * mm and might be done in another context, so we must
         * use FOLL_REMOTE.
         */
        __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
                        FOLL_WRITE | FOLL_REMOTE);

        kvm_async_page_present_sync(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, gva);

        /*
         * This memory barrier pairs with prepare_to_wait's set_current_state()
         */
        smp_mb();
        if (swait_active(&vcpu->wq))
                swake_up(&vcpu->wq);

        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
}
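
/*
 * Tear down a vcpu's async_pf state: cancel work that is still queued
 * (or flush it when CONFIG_KVM_ASYNC_PF_SYNC is set) and free completed
 * work the guest never consumed.  The mm/kvm references taken in
 * kvm_setup_async_pf() are dropped here for work that was cancelled.
 */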
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
        }

        spin_lock(&vcpu->async_pf.lock);
        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}
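
/*
 * Drain the "done" list: for each completed fault let the architecture
 * code update its state (kvm_arch_async_page_ready) and, unless
 * CONFIG_KVM_ASYNC_PF_SYNC already delivered it from the worker, inject
 * the "page present" notification, as long as
 * kvm_arch_can_inject_async_page_present() allows it.
 */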
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
              kvm_arch_can_inject_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                kvm_async_page_present_async(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}
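
/*
 * Queue an asynchronous fault for @gva/@hva: allocate a work item, pin
 * the faulting mm and the kvm instance so they survive until the worker
 * runs, schedule async_pf_execute() and tell the architecture to inject
 * a "page not present" notification.  Returns 1 if the work was queued,
 * 0 otherwise.
 */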
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
                       struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;

        /* setup delayed work */

        /*
         * Do the allocation with GFP_NOWAIT; if we are going to sleep
         * anyway, we may as well sleep faulting the page in.
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->gva = gva;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        atomic_inc(&work->mm->mm_users);
        kvm_get_kvm(work->vcpu->kvm);

        /*
         * This can't really happen, otherwise gfn_to_pfn_async
         * would succeed.
         */
        if (unlikely(kvm_is_error_hva(work->addr)))
                goto retry_sync;

        INIT_WORK(&work->work, async_pf_execute);
        if (!schedule_work(&work->work))
                goto retry_sync;

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
        return 1;
retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
        mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
}
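
/*
 * Queue a dummy work item with wakeup_all set.  When the vcpu next checks
 * for completions, the architecture code can use it to tell the guest to
 * wake up every task that is waiting on an async fault.
 */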
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}