/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
 *			Philip Moltmann <moltmann@vmware.com>
 */

/*
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands,
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}
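
/*
 * Illustrative sketch of the batch entry encoding (informal, assuming a
 * 4k PAGE_SIZE; not part of the driver logic): locking the page at PFN
 * 0x1234 would be staged as
 *
 *	vmballoon_batch_set_pa(batch, 0, (u64)0x1234 << PAGE_SHIFT);
 *
 * after which the hypervisor writes the per-page result into the low
 * status bits, so vmballoon_batch_get_pa(batch, 0) still yields
 * 0x1234000 and vmballoon_batch_get_status(batch, 0) reads back
 * VMW_BALLOON_SUCCESS on success.
 */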

#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S"(__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4"(arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})
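
/*
 * Informal usage sketch: the macro expands to a single "inl" on the
 * backdoor port, e.g.
 *
 *	unsigned long status, target, dummy = 0;
 *	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
 *
 * %eax carries the magic in and the status out, while the command's
 * result is returned in %ebx (in %ecx for the START command).
 */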

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* monitor operations */
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
	unsigned int doorbell_set;
	unsigned int doorbell_unset;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
};

struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};

static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;
	bool success;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	if (!success) {
		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
	}
	return success;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}
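
/*
 * Note that once VMW_BALLOON_ERROR_RESET sets b->reset_required, the
 * next invocation of vmballoon_work() runs the full reset sequence
 * before issuing any further commands.
 */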

/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}
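
/*
 * Informal example: with 4k base pages, vmballoon_page_size(true) is
 * 1 << VMW_BALLOON_2M_SHIFT == 512, so a single 2m page accounts for
 * 512 units in b->size and in the inflate/deflate target arithmetic.
 */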

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock[false]);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[false]);
	return 1;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[is_2m_pages]);
	return 1;
}

/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
						unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock[false]);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[false]);
	return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[is_2m_pages]);
	return false;
}

static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}
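
/*
 * Informal note: a "2m page" here is an order-VMW_BALLOON_2M_SHIFT
 * (order-9) allocation, i.e. 512 physically contiguous 4k pages, and
 * must therefore be freed with the matching order via __free_pages().
 */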

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	if (b->batch_page) {
		vunmap(b->batch_page);
		b->batch_page = NULL;
	}

	if (b->page) {
		__free_page(b->page);
		b->page = NULL;
	}
}

/*
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list; these refused pages are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
								target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc[false]);

		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
			target);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
		bool is_2m_pages, unsigned int *target)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages, bool is_2m_pages,
				unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
			target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
		bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned rate;
	unsigned int allocations = 0;
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	bool is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
	if (b->slow_allocation_cycles) {
		rate = b->rate_alloc;
		is_2m_pages = false;
	} else {
		rate = UINT_MAX;
		is_2m_pages =
			b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
	}

	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
		 __func__, b->target - b->size, rate, b->rate_alloc);

	while (!b->reset_required &&
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc[is_2m_pages]);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = vmballoon_alloc_page(flags, is_2m_pages);
		if (!page) {
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true, &b->target);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						VMW_BALLOON_RATE_ALLOC_MIN);
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (allocations >= b->rate_alloc)
				break;

			flags = VMW_PAGE_ALLOC_CANSLEEP;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		/* count this allocation so the rate limits below apply */
		allocations++;

		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, is_2m_pages,
					&b->target);
			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();

		if (allocations >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, is_2m_pages, &b->target);

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && allocations >= b->rate_alloc) {
		unsigned int mult = allocations / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
}

/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			if (b->reset_required ||
				(b->target > 0 &&
					b->size - num_pages
					* vmballoon_page_size(is_2m_pages)
				< b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						is_2m_pages, &b->target);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
	}
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};
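
/*
 * Informal note: vmballoon_reset() picks one of these ops vectors based
 * on the capabilities returned by the START command, so the inflate and
 * deflate paths can simply call, e.g.,
 *
 *	b->ops->lock(b, num_pages, is_2m_pages, &b->target);
 *
 * without knowing whether batching is in use.
 */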

static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}
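
/*
 * Informal note: the batch page is allocated with VMW_PAGE_ALLOC_NOSLEEP,
 * which includes __GFP_HIGHMEM, so it may not have a permanent kernel
 * mapping; vmap() above provides a stable virtual address for it.
 */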

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	int error;

	VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
			VMCI_INVALID_ID, error);
	STATS_INC(b->stats.doorbell_unset);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/*
 * Initialize vmci doorbell, to get notified as soon as balloon changes
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	int error = 0;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
		error = vmci_doorbell_create(&b->vmci_doorbell,
				VMCI_FLAG_DELAYED_CB,
				VMCI_PRIVILEGE_FLAG_RESTRICTED,
				vmballoon_doorbell, b);

		if (error == VMCI_SUCCESS) {
			VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
					b->vmci_doorbell.context,
					b->vmci_doorbell.resource, error);
			STATS_INC(b->stats.doorbell_set);
		}
	}

	if (error != 0) {
		vmballoon_vmci_cleanup(b);

		return -EIO;
	}

	return 0;
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateSleepAlloc:     %8d pages/sec\n",
		   b->rate_alloc);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "doorbell:           %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "2m-lock:            %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "2m-unlock:          %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n"
		   "doorbellSet:        %8u\n"
		   "doorbellUnset:      %8u\n",
		   stats->timer,
		   stats->doorbell,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock[true], stats->lock_fail[true],
		   stats->lock[false], stats->lock_fail[false],
		   stats->unlock[true], stats->unlock_fail[true],
		   stats->unlock[false], stats->unlock_fail[false],
		   stats->target, stats->target_fail,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false],
		   stats->doorbell_set, stats->doorbell_unset);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);
module_init ( vmballoon_init ) ;
static void __exit vmballoon_exit ( void )
{
2015-08-07 01:18:01 +03:00
vmballoon_vmci_cleanup ( & balloon ) ;
2010-04-23 21:18:08 +04:00
cancel_delayed_work_sync ( & balloon . dwork ) ;
vmballoon_debugfs_exit ( & balloon ) ;
/*
* Deallocate all reserved memory , and reset connection with monitor .
* Reset connection before deallocating memory to avoid potential for
* additional spurious resets from guest touching deallocated pages .
*/
2015-08-07 01:18:01 +03:00
vmballoon_send_start ( & balloon , 0 ) ;
2010-04-23 21:18:08 +04:00
vmballoon_pop ( & balloon ) ;
}
module_exit ( vmballoon_exit ) ;