/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
# include "lib.h"
# include "memlock.h"
# include "defaults.h"
2004-03-27 00:11:34 +03:00
# include "config.h"
# include "toolcontext.h"
2003-07-05 02:34:56 +04:00
# include <limits.h>
# include <fcntl.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# ifndef DEVMAPPER_SUPPORT
2011-02-18 17:16:11 +03:00
/*
 * No-op fallbacks used when LVM2 is built without device-mapper support:
 * there is no activation path, so nothing ever needs to be pinned in RAM.
 */

/* Daemon-scope lock request: nothing to do without device-mapper. */
void memlock_inc_daemon(struct cmd_context *cmd)
{
}

/* Daemon-scope unlock request: nothing to do without device-mapper. */
void memlock_dec_daemon(struct cmd_context *cmd)
{
}

/* Enter a critical section: no memory locking is performed. */
void critical_section_inc(struct cmd_context *cmd)
{
}

/* Leave a critical section: no memory locking is performed. */
void critical_section_dec(struct cmd_context *cmd)
{
}

/* Report critical-section depth; always 0 in this build. */
int critical_section(void)
{
	return 0;
}

/* No configuration to read when memory locking is compiled out. */
void memlock_init(struct cmd_context *cmd)
{
}

/* Nothing is ever locked, so nothing to unlock. */
void memlock_unlock(struct cmd_context *cmd)
{
}

/* No counters to reset in this build. */
void memlock_reset(void)
{
}
2003-07-05 02:34:56 +04:00
# else /* DEVMAPPER_SUPPORT */
static size_t _size_stack ;
static size_t _size_malloc_tmp ;
static size_t _size_malloc = 2000000 ;
static void * _malloc_mem = NULL ;
2011-02-18 17:16:11 +03:00
static int _mem_locked = 0 ;
static int _critical_section_count = 0 ;
2009-11-19 04:11:57 +03:00
static int _memlock_count_daemon = 0 ;
2003-07-05 02:34:56 +04:00
static int _priority ;
static int _default_priority ;
2010-03-05 17:48:33 +03:00
/* list of maps, that are unconditionaly ignored */
static const char * const _ignore_maps [ ] = {
2010-03-08 18:55:52 +03:00
" [vdso] " ,
" [vsyscall] " ,
2010-03-05 17:48:33 +03:00
} ;
/* default blacklist for maps */
static const char * const _blacklist_maps [ ] = {
" locale/locale-archive " ,
" gconv/gconv-modules.cache " ,
" /libreadline.so. " , /* not using readline during mlock */
" /libncurses.so. " , /* not using readline during mlock */
" /libdl- " , /* not using dlopen,dlsym during mlock */
/* "/libdevmapper-event.so" */
} ;
typedef enum { LVM_MLOCK , LVM_MUNLOCK } lvmlock_t ;
2010-03-09 06:16:11 +03:00
static unsigned _use_mlockall ;
2010-09-30 15:32:40 +04:00
static int _maps_fd ;
static size_t _maps_len = 8192 ; /* Initial buffer size for reading /proc/self/maps */
static char * _maps_buffer ;
2010-03-09 06:16:11 +03:00
static char _procselfmaps [ PATH_MAX ] = " " ;
2010-03-09 13:25:50 +03:00
# define SELF_MAPS " / self / maps"
2010-03-09 06:16:11 +03:00
2010-03-30 18:41:58 +04:00
static size_t _mstats ; /* statistic for maps locking */
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
/*
 * Dirty one long per page across the region so every page is actually
 * mapped and resident before it is locked.
 * NOTE(review): assumes size >= sizeof(long); a smaller region makes
 * `end` point before `mem` (the loop then does nothing) — confirm callers.
 */
static void _touch_memory(void *mem, size_t size)
{
	size_t pagesize = lvm_getpagesize();
	char *cursor = mem;
	char *last = cursor + size - sizeof(long);

	for (; cursor < last; cursor += pagesize)
		*(long *) cursor = 1;
}
static void _allocate_memory ( void )
{
void * stack_mem , * temp_malloc_mem ;
if ( ( stack_mem = alloca ( _size_stack ) ) )
_touch_memory ( stack_mem , _size_stack ) ;
if ( ( temp_malloc_mem = malloc ( _size_malloc_tmp ) ) )
_touch_memory ( temp_malloc_mem , _size_malloc_tmp ) ;
if ( ( _malloc_mem = malloc ( _size_malloc ) ) )
_touch_memory ( _malloc_mem , _size_malloc ) ;
free ( temp_malloc_mem ) ;
}
/* Give back the heap reservation taken by _allocate_memory(). */
static void _release_memory(void)
{
	free(_malloc_mem); /* free(NULL) is a no-op, so this is always safe */
}
2010-03-05 17:48:33 +03:00
/*
* mlock / munlock memory areas from / proc / self / maps
* format described in kernel / Documentation / filesystem / proc . txt
*/
2010-10-15 13:48:23 +04:00
static int _maps_line ( const struct config_node * cn , lvmlock_t lock ,
2010-03-30 18:41:58 +04:00
const char * line , size_t * mstats )
2003-07-05 02:34:56 +04:00
{
2010-12-20 16:12:55 +03:00
const struct config_value * cv ;
2010-03-05 17:48:33 +03:00
long from , to ;
int pos , i ;
char fr , fw , fx , fp ;
size_t sz ;
if ( sscanf ( line , " %lx-%lx %c%c%c%c%n " ,
& from , & to , & fr , & fw , & fx , & fp , & pos ) ! = 6 ) {
log_error ( " Failed to parse maps line: %s " , line ) ;
return 0 ;
}
2010-03-09 13:25:50 +03:00
/* Select readable maps */
2010-03-09 15:31:51 +03:00
if ( fr ! = ' r ' ) {
2010-07-08 18:47:46 +04:00
log_debug ( " %s area unreadable %s : Skipping. " ,
2010-07-08 17:05:27 +04:00
( lock = = LVM_MLOCK ) ? " mlock " : " munlock " , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
2010-03-09 15:31:51 +03:00
}
2010-03-05 17:48:33 +03:00
/* always ignored areas */
for ( i = 0 ; i < sizeof ( _ignore_maps ) / sizeof ( _ignore_maps [ 0 ] ) ; + + i )
2010-03-09 15:31:51 +03:00
if ( strstr ( line + pos , _ignore_maps [ i ] ) ) {
log_debug ( " mlock ignore filter '%s' matches '%s': Skipping. " ,
_ignore_maps [ i ] , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
2010-03-09 15:31:51 +03:00
}
2010-03-05 17:48:33 +03:00
sz = to - from ;
2010-10-15 13:48:23 +04:00
if ( ! cn ) {
2010-03-05 17:48:33 +03:00
/* If no blacklist configured, use an internal set */
for ( i = 0 ; i < sizeof ( _blacklist_maps ) / sizeof ( _blacklist_maps [ 0 ] ) ; + + i )
if ( strstr ( line + pos , _blacklist_maps [ i ] ) ) {
2010-03-09 06:16:11 +03:00
log_debug ( " mlock default filter '%s' matches '%s': Skipping. " ,
2010-03-05 17:48:33 +03:00
_blacklist_maps [ i ] , line ) ;
return 1 ;
}
} else {
for ( cv = cn - > v ; cv ; cv = cv - > next ) {
2010-03-09 06:16:11 +03:00
if ( ( cv - > type ! = CFG_STRING ) | | ! cv - > v . str [ 0 ] )
2010-03-05 17:48:33 +03:00
continue ;
if ( strstr ( line + pos , cv - > v . str ) ) {
2010-03-09 06:16:11 +03:00
log_debug ( " mlock_filter '%s' matches '%s': Skipping. " ,
2010-03-05 17:48:33 +03:00
cv - > v . str , line ) ;
return 1 ;
}
}
}
2010-03-30 18:41:58 +04:00
* mstats + = sz ;
2010-07-08 17:05:27 +04:00
log_debug ( " %s %10ldKiB %12lx - %12lx %c%c%c%c%s " ,
2010-03-09 06:16:11 +03:00
( lock = = LVM_MLOCK ) ? " mlock " : " munlock " ,
( ( long ) sz + 1023 ) / 1024 , from , to , fr , fw , fx , fp , line + pos ) ;
2010-03-05 17:48:33 +03:00
if ( lock = = LVM_MLOCK ) {
if ( mlock ( ( const void * ) from , sz ) < 0 ) {
log_sys_error ( " mlock " , line ) ;
return 0 ;
}
} else {
if ( munlock ( ( const void * ) from , sz ) < 0 ) {
log_sys_error ( " munlock " , line ) ;
return 0 ;
}
}
return 1 ;
}
2010-03-30 18:41:58 +04:00
/*
 * Apply lock (LVM_MLOCK) or unlock (LVM_MUNLOCK) to the whole process.
 * Uses mlockall()/munlockall() when _use_mlockall is set; otherwise
 * reads /proc/self/maps from the already-open _maps_fd and processes
 * every mapping line individually through _maps_line().
 * *mstats receives the total number of bytes (un)locked.
 * Returns 1 on success, 0 if any region failed.
 */
static int _memlock_maps(struct cmd_context *cmd, lvmlock_t lock, size_t *mstats)
{
	const struct config_node *filter_cn;
	char *cur, *eol;
	size_t used;
	ssize_t bytes;
	int result = 1;

	if (_use_mlockall) {
#ifdef MCL_CURRENT
		if (lock == LVM_MLOCK) {
			if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
				log_sys_error("mlockall", "");
				return 0;
			}
		} else {
			if (munlockall()) {
				log_sys_error("munlockall", "");
				return 0;
			}
		}
		return 1;
#else
		/* No mlockall() on this platform. */
		return 0;
#endif
	}

	/* Force libc.mo load */
	if (lock == LVM_MLOCK)
		(void) strerror(0);

	/* Reset statistic counters */
	*mstats = 0;

	/*
	 * Read the whole maps file into one contiguous buffer, doubling the
	 * buffer and re-reading from offset 0 whenever it does not fit —
	 * a realloc in the middle of reading could itself change the maps.
	 */
	for (used = 0;;) {
		if (!_maps_buffer || used >= _maps_len) {
			if (_maps_buffer)
				_maps_len *= 2;
			if (!(_maps_buffer = dm_realloc(_maps_buffer, _maps_len))) {
				log_error("Allocation of maps buffer failed");
				return 0;
			}
		}
		lseek(_maps_fd, 0, SEEK_SET);
		for (used = 0; used < _maps_len; used += bytes) {
			bytes = read(_maps_fd, _maps_buffer + used, _maps_len - used);
			if (bytes == -1)
				return_0;
			if (!bytes) {
				_maps_buffer[used] = '\0';
				break; /* EOF */
			}
		}
		if (used < _maps_len)	/* fits in buffer */
			break;
	}

	filter_cn = find_config_tree_node(cmd, "activation/mlock_filter");

	/* Process the buffer line by line, NUL-terminating each one. */
	for (cur = _maps_buffer; (eol = strchr(cur, '\n')); cur = eol + 1) {
		*eol = '\0'; /* remove \n */
		if (!_maps_line(filter_cn, lock, cur, mstats))
			result = 0;
	}

	log_debug("%socked %ld bytes",
		  (lock == LVM_MLOCK) ? "L" : "Unl", (long) *mstats);

	return result;
}
/* Stop memory getting swapped out */
static void _lock_mem ( struct cmd_context * cmd )
{
2003-07-05 02:34:56 +04:00
_allocate_memory ( ) ;
2010-03-30 18:41:23 +04:00
/*
* For daemon we need to use mlockall ( )
* so even future adition of thread which may not even use lvm lib
* will not block memory locked thread
* Note : assuming _memlock_count_daemon is updated before _memlock_count
*/
_use_mlockall = _memlock_count_daemon ? 1 :
find_config_tree_bool ( cmd , " activation/use_mlockall " , DEFAULT_USE_MLOCKALL ) ;
2010-03-09 06:16:11 +03:00
if ( ! _use_mlockall ) {
2010-03-09 13:25:50 +03:00
if ( ! * _procselfmaps & &
dm_snprintf ( _procselfmaps , sizeof ( _procselfmaps ) ,
" %s " SELF_MAPS , cmd - > proc_dir ) < 0 ) {
log_error ( " proc_dir too long " ) ;
return ;
2010-03-09 06:16:11 +03:00
}
2010-09-30 15:32:40 +04:00
if ( ! ( _maps_fd = open ( _procselfmaps , O_RDONLY ) ) ) {
log_sys_error ( " open " , _procselfmaps ) ;
2010-03-09 06:16:11 +03:00
return ;
}
}
log_very_verbose ( " Locking memory " ) ;
if ( ! _memlock_maps ( cmd , LVM_MLOCK , & _mstats ) )
stack ;
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
errno = 0 ;
if ( ( ( _priority = getpriority ( PRIO_PROCESS , 0 ) ) = = - 1 ) & & errno )
log_sys_error ( " getpriority " , " " ) ;
else
if ( setpriority ( PRIO_PROCESS , 0 , _default_priority ) )
2008-05-29 03:12:45 +04:00
log_error ( " setpriority %d failed: %s " ,
2003-07-05 02:34:56 +04:00
_default_priority , strerror ( errno ) ) ;
}
2010-03-05 17:48:33 +03:00
static void _unlock_mem ( struct cmd_context * cmd )
2003-07-05 02:34:56 +04:00
{
2010-03-30 18:41:58 +04:00
size_t unlock_mstats ;
2010-03-09 06:16:11 +03:00
log_very_verbose ( " Unlocking memory " ) ;
if ( ! _memlock_maps ( cmd , LVM_MUNLOCK , & unlock_mstats ) )
stack ;
2010-03-05 17:48:33 +03:00
2010-03-09 06:16:11 +03:00
if ( ! _use_mlockall ) {
2010-09-30 15:32:40 +04:00
if ( close ( _maps_fd ) )
log_sys_error ( " close " , _procselfmaps ) ;
dm_free ( _maps_buffer ) ;
_maps_buffer = NULL ;
2011-02-18 17:51:04 +03:00
if ( _mstats < unlock_mstats ) {
if ( ( _mstats + 4096 ) < unlock_mstats )
log_error ( INTERNAL_ERROR
" Maps lock %ld < unlock %ld " ,
( long ) _mstats , ( long ) unlock_mstats ) ;
else
log_debug ( " Maps lock %ld < unlock %ld, 1 page difference! " ,
( long ) _mstats , ( long ) unlock_mstats ) ;
}
2010-03-09 06:16:11 +03:00
}
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
if ( setpriority ( PRIO_PROCESS , 0 , _priority ) )
log_error ( " setpriority %u failed: %s " , _priority ,
strerror ( errno ) ) ;
2010-03-30 18:41:23 +04:00
_release_memory ( ) ;
2003-07-05 02:34:56 +04:00
}
2010-03-30 18:41:23 +04:00
static void _lock_mem_if_needed ( struct cmd_context * cmd )
{
2011-02-18 17:16:11 +03:00
if ( ! _mem_locked & &
( ( _critical_section_count + _memlock_count_daemon ) = = 1 ) ) {
_mem_locked = 1 ;
2010-03-05 17:48:33 +03:00
_lock_mem ( cmd ) ;
2011-02-18 17:16:11 +03:00
}
2009-11-19 04:11:57 +03:00
}
2010-03-30 18:41:23 +04:00
/*
 * Unlock memory once both the critical-section and daemon counters
 * have dropped to zero and memory is currently locked.
 */
static void _unlock_mem_if_possible(struct cmd_context *cmd)
{
	log_debug("UnlockMem l:%d cs:%d md:%d", _mem_locked,
		  _critical_section_count, _memlock_count_daemon);

	if (!_mem_locked || _critical_section_count || _memlock_count_daemon)
		return;

	_unlock_mem(cmd);
	_mem_locked = 0;
}
2011-02-18 17:16:11 +03:00
void critical_section_inc ( struct cmd_context * cmd )
2003-07-05 02:34:56 +04:00
{
2011-02-18 17:16:11 +03:00
+ + _critical_section_count ;
log_debug ( " critical_section_inc to %d " , _critical_section_count ) ;
2010-03-05 17:48:33 +03:00
_lock_mem_if_needed ( cmd ) ;
2003-07-05 02:34:56 +04:00
}
2011-02-18 17:16:11 +03:00
void critical_section_dec ( struct cmd_context * cmd )
2003-07-05 02:34:56 +04:00
{
2011-02-18 17:16:11 +03:00
if ( ! _critical_section_count )
log_error ( INTERNAL_ERROR " _critical_section has dropped below 0. " ) ;
- - _critical_section_count ;
log_debug ( " critical_section_dec to %d " , _critical_section_count ) ;
}
/* Current critical-section nesting depth (0 = not in a critical section). */
int critical_section(void)
{
	return _critical_section_count;
}
2009-11-19 04:11:57 +03:00
/*
* The memlock_ * _daemon functions will force the mlockall ( ) call that we need
* to stay in memory , but they will have no effect on device scans ( unlike
2011-02-18 17:16:11 +03:00
* normal critical_section_inc / dec ) . Memory is kept locked as long as either
* of critical_section or memlock_daemon is in effect .
2009-11-19 04:11:57 +03:00
*/
2010-03-05 17:48:33 +03:00
void memlock_inc_daemon ( struct cmd_context * cmd )
2009-11-19 04:11:57 +03:00
{
+ + _memlock_count_daemon ;
2011-02-18 17:16:11 +03:00
if ( _memlock_count_daemon = = 1 & & _critical_section_count > 0 )
log_error ( INTERNAL_ERROR " _memlock_inc_daemon used in critical section. " ) ;
2009-11-19 04:11:57 +03:00
log_debug ( " memlock_count_daemon inc to %d " , _memlock_count_daemon ) ;
2011-02-18 17:16:11 +03:00
_lock_mem_if_needed ( cmd ) ;
2009-11-19 04:11:57 +03:00
}
2010-03-05 17:48:33 +03:00
void memlock_dec_daemon ( struct cmd_context * cmd )
2009-11-19 04:11:57 +03:00
{
if ( ! _memlock_count_daemon )
2009-12-16 22:22:11 +03:00
log_error ( INTERNAL_ERROR " _memlock_count_daemon has dropped below 0. " ) ;
2009-11-19 04:11:57 +03:00
- - _memlock_count_daemon ;
log_debug ( " memlock_count_daemon dec to %d " , _memlock_count_daemon ) ;
2011-02-18 17:16:11 +03:00
_unlock_mem_if_possible ( cmd ) ;
2003-07-05 02:34:56 +04:00
}
void memlock_init ( struct cmd_context * cmd )
{
2006-05-16 20:48:31 +04:00
_size_stack = find_config_tree_int ( cmd ,
2003-07-05 02:34:56 +04:00
" activation/reserved_stack " ,
2004-03-08 21:28:45 +03:00
DEFAULT_RESERVED_STACK ) * 1024 ;
2006-05-16 20:48:31 +04:00
_size_malloc_tmp = find_config_tree_int ( cmd ,
2003-07-05 02:34:56 +04:00
" activation/reserved_memory " ,
2004-03-08 21:28:45 +03:00
DEFAULT_RESERVED_MEMORY ) * 1024 ;
2006-05-16 20:48:31 +04:00
_default_priority = find_config_tree_int ( cmd ,
2008-01-30 17:00:02 +03:00
" activation/process_priority " ,
DEFAULT_PROCESS_PRIORITY ) ;
2003-07-05 02:34:56 +04:00
}
2011-02-18 17:16:11 +03:00
/*
 * Forget all locking state without touching the kernel — used after
 * fork() etc., where the counters no longer describe this process.
 */
void memlock_reset(void)
{
	log_debug("memlock reset.");
	_mem_locked = 0;
	_critical_section_count = 0;
	_memlock_count_daemon = 0;
}
/* Public entry point: unlock memory if no counter holds it locked. */
void memlock_unlock(struct cmd_context *cmd)
{
	_unlock_mem_if_possible(cmd);
}
2003-07-05 02:34:56 +04:00
# endif