/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
# include "lib.h"
# include "memlock.h"
# include "defaults.h"
2004-03-27 00:11:34 +03:00
# include "config.h"
# include "toolcontext.h"
2003-07-05 02:34:56 +04:00
# include <limits.h>
# include <fcntl.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# ifndef DEVMAPPER_SUPPORT
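
/* Without device-mapper support these entry points are no-ops. */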
void memlock_inc(struct cmd_context *cmd)
{
	return;
}

void memlock_dec(struct cmd_context *cmd)
{
	return;
}

int memlock(void)
{
	return 0;
}

void memlock_init(struct cmd_context *cmd)
{
	return;
}

#else				/* DEVMAPPER_SUPPORT */

static size_t _size_stack;
static size_t _size_malloc_tmp;
static size_t _size_malloc = 2000000;

static void *_malloc_mem = NULL;
static int _memlock_count = 0;
static int _memlock_count_daemon = 0;
static int _priority;
static int _default_priority;

/* List of maps that are unconditionally ignored */
static const char * const _ignore_maps[] = {
	"[vdso]",
	"[vsyscall]",
};

/* Default blacklist for maps */
static const char * const _blacklist_maps[] = {
	"locale/locale-archive",
	"gconv/gconv-modules.cache",
	"/libreadline.so.",	/* not using readline during mlock */
	"/libncurses.so.",	/* not using ncurses during mlock */
	"/libdl-",		/* not using dlopen,dlsym during mlock */
	/* "/libdevmapper-event.so" */
};

typedef enum { LVM_MLOCK, LVM_MUNLOCK } lvmlock_t;

static unsigned _use_mlockall;
static FILE *_mapsh;
static char _procselfmaps[PATH_MAX] = "";

#define SELF_MAPS "/self/maps"

static size_t _mstats; /* statistics for maps locking */
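
/*
 * Dirty one long per page so the kernel backs the whole range with
 * resident pages before it is locked.
 */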
static void _touch_memory(void *mem, size_t size)
{
	size_t pagesize = lvm_getpagesize();
	void *pos = mem;
	void *end = mem + size - sizeof(long);

	while (pos < end) {
		*(long *) pos = 1;
		pos += pagesize;
	}
}
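
/*
 * Reserve and pre-fault stack and heap up front, so that later stack
 * growth and heap allocations are served from pages that are already
 * resident when memory is locked.
 */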
static void _allocate_memory(void)
{
	void *stack_mem, *temp_malloc_mem;

	if ((stack_mem = alloca(_size_stack)))
		_touch_memory(stack_mem, _size_stack);

	if ((temp_malloc_mem = malloc(_size_malloc_tmp)))
		_touch_memory(temp_malloc_mem, _size_malloc_tmp);

	if ((_malloc_mem = malloc(_size_malloc)))
		_touch_memory(_malloc_mem, _size_malloc);

	free(temp_malloc_mem);
}

static void _release_memory(void)
{
	free(_malloc_mem);
}

/*
 * mlock/munlock memory areas from /proc/self/maps
 * format described in kernel/Documentation/filesystems/proc.txt
 */
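/*
 * For reference, a maps line looks roughly like:
 *   00400000-0040b000 r-xp 00000000 08:01 1234  /bin/cat
 * Only the address range and the four permission flags are parsed;
 * the rest of the line (ending in the pathname) is matched against
 * the filters below.
 */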
static int _maps_line(struct cmd_context *cmd, lvmlock_t lock,
		      const char *line, size_t *mstats)
{
	const struct config_node *cn;
	struct config_value *cv;
	long from, to;
	int pos, i;
	char fr, fw, fx, fp;
	size_t sz;

	if (sscanf(line, "%lx-%lx %c%c%c%c%n",
		   &from, &to, &fr, &fw, &fx, &fp, &pos) != 6) {
		log_error("Failed to parse maps line: %s", line);
		return 0;
	}

	/* Select readable maps */
	if (fr != 'r') {
		log_debug("mlock area unreadable '%s': Skipping.", line);
		return 1;
	}

	/* Always ignored areas */
	for (i = 0; i < sizeof(_ignore_maps) / sizeof(_ignore_maps[0]); ++i)
		if (strstr(line + pos, _ignore_maps[i])) {
			log_debug("mlock ignore filter '%s' matches '%s': Skipping.",
				  _ignore_maps[i], line);
			return 1;
		}

	sz = to - from;
	if (!(cn = find_config_tree_node(cmd, "activation/mlock_filter"))) {
		/* If no blacklist is configured, use the internal set */
		for (i = 0; i < sizeof(_blacklist_maps) / sizeof(_blacklist_maps[0]); ++i)
			if (strstr(line + pos, _blacklist_maps[i])) {
				log_debug("mlock default filter '%s' matches '%s': Skipping.",
					  _blacklist_maps[i], line);
				return 1;
			}
	} else {
		for (cv = cn->v; cv; cv = cv->next) {
			if ((cv->type != CFG_STRING) || !cv->v.str[0])
				continue;
			if (strstr(line + pos, cv->v.str)) {
				log_debug("mlock_filter '%s' matches '%s': Skipping.",
					  cv->v.str, line);
				return 1;
			}
		}
	}

	*mstats += sz;
	log_debug("%s %10ldKiB %12lx - %12lx %c%c%c%c %s",
		  (lock == LVM_MLOCK) ? "mlock" : "munlock",
		  ((long)sz + 1023) / 1024, from, to, fr, fw, fx, fp, line + pos);

	if (lock == LVM_MLOCK) {
		if (mlock((const void *)from, sz) < 0) {
			log_sys_error("mlock", line);
			return 0;
		}
	} else {
		if (munlock((const void *)from, sz) < 0) {
			log_sys_error("munlock", line);
			return 0;
		}
	}

	return 1;
}

static int _memlock_maps(struct cmd_context *cmd, lvmlock_t lock, size_t *mstats)
{
	char *line = NULL;
	size_t len;
	ssize_t n;
	int ret = 1;

	if (_use_mlockall) {
#ifdef MCL_CURRENT
		if (lock == LVM_MLOCK) {
			if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
				log_sys_error("mlockall", "");
				return 0;
			}
		} else {
			if (munlockall()) {
				log_sys_error("munlockall", "");
				return 0;
			}
		}
		return 1;
#else
		return 0;
#endif
	}

	/* Force libc to load its message catalog (libc.mo) now, not while locked */
	if (lock == LVM_MLOCK)
		(void) strerror(0);

	/* Reset statistic counters */
	*mstats = 0;
	rewind(_mapsh);

	while ((n = getline(&line, &len, _mapsh)) != -1) {
		line[n > 0 ? n - 1 : 0] = '\0'; /* remove \n */
		if (!_maps_line(cmd, lock, line, mstats))
			ret = 0;
	}

	free(line);

	log_debug("%socked %ld bytes",
		  (lock == LVM_MLOCK) ? "L" : "Unl", (long)*mstats);

	return ret;
}

/* Stop memory from being swapped out */
static void _lock_mem(struct cmd_context *cmd)
{
	_allocate_memory();

	/*
	 * For a daemon we need to use mlockall(), so that even a future
	 * addition of a thread which may not even use the lvm library
	 * will not block the memory-locked thread.
	 * Note: assumes _memlock_count_daemon is updated before _memlock_count.
	 */
	_use_mlockall = _memlock_count_daemon ? 1 :
		find_config_tree_bool(cmd, "activation/use_mlockall", DEFAULT_USE_MLOCKALL);

	if (!_use_mlockall) {
		if (!*_procselfmaps &&
		    dm_snprintf(_procselfmaps, sizeof(_procselfmaps),
				"%s" SELF_MAPS, cmd->proc_dir) < 0) {
			log_error("proc_dir too long");
			return;
		}

		if (!(_mapsh = fopen(_procselfmaps, "r"))) {
			log_sys_error("fopen", _procselfmaps);
			return;
		}
	}

	log_very_verbose("Locking memory");
	if (!_memlock_maps(cmd, LVM_MLOCK, &_mstats))
		stack;
	errno = 0;
	if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
		log_sys_error("getpriority", "");
	else
		if (setpriority(PRIO_PROCESS, 0, _default_priority))
			log_error("setpriority %d failed: %s",
				  _default_priority, strerror(errno));
}

static void _unlock_mem(struct cmd_context *cmd)
{
	size_t unlock_mstats;

	log_very_verbose("Unlocking memory");
	if (!_memlock_maps(cmd, LVM_MUNLOCK, &unlock_mstats))
		stack;

	if (!_use_mlockall) {
		if (fclose(_mapsh))
			log_sys_error("fclose", _procselfmaps);

		if (_mstats < unlock_mstats)
			log_error(INTERNAL_ERROR "Maps lock %ld < unlock %ld",
				  (long)_mstats, (long)unlock_mstats);
	}

	if (setpriority(PRIO_PROCESS, 0, _priority))
		log_error("setpriority %u failed: %s", _priority,
			  strerror(errno));
	_release_memory();
}

static void _lock_mem_if_needed(struct cmd_context *cmd)
{
	if ((_memlock_count + _memlock_count_daemon) == 1)
		_lock_mem(cmd);
}

static void _unlock_mem_if_possible(struct cmd_context *cmd)
{
	if ((_memlock_count + _memlock_count_daemon) == 0)
		_unlock_mem(cmd);
}
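
/*
 * The lock is held while either counter is non-zero: the first
 * increment locks memory, the last decrement unlocks it.
 */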
void memlock_inc(struct cmd_context *cmd)
{
	++_memlock_count;
	_lock_mem_if_needed(cmd);
	log_debug("memlock_count inc to %d", _memlock_count);
}

void memlock_dec(struct cmd_context *cmd)
{
	if (!_memlock_count)
		log_error(INTERNAL_ERROR "_memlock_count has dropped below 0.");
	--_memlock_count;
	_unlock_mem_if_possible(cmd);
	log_debug("memlock_count dec to %d", _memlock_count);
}

/*
 * The memlock_*_daemon functions will force the mlockall() call that we need
 * to stay in memory, but they will have no effect on device scans (unlike
 * normal memlock_inc and memlock_dec). Memory is kept locked as long as either
 * of memlock or memlock_daemon is in effect.
 */
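/*
 * Hypothetical usage from a long-running daemon such as dmeventd
 * (a sketch only; the real call sites live outside this file):
 *
 *	memlock_inc_daemon(cmd);	// pin the whole process
 *	... monitor and react to device-mapper events ...
 *	memlock_dec_daemon(cmd);	// allow paging again
 */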
void memlock_inc_daemon(struct cmd_context *cmd)
{
	++_memlock_count_daemon;
	if (_memlock_count_daemon == 1 && _memlock_count > 0)
		log_error(INTERNAL_ERROR "_memlock_inc_daemon used after _memlock_inc.");
	_lock_mem_if_needed(cmd);
	log_debug("memlock_count_daemon inc to %d", _memlock_count_daemon);
}

void memlock_dec_daemon(struct cmd_context *cmd)
{
	if (!_memlock_count_daemon)
		log_error(INTERNAL_ERROR "_memlock_count_daemon has dropped below 0.");
	--_memlock_count_daemon;
	_unlock_mem_if_possible(cmd);
	log_debug("memlock_count_daemon dec to %d", _memlock_count_daemon);
}

/*
 * This disregards the daemon (dmeventd) locks, since we use memlock() to check
 * whether it is safe to run a device scan, which would normally coincide with
 * !memlock() -- but the daemon global memory lock breaks this assumption, so
 * we do not take those into account here.
 */
int memlock(void)
{
	return _memlock_count;
}
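
/*
 * A caller gating a device scan might look like this (illustrative only;
 * the real checks live in the device-handling code):
 *
 *	if (memlock()) {
 *		log_error("Refusing to scan devices while memory is locked.");
 *		return 0;
 *	}
 */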

void memlock_init(struct cmd_context *cmd)
{
	_size_stack = find_config_tree_int(cmd,
					   "activation/reserved_stack",
					   DEFAULT_RESERVED_STACK) * 1024;
	_size_malloc_tmp = find_config_tree_int(cmd,
						"activation/reserved_memory",
						DEFAULT_RESERVED_MEMORY) * 1024;
	_default_priority = find_config_tree_int(cmd,
						 "activation/process_priority",
						 DEFAULT_PROCESS_PRIORITY);
}

#endif