/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/misc/lib.h"
#include "lib/mm/memlock.h"
#include "lib/config/defaults.h"
#include "lib/config/config.h"
#include "lib/commands/toolcontext.h"

#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <malloc.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#endif
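
/*
 * Without device-mapper support there is nothing to suspend, so all the
 * memory locking entry points below collapse to no-op stubs.
 */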
#ifndef DEVMAPPER_SUPPORT

void memlock_inc_daemon(struct cmd_context *cmd)
{
	return;
}

void memlock_dec_daemon(struct cmd_context *cmd)
{
	return;
}

void critical_section_inc(struct cmd_context *cmd, const char *reason)
{
	return;
}

void critical_section_dec(struct cmd_context *cmd, const char *reason)
{
	return;
}

int critical_section(void)
{
	return 0;
}

void memlock_init(struct cmd_context *cmd)
{
	return;
}

void memlock_unlock(struct cmd_context *cmd)
{
	return;
}

void memlock_reset(void)
{
	return;
}

int memlock_count_daemon(void)
{
	return 0;
}

#else /* DEVMAPPER_SUPPORT */

static size_t _size_stack;
static size_t _size_malloc_tmp;
static size_t _size_malloc = 2000000;

static void *_malloc_mem = NULL;
static int _mem_locked = 0;
static int _priority_raised = 0;
static int _critical_section = 0;
static int _prioritized_section = 0;
static int _memlock_count_daemon = 0;
static int _priority;
static int _default_priority;

/* List of maps that are unconditionally ignored */
static const char * const _ignore_maps[] = {
	"[vdso]",
	"[vsyscall]",
	"[vectors]",
};

/* Default blacklist for maps */
static const char * const _blacklist_maps[] = {
	"locale/locale-archive",
	"/LC_MESSAGES/",
	"gconv/gconv-modules.cache",
	"/ld-2.",		/* not using dlopen,dlsym during mlock */
	"/libaio.so.",		/* not using aio during mlock */
	"/libattr.so.",		/* not using during mlock (udev) */
	"/libblkid.so.",	/* not using blkid during mlock (udev) */
	"/libbz2.so.",		/* not using during mlock (udev) */
	"/libcap.so.",		/* not using during mlock (systemd) */
	"/libdl-",		/* not using dlopen,dlsym during mlock */
	"/libdw-",		/* not using during mlock (udev) */
	"/libelf-",		/* not using during mlock (udev) */
	"/libgcrypt.so.",	/* not using during mlock (systemd) */
	"/libgpg-error.so.",	/* not using gpg-error during mlock (systemd) */
	"/liblz4.so.",		/* not using lz4 during mlock (systemd) */
	"/liblzma.so.",		/* not using lzma during mlock (systemd) */
	"/libmount.so.",	/* not using mount during mlock (udev) */
	"/libncurses.so.",	/* not using ncurses during mlock */
	"/libpcre.so.",		/* not using pcre during mlock (selinux) */
	"/libpcre2-",		/* not using pcre during mlock (selinux) */
	"/libreadline.so.",	/* not using readline during mlock */
	"/libresolv-",		/* not using during mlock (udev) */
	"/libselinux.so.",	/* not using selinux during mlock */
	"/libsepol.so.",	/* not using sepol during mlock */
	"/libsystemd.so.",	/* not using systemd during mlock */
	"/libtinfo.so.",	/* not using tinfo during mlock */
	"/libudev.so.",		/* not using udev during mlock */
	"/libuuid.so.",		/* not using uuid during mlock (blkid) */
	"/libz.so.",		/* not using during mlock (udev) */
	"/etc/selinux",		/* not using selinux during mlock */
	/* "/libdevmapper-event.so" */
};

typedef enum { LVM_MLOCK, LVM_MUNLOCK } lvmlock_t;

static unsigned _use_mlockall;
static int _maps_fd;
static size_t _maps_len = 8192; /* Initial buffer size for reading /proc/self/maps */
static char *_maps_buffer;
static char _procselfmaps[PATH_MAX] = "";

#define SELF_MAPS "/self/maps"

static size_t _mstats; /* Statistics for maps locking */
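
/* Touch one word in every page of the region so it is fully committed before locking */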
static void _touch_memory(void *mem, size_t size)
{
	size_t pagesize = lvm_getpagesize();
	char *pos = mem;
	char *end = pos + size - sizeof(long);

	while (pos < end) {
		*(long *) pos = 1;
		pos += pagesize;
	}
}

static void _allocate_memory(void)
{
#ifndef VALGRIND_POOL
	void *stack_mem;
	struct rlimit limit;
	int i, area = 0, missing = _size_malloc_tmp, max_areas = 32, hblks;
	char *areas[max_areas];

	/* Check if we could preallocate requested stack */
	if ((getrlimit(RLIMIT_STACK, &limit) == 0) &&
	    ((_size_stack * 2) < limit.rlim_cur) &&
	    ((stack_mem = alloca(_size_stack))))
		_touch_memory(stack_mem, _size_stack);
	/* FIXME else warn user setting got ignored */

	/*
	 * When a brk() fails due to fragmented address space (which sometimes
	 * happens when we try to grab 8M or so), glibc will make a new
	 * arena. In this arena, the rules for using "direct" mmap are relaxed,
	 * circumventing the MAX_MMAPs and MMAP_THRESHOLD settings. We can,
	 * however, detect when this happens with mallinfo() and try to co-opt
	 * malloc into using MMAP as a MORECORE substitute instead of returning
	 * MMAP'd memory directly. Since MMAP-as-MORECORE does not munmap the
	 * memory on free(), this is good enough for our purposes.
	 */
	while (missing > 0) {
		struct mallinfo inf = mallinfo();
		hblks = inf.hblks;

		if ((areas[area] = malloc(_size_malloc_tmp)))
			_touch_memory(areas[area], _size_malloc_tmp);

		inf = mallinfo();

		if (hblks < inf.hblks) {
			/* malloc cheated and used mmap, even though we told it
			   not to; we try with twice as many areas, each half
			   the size, to circumvent the faulty logic in glibc */
			free(areas[area]);
			_size_malloc_tmp /= 2;
		} else {
			++area;
			missing -= _size_malloc_tmp;
		}

		if (area == max_areas && missing > 0) {
			/* Too bad. Warn the user and proceed, as things are
			 * most likely going to work out anyway. */
			log_warn("WARNING: Failed to reserve memory, %d bytes missing.", missing);
			break;
		}
	}

	if ((_malloc_mem = malloc(_size_malloc)))
		_touch_memory(_malloc_mem, _size_malloc);

	/* Free up the reserves so subsequent mallocs can use that memory */
	for (i = 0; i < area; ++i)
		free(areas[i]);
#endif
}

static void _release_memory(void)
{
	free(_malloc_mem);
}

/*
 * mlock/munlock memory areas from /proc/self/maps
 * format described in kernel/Documentation/filesystems/proc.txt
 */
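/*
 * An illustrative maps line (example only, not from any real system):
 *   7f2c4a1d3000-7f2c4a38e000 r-xp 00000000 08:01 1048602  /usr/lib64/libc-2.28.so
 * Only the address range and the permission flags are parsed below; the rest
 * of the line is matched against the ignore/blacklist filters.
 */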
static int _maps_line(const struct dm_config_node *cn, lvmlock_t lock,
		      const char *line, size_t *mstats)
{
	const struct dm_config_value *cv;
	long from, to;
	int pos;
	unsigned i;
	char fr, fw, fx, fp;
	size_t sz;
	const char *lock_str = (lock == LVM_MLOCK) ? "mlock" : "munlock";

	if (sscanf(line, "%lx-%lx %c%c%c%c%n",
		   &from, &to, &fr, &fw, &fx, &fp, &pos) != 6) {
		log_error("Failed to parse maps line: %s", line);
		return 0;
	}

	/* Select readable maps */
	if (fr != 'r') {
		log_debug_mem("%s area unreadable %s : Skipping.", lock_str, line);
		return 1;
	}

	/* Always ignored areas */
	for (i = 0; i < DM_ARRAY_SIZE(_ignore_maps); ++i)
		if (strstr(line + pos, _ignore_maps[i])) {
			log_debug_mem("%s ignore filter '%s' matches '%s': Skipping.",
				      lock_str, _ignore_maps[i], line);
			return 1;
		}

	sz = to - from;

	if (!cn) {
		/* If no blacklist configured, use an internal set */
		for (i = 0; i < DM_ARRAY_SIZE(_blacklist_maps); ++i)
			if (strstr(line + pos, _blacklist_maps[i])) {
				log_debug_mem("%s default filter '%s' matches '%s': Skipping.",
					      lock_str, _blacklist_maps[i], line);
				return 1;
			}
	} else {
		for (cv = cn->v; cv; cv = cv->next) {
			if ((cv->type != DM_CFG_STRING) || !cv->v.str[0])
				continue;
			if (strstr(line + pos, cv->v.str)) {
				log_debug_mem("%s_filter '%s' matches '%s': Skipping.",
					      lock_str, cv->v.str, line);
				return 1;
			}
		}
	}

#ifdef HAVE_VALGRIND
	/*
	 * Valgrind is continually eating memory while executing code,
	 * so we need to deactivate the check of locked memory size.
	 */
#ifndef VALGRIND_POOL
	if (RUNNING_ON_VALGRIND)
#endif
		sz -= sz; /* = 0, but avoids a warning about a dead assignment */
#endif

	*mstats += sz;
	log_debug_mem("%s %10ldKiB %12lx - %12lx %c%c%c%c%s", lock_str,
		      ((long) sz + 1023) / 1024, from, to, fr, fw, fx, fp, line + pos);

	if (lock == LVM_MLOCK) {
		if (mlock((const void *) from, sz) < 0) {
			log_sys_error("mlock", line);
			return 0;
		}
	} else {
		if (munlock((const void *) from, sz) < 0) {
			log_sys_error("munlock", line);
			return 0;
		}
	}

	return 1;
}

static int _memlock_maps(struct cmd_context *cmd, lvmlock_t lock, size_t *mstats)
{
	const struct dm_config_node *cn;
	char *line, *line_end;
	size_t len;
	ssize_t n;
	int ret = 1;

	if (_use_mlockall) {
#ifdef MCL_CURRENT
		if (lock == LVM_MLOCK) {
			if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
				log_sys_error("mlockall", "");
				return 0;
			}
		} else {
			if (munlockall()) {
				log_sys_error("munlockall", "");
				return 0;
			}
		}
		return 1;
#else
		return 0;
#endif
	}

	/* Reset statistics counters */
	*mstats = 0;

	/* Read the mappings into a single memory chunk so the buffer is not
	 * reallocated in the middle of reading the maps file */
	for (len = 0;;) {
		if (!_maps_buffer || len >= _maps_len) {
			if (_maps_buffer)
				_maps_len *= 2;
			if (!(line = realloc(_maps_buffer, _maps_len))) {
				log_error("Allocation of maps buffer failed.");
				return 0;
			}
			_maps_buffer = line;
		}

		if (lseek(_maps_fd, 0, SEEK_SET))
			log_sys_error("lseek", _procselfmaps);

		for (len = 0; len < _maps_len; len += n) {
			if (!(n = read(_maps_fd, _maps_buffer + len, _maps_len - len)))
				break; /* EOF */
			if (n == -1) {
				log_sys_error("read", _procselfmaps);
				return 0;
			}
		}

		if (len < _maps_len) { /* Fits in buffer */
			_maps_buffer[len] = '\0';
			break;
		}
	}

	line = _maps_buffer;
	cn = find_config_tree_array(cmd, activation_mlock_filter_CFG, NULL);

	while ((line_end = strchr(line, '\n'))) {
		*line_end = '\0'; /* Remove \n */
		if (!_maps_line(cn, lock, line, mstats))
			ret = 0;
		line = line_end + 1;
	}

	log_debug_mem("%socked %ld bytes",
		      (lock == LVM_MLOCK) ? "L" : "Unl", (long) *mstats);

	return ret;
}

#ifdef DEBUG_MEMLOCK
/*
 * LVM is not supposed to use mmap while devices are suspended.
 * This code causes a core dump if it gets called.
 */
#ifdef __i386__
#define ARCH_X86
#endif /* __i386__ */
#ifdef __x86_64__
#ifndef ARCH_X86
#define ARCH_X86
#endif /* ARCH_X86 */
#endif /* __x86_64__ */

#endif /* DEBUG_MEMLOCK */

#ifdef ARCH_X86
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <dlfcn.h>

static const unsigned char _instruction_hlt = 0x94;
static char _mmap_orig;
static unsigned char *_mmap_addr;
#ifdef __i386__
static char _mmap64_orig;
static unsigned char *_mmap64_addr;
#endif /* __i386__ */
#endif /* ARCH_X86 */
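
/*
 * Sketch of the assumption used below: when mmap() is reached through the
 * PLT, the stub begins with the bytes ff 25, an indirect jmp through a
 * pointer slot (RIP-relative on x86_64, an absolute address on i386).
 * Following that slot yields the real function entry, whose first byte is
 * then overwritten with a trapping instruction while devices are suspended.
 */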
static int _disable_mmap(void)
{
#ifdef ARCH_X86
	volatile unsigned char *abs_addr;

	if (!_mmap_addr) {
		_mmap_addr = (unsigned char *) dlsym(RTLD_NEXT, "mmap");
		if (_mmap_addr[0] == 0xff && _mmap_addr[1] == 0x25) { /* plt */
#ifdef __x86_64__
			abs_addr = _mmap_addr + 6 + *(int32_t *) (_mmap_addr + 2);
#endif /* __x86_64__ */
#ifdef __i386__
			abs_addr = *(void **) (_mmap_addr + 2);
#endif /* __i386__ */
			_mmap_addr = *(void **) abs_addr;
		} else
			log_debug_mem("Can't find PLT jump entry assuming -fPIE linkage.");
		if (mprotect((void *) ((unsigned long) _mmap_addr & ~4095UL), 4096, PROT_READ | PROT_WRITE | PROT_EXEC)) {
			log_sys_error("mprotect", "");
			_mmap_addr = NULL;
			return 0;
		}
		_mmap_orig = *_mmap_addr;
	}
	log_debug_mem("Remapping mmap entry %02x to %02x.", _mmap_orig, _instruction_hlt);
	*_mmap_addr = _instruction_hlt;

#ifdef __i386__
	if (!_mmap64_addr) {
		_mmap64_addr = (unsigned char *) dlsym(RTLD_NEXT, "mmap64");
		if (_mmap64_addr[0] == 0xff && _mmap64_addr[1] == 0x25) {
			abs_addr = *(void **) (_mmap64_addr + 2);
			_mmap64_addr = *(void **) abs_addr;
		} /* Can't find PLT jump entry assuming -fPIE linkage */
		if (mprotect((void *) ((unsigned long) _mmap64_addr & ~4095UL), 4096, PROT_READ | PROT_WRITE | PROT_EXEC)) {
			log_sys_error("mprotect", "");
			_mmap64_addr = NULL;
			return 0;
		}
		_mmap64_orig = *_mmap64_addr;
	}
	*_mmap64_addr = _instruction_hlt;
#endif /* __i386__ */
#endif /* ARCH_X86 */

	return 1;
}

static int _restore_mmap(void)
{
#ifdef ARCH_X86
	if (_mmap_addr)
		*_mmap_addr = _mmap_orig;
#ifdef __i386__
	if (_mmap64_addr)
		*_mmap64_addr = _mmap64_orig;
#endif /* __i386__ */
	log_debug_mem("Restored mmap entry.");
#endif /* ARCH_X86 */

	return 1;
}
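
/*
 * The process runs at the configured elevated priority while a prioritized
 * or critical section is open, so device-mapper table manipulation is not
 * delayed behind ordinary workloads; the original priority is restored via
 * memlock_unlock() once neither a critical section nor a daemon lock is held.
 */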
static void _raise_priority(struct cmd_context *cmd)
{
	if (_priority_raised)
		return;

	_priority_raised = 1;
	errno = 0;
	if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
		log_sys_debug("getpriority", "");
	else if (_default_priority < _priority) {
		if (setpriority(PRIO_PROCESS, 0, _default_priority) == 0)
			log_debug_activation("Raised task priority %d -> %d.",
					     _priority, _default_priority);
		else
			log_warn("WARNING: setpriority %d failed: %s.",
				 _default_priority, strerror(errno));
	}
}

static void _restore_priority_if_possible(struct cmd_context *cmd)
{
	if (!_priority_raised || _critical_section || _memlock_count_daemon)
		return;

	if (setpriority(PRIO_PROCESS, 0, _priority) == 0)
		log_debug_activation("Restoring original task priority %d.", _priority);
	else
		log_warn("WARNING: setpriority %d failed: %s.",
			 _priority, strerror(errno));

	_priority_raised = 0;
}

/* Stop memory getting swapped out */
static void _lock_mem(struct cmd_context *cmd)
{
	_allocate_memory();
	(void) strerror(0);		   /* Force libc.mo load */
	(void) dm_udev_get_sync_support(); /* udev is initialized */
	log_very_verbose("Locking memory");

	/*
	 * For a daemon we need to use mlockall() so that even a future
	 * addition of a thread which may not even use the lvm lib
	 * will not block the memory-locked thread.
	 * Note: assuming _memlock_count_daemon is updated before _memlock_count
	 */
	_use_mlockall = _memlock_count_daemon ? 1 :
		find_config_tree_bool(cmd, activation_use_mlockall_CFG, NULL);

	if (!_use_mlockall) {
		if (!*_procselfmaps &&
		    dm_snprintf(_procselfmaps, sizeof(_procselfmaps),
				"%s" SELF_MAPS, cmd->proc_dir) < 0) {
			log_error("proc_dir too long");
			return;
		}

		if ((_maps_fd = open(_procselfmaps, O_RDONLY)) < 0) {
			log_sys_error("open", _procselfmaps);
			return;
		}

		if (!_disable_mmap())
			stack;
	}

	if (!_memlock_maps(cmd, LVM_MLOCK, &_mstats))
		stack;
}

static void _unlock_mem(struct cmd_context *cmd)
{
	size_t unlock_mstats;

	log_very_verbose("Unlocking memory");

	if (!_memlock_maps(cmd, LVM_MUNLOCK, &unlock_mstats))
		stack;

	if (!_use_mlockall) {
		_restore_mmap();
		if (close(_maps_fd))
			log_sys_error("close", _procselfmaps);
		free(_maps_buffer);
		_maps_buffer = NULL;
		if (_mstats < unlock_mstats) {
			if ((_mstats + lvm_getpagesize()) < unlock_mstats)
				log_error(INTERNAL_ERROR
					  "Reserved memory (%ld) not enough: used %ld. Increase activation/reserved_memory?",
					  (long) _mstats, (long) unlock_mstats);
			else
				/* FIXME Believed due to incorrect use of yes_no_prompt while locks held */
				log_debug_mem("Suppressed internal error: Maps lock %ld < unlock %ld, a one-page difference.",
					      (long) _mstats, (long) unlock_mstats);
		}
	}

	_restore_priority_if_possible(cmd);

	_release_memory();
}

static void _lock_mem_if_needed(struct cmd_context *cmd)
{
	log_debug_mem("Lock: Memlock counters: prioritized:%d locked:%d critical:%d daemon:%d suspended:%d",
		      _priority_raised, _mem_locked, _critical_section, _memlock_count_daemon, dm_get_suspended_counter());
	if (!_mem_locked &&
	    ((_critical_section + _memlock_count_daemon) == 1)) {
		_mem_locked = 1;
		_lock_mem(cmd);
	}
}

static void _unlock_mem_if_possible(struct cmd_context *cmd)
{
	log_debug_mem("Unlock: Memlock counters: prioritized:%d locked:%d critical:%d daemon:%d suspended:%d",
		      _priority_raised, _mem_locked, _critical_section, _memlock_count_daemon, dm_get_suspended_counter());
	if (_mem_locked &&
	    !_critical_section &&
	    !_memlock_count_daemon) {
		_unlock_mem(cmd);
		_mem_locked = 0;
	}
}

/*
 * The critical section is only entered with the "suspending" reason.
 * Other reasons only raise the process priority, so table manipulation
 * remains fast.
 *
 * Memory stays locked until 'memlock_unlock()' is called, so when possible
 * it may stay locked across multiple critical section entrances.
 */
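/*
 * An illustrative (assumed) call sequence from a caller's point of view:
 *   critical_section_inc(cmd, "suspending");  - locks memory, raises priority
 *   ... suspend/resume of device-mapper tables ...
 *   critical_section_dec(cmd, "resumed");     - leaves the critical section
 *   memlock_unlock(cmd);                      - munlocks, restores priority
 */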
void critical_section_inc(struct cmd_context *cmd, const char *reason)
{
	if (!_critical_section &&
	    (strcmp(reason, "suspending") == 0)) {
		/*
		 * Profiles are loaded on-demand so make sure that before
		 * entering the critical section all needed profiles are
		 * loaded to avoid the disk access later.
		 */
		(void) load_pending_profiles(cmd);
		_critical_section = 1;
		log_debug_activation("Entering critical section (%s).", reason);
		_lock_mem_if_needed(cmd);
	} else
		log_debug_activation("Entering prioritized section (%s).", reason);

	_raise_priority(cmd);
	_prioritized_section++;
}

void critical_section_dec(struct cmd_context *cmd, const char *reason)
{
	if (_critical_section && !dm_get_suspended_counter()) {
		_critical_section = 0;
		log_debug_activation("Leaving critical section (%s).", reason);
	} else
		log_debug_activation("Leaving section (%s).", reason);

	if (_prioritized_section > 0)
		_prioritized_section--;
}

int critical_section(void)
{
	return _critical_section;
}

int prioritized_section(void)
{
	return _prioritized_section;
}

/*
 * The memlock_*_daemon functions will force the mlockall() call that we need
 * to stay in memory, but they will have no effect on device scans (unlike
 * normal critical_section_inc/dec). Memory is kept locked as long as either
 * of critical_section or memlock_daemon is in effect.
 */
void memlock_inc_daemon(struct cmd_context *cmd)
{
	++_memlock_count_daemon;
	if (_memlock_count_daemon == 1 && _critical_section > 0)
		log_error(INTERNAL_ERROR "_memlock_inc_daemon used in critical section.");
	log_debug_mem("memlock_count_daemon inc to %d", _memlock_count_daemon);
	_lock_mem_if_needed(cmd);
	_raise_priority(cmd);
}

void memlock_dec_daemon(struct cmd_context *cmd)
{
	if (!_memlock_count_daemon)
		log_error(INTERNAL_ERROR "_memlock_count_daemon has dropped below 0.");
	--_memlock_count_daemon;
	log_debug_mem("memlock_count_daemon dec to %d", _memlock_count_daemon);
	_unlock_mem_if_possible(cmd);
}
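
/*
 * memlock_init() pulls its limits from the activation section of lvm.conf.
 * A minimal illustrative sketch of the relevant settings (values are examples):
 *   activation {
 *       reserved_stack = 64       # KiB preallocated on the stack
 *       reserved_memory = 8192    # KiB preallocated from the heap
 *       process_priority = -18    # priority used while prioritized
 *   }
 */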
void memlock_init(struct cmd_context *cmd)
{
	/* When threaded, the caller already limited stack size so just use the default. */
	_size_stack = 1024ULL * (cmd->threaded ? DEFAULT_RESERVED_STACK :
				 find_config_tree_int(cmd, activation_reserved_stack_CFG, NULL));
	_size_malloc_tmp = find_config_tree_int(cmd, activation_reserved_memory_CFG, NULL) * 1024ULL;
	_default_priority = find_config_tree_int(cmd, activation_process_priority_CFG, NULL);
}

void memlock_reset(void)
{
	log_debug_mem("memlock reset.");
	_mem_locked = 0;
	_priority_raised = 0;
	_critical_section = 0;
	_prioritized_section = 0;
	_memlock_count_daemon = 0;
}

void memlock_unlock(struct cmd_context *cmd)
{
	_unlock_mem_if_possible(cmd);
	_restore_priority_if_possible(cmd);
}

int memlock_count_daemon(void)
{
	return _memlock_count_daemon;
}

#endif