2003-07-05 02:34:56 +04:00
/*
2008-01-30 17:00:02 +03:00
* Copyright ( C ) 2003 - 2004 Sistina Software , Inc . All rights reserved .
2011-02-18 17:16:11 +03:00
* Copyright ( C ) 2004 - 2011 Red Hat , Inc . All rights reserved .
2003-07-05 02:34:56 +04:00
*
2004-03-30 23:35:44 +04:00
* This file is part of LVM2 .
*
* This copyrighted material is made available to anyone wishing to use ,
* modify , copy , or redistribute it subject to the terms and conditions
2007-08-21 00:55:30 +04:00
* of the GNU Lesser General Public License v .2 .1 .
2004-03-30 23:35:44 +04:00
*
2007-08-21 00:55:30 +04:00
* You should have received a copy of the GNU Lesser General Public License
2004-03-30 23:35:44 +04:00
* along with this program ; if not , write to the Free Software Foundation ,
* Inc . , 59 Temple Place , Suite 330 , Boston , MA 02111 - 1307 USA
2003-07-05 02:34:56 +04:00
*/
# include "lib.h"
# include "memlock.h"
# include "defaults.h"
2004-03-27 00:11:34 +03:00
# include "config.h"
# include "toolcontext.h"
2003-07-05 02:34:56 +04:00
# include <limits.h>
# include <fcntl.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# ifndef DEVMAPPER_SUPPORT
/* No-op when built without device-mapper support: nothing to pin. */
void memlock_inc_daemon(struct cmd_context *cmd)
{
}
/* No-op when built without device-mapper support: nothing to unpin. */
void memlock_dec_daemon(struct cmd_context *cmd)
{
}
/* Critical sections are meaningless without device-mapper support. */
void critical_section_inc(struct cmd_context *cmd, const char *reason)
{
}
/* Critical sections are meaningless without device-mapper support. */
void critical_section_dec(struct cmd_context *cmd, const char *reason)
{
}
/* Without device-mapper support we are never inside a critical section. */
int critical_section(void)
{
	return 0;
}
/* Nothing to configure when built without device-mapper support. */
void memlock_init(struct cmd_context *cmd)
{
}
/* No locked memory to release without device-mapper support. */
void memlock_unlock(struct cmd_context *cmd)
{
}
/* No lock state to reset without device-mapper support. */
void memlock_reset(void)
{
}
# else /* DEVMAPPER_SUPPORT */
static size_t _size_stack ;
static size_t _size_malloc_tmp ;
static size_t _size_malloc = 2000000 ;
static void * _malloc_mem = NULL ;
2011-02-18 17:16:11 +03:00
static int _mem_locked = 0 ;
2011-06-13 07:32:45 +04:00
static int _critical_section = 0 ;
2009-11-19 04:11:57 +03:00
static int _memlock_count_daemon = 0 ;
2003-07-05 02:34:56 +04:00
static int _priority ;
static int _default_priority ;
2010-03-05 17:48:33 +03:00
/* list of maps, that are unconditionaly ignored */
static const char * const _ignore_maps [ ] = {
2010-03-08 18:55:52 +03:00
" [vdso] " ,
" [vsyscall] " ,
2012-11-20 12:58:53 +04:00
" [vectors] " ,
2010-03-05 17:48:33 +03:00
} ;
/* default blacklist for maps */
static const char * const _blacklist_maps [ ] = {
" locale/locale-archive " ,
2011-03-30 17:06:13 +04:00
" /LC_MESSAGES/ " ,
2010-03-05 17:48:33 +03:00
" gconv/gconv-modules.cache " ,
" /libreadline.so. " , /* not using readline during mlock */
" /libncurses.so. " , /* not using readline during mlock */
2011-03-30 17:06:13 +04:00
" /libtinfo.so. " , /* not using readline during mlock */
2010-03-05 17:48:33 +03:00
" /libdl- " , /* not using dlopen,dlsym during mlock */
/* "/libdevmapper-event.so" */
} ;
typedef enum { LVM_MLOCK , LVM_MUNLOCK } lvmlock_t ;
2010-03-09 06:16:11 +03:00
static unsigned _use_mlockall ;
2010-09-30 15:32:40 +04:00
static int _maps_fd ;
static size_t _maps_len = 8192 ; /* Initial buffer size for reading /proc/self/maps */
static char * _maps_buffer ;
2010-03-09 06:16:11 +03:00
static char _procselfmaps [ PATH_MAX ] = " " ;
2010-03-09 13:25:50 +03:00
# define SELF_MAPS " / self / maps"
2010-03-09 06:16:11 +03:00
2010-03-30 18:41:58 +04:00
static size_t _mstats ; /* statistic for maps locking */
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
/*
 * Write one word into every page of [mem, mem + size) so the kernel
 * instantiates the pages before they get pinned with mlock().
 */
static void _touch_memory(void *mem, size_t size)
{
	size_t pagesize = lvm_getpagesize();
	char *pos = mem;
	char *end = pos + size - sizeof(long);

	while (pos < end) {
		*(long *) pos = 1;
		pos += pagesize;
	}
}
static void _allocate_memory ( void )
{
void * stack_mem , * temp_malloc_mem ;
2012-06-22 13:15:14 +04:00
struct rlimit limit ;
2003-07-05 02:34:56 +04:00
2012-06-22 13:15:14 +04:00
/* Check if we could preallocate requested stack */
if ( ( getrlimit ( RLIMIT_STACK , & limit ) = = 0 ) & &
( ( _size_stack * 2 ) < limit . rlim_cur ) & &
( ( stack_mem = alloca ( _size_stack ) ) ) )
2003-07-05 02:34:56 +04:00
_touch_memory ( stack_mem , _size_stack ) ;
2012-08-07 21:34:30 +04:00
/* FIXME else warn user setting got ignored */
2003-07-05 02:34:56 +04:00
if ( ( temp_malloc_mem = malloc ( _size_malloc_tmp ) ) )
_touch_memory ( temp_malloc_mem , _size_malloc_tmp ) ;
if ( ( _malloc_mem = malloc ( _size_malloc ) ) )
_touch_memory ( _malloc_mem , _size_malloc ) ;
free ( temp_malloc_mem ) ;
}
/* Release the heap reserve obtained in _allocate_memory(). */
static void _release_memory(void)
{
	free(_malloc_mem);
}
2010-03-05 17:48:33 +03:00
/*
* mlock / munlock memory areas from / proc / self / maps
* format described in kernel / Documentation / filesystem / proc . txt
*/
2011-08-30 18:55:15 +04:00
static int _maps_line ( const struct dm_config_node * cn , lvmlock_t lock ,
2012-01-12 22:29:07 +04:00
const char * line , size_t * mstats )
2003-07-05 02:34:56 +04:00
{
2011-08-30 18:55:15 +04:00
const struct dm_config_value * cv ;
2010-03-05 17:48:33 +03:00
long from , to ;
2011-04-08 18:40:18 +04:00
int pos ;
unsigned i ;
2010-03-05 17:48:33 +03:00
char fr , fw , fx , fp ;
size_t sz ;
2012-02-01 14:48:22 +04:00
const char * lock_str = ( lock = = LVM_MLOCK ) ? " mlock " : " munlock " ;
2010-03-05 17:48:33 +03:00
if ( sscanf ( line , " %lx-%lx %c%c%c%c%n " ,
& from , & to , & fr , & fw , & fx , & fp , & pos ) ! = 6 ) {
log_error ( " Failed to parse maps line: %s " , line ) ;
return 0 ;
}
2010-03-09 13:25:50 +03:00
/* Select readable maps */
2010-03-09 15:31:51 +03:00
if ( fr ! = ' r ' ) {
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %s area unreadable %s : Skipping. " , lock_str , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
2010-03-09 15:31:51 +03:00
}
2010-03-05 17:48:33 +03:00
/* always ignored areas */
for ( i = 0 ; i < sizeof ( _ignore_maps ) / sizeof ( _ignore_maps [ 0 ] ) ; + + i )
2010-03-09 15:31:51 +03:00
if ( strstr ( line + pos , _ignore_maps [ i ] ) ) {
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %s ignore filter '%s' matches '%s': Skipping. " ,
lock_str , _ignore_maps [ i ] , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
2010-03-09 15:31:51 +03:00
}
2010-03-05 17:48:33 +03:00
sz = to - from ;
2010-10-15 13:48:23 +04:00
if ( ! cn ) {
2010-03-05 17:48:33 +03:00
/* If no blacklist configured, use an internal set */
for ( i = 0 ; i < sizeof ( _blacklist_maps ) / sizeof ( _blacklist_maps [ 0 ] ) ; + + i )
if ( strstr ( line + pos , _blacklist_maps [ i ] ) ) {
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %s default filter '%s' matches '%s': Skipping. " ,
lock_str , _blacklist_maps [ i ] , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
}
} else {
for ( cv = cn - > v ; cv ; cv = cv - > next ) {
2011-08-30 18:55:15 +04:00
if ( ( cv - > type ! = DM_CFG_STRING ) | | ! cv - > v . str [ 0 ] )
2010-03-05 17:48:33 +03:00
continue ;
if ( strstr ( line + pos , cv - > v . str ) ) {
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %s_filter '%s' matches '%s': Skipping. " ,
lock_str , cv - > v . str , line ) ;
2010-03-05 17:48:33 +03:00
return 1 ;
}
}
}
2011-03-30 16:43:32 +04:00
# ifdef VALGRIND_POOL
/*
* Valgrind is continually eating memory while executing code
* so we need to deactivate check of locked memory size
*/
sz - = sz ; /* = 0, but avoids getting warning about dead assigment */
# endif
2010-03-30 18:41:58 +04:00
* mstats + = sz ;
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %s %10ldKiB %12lx - %12lx %c%c%c%c%s " , lock_str ,
( ( long ) sz + 1023 ) / 1024 , from , to , fr , fw , fx , fp , line + pos ) ;
2010-03-05 17:48:33 +03:00
if ( lock = = LVM_MLOCK ) {
if ( mlock ( ( const void * ) from , sz ) < 0 ) {
log_sys_error ( " mlock " , line ) ;
return 0 ;
}
} else {
if ( munlock ( ( const void * ) from , sz ) < 0 ) {
log_sys_error ( " munlock " , line ) ;
return 0 ;
}
}
return 1 ;
}
2010-03-30 18:41:58 +04:00
static int _memlock_maps ( struct cmd_context * cmd , lvmlock_t lock , size_t * mstats )
2010-03-05 17:48:33 +03:00
{
2011-08-30 18:55:15 +04:00
const struct dm_config_node * cn ;
2010-09-30 15:32:40 +04:00
char * line , * line_end ;
2010-03-05 17:48:33 +03:00
size_t len ;
2010-03-09 06:16:11 +03:00
ssize_t n ;
2010-04-01 17:43:12 +04:00
int ret = 1 ;
2010-03-05 17:48:33 +03:00
2010-03-09 06:16:11 +03:00
if ( _use_mlockall ) {
2003-07-05 02:34:56 +04:00
# ifdef MCL_CURRENT
2010-03-05 17:48:33 +03:00
if ( lock = = LVM_MLOCK ) {
if ( mlockall ( MCL_CURRENT | MCL_FUTURE ) ) {
log_sys_error ( " mlockall " , " " ) ;
return 0 ;
}
} else {
if ( munlockall ( ) ) {
log_sys_error ( " munlockall " , " " ) ;
return 0 ;
}
}
return 1 ;
# else
return 0 ;
2003-07-05 02:34:56 +04:00
# endif
2010-03-05 17:48:33 +03:00
}
2010-06-24 12:29:30 +04:00
/* Force libc.mo load */
if ( lock = = LVM_MLOCK )
( void ) strerror ( 0 ) ;
2010-03-30 18:41:23 +04:00
/* Reset statistic counters */
2010-03-30 18:41:58 +04:00
* mstats = 0 ;
2010-03-30 18:41:23 +04:00
2010-09-30 15:32:40 +04:00
/* read mapping into a single memory chunk without reallocation
* in the middle of reading maps file */
for ( len = 0 ; ; ) {
if ( ! _maps_buffer | | len > = _maps_len ) {
if ( _maps_buffer )
_maps_len * = 2 ;
if ( ! ( _maps_buffer = dm_realloc ( _maps_buffer , _maps_len ) ) ) {
log_error ( " Allocation of maps buffer failed " ) ;
return 0 ;
}
}
2012-03-02 01:19:20 +04:00
if ( lseek ( _maps_fd , 0 , SEEK_SET ) )
log_sys_error ( " lseek " , _procselfmaps ) ;
2010-09-30 15:32:40 +04:00
for ( len = 0 ; len < _maps_len ; len + = n ) {
if ( ! ( n = read ( _maps_fd , _maps_buffer + len , _maps_len - len ) ) ) {
_maps_buffer [ len ] = ' \0 ' ;
break ; /* EOF */
}
if ( n = = - 1 )
return_0 ;
}
if ( len < _maps_len ) /* fits in buffer */
break ;
2010-03-05 17:48:33 +03:00
}
2010-09-30 15:32:40 +04:00
line = _maps_buffer ;
2013-03-05 20:00:43 +04:00
cn = find_config_tree_node ( cmd , activation_mlock_filter_CFG ) ;
2010-09-30 15:32:40 +04:00
while ( ( line_end = strchr ( line , ' \n ' ) ) ) {
* line_end = ' \0 ' ; /* remove \n */
2010-10-15 13:48:23 +04:00
if ( ! _maps_line ( cn , lock , line , mstats ) )
2010-09-30 15:32:40 +04:00
ret = 0 ;
line = line_end + 1 ;
}
2010-03-05 17:48:33 +03:00
2013-01-08 02:30:29 +04:00
log_debug_mem ( " %socked %ld bytes " ,
( lock = = LVM_MLOCK ) ? " L " : " Unl " , ( long ) * mstats ) ;
2010-03-05 17:48:33 +03:00
return ret ;
}
/* Stop memory getting swapped out */
static void _lock_mem ( struct cmd_context * cmd )
{
2003-07-05 02:34:56 +04:00
_allocate_memory ( ) ;
2010-03-30 18:41:23 +04:00
/*
* For daemon we need to use mlockall ( )
* so even future adition of thread which may not even use lvm lib
* will not block memory locked thread
* Note : assuming _memlock_count_daemon is updated before _memlock_count
*/
_use_mlockall = _memlock_count_daemon ? 1 :
2013-03-05 20:00:43 +04:00
find_config_tree_bool ( cmd , activation_use_mlockall_CFG ) ;
2010-03-09 06:16:11 +03:00
if ( ! _use_mlockall ) {
2010-03-09 13:25:50 +03:00
if ( ! * _procselfmaps & &
dm_snprintf ( _procselfmaps , sizeof ( _procselfmaps ) ,
" %s " SELF_MAPS , cmd - > proc_dir ) < 0 ) {
log_error ( " proc_dir too long " ) ;
return ;
2010-03-09 06:16:11 +03:00
}
2010-09-30 15:32:40 +04:00
if ( ! ( _maps_fd = open ( _procselfmaps , O_RDONLY ) ) ) {
log_sys_error ( " open " , _procselfmaps ) ;
2010-03-09 06:16:11 +03:00
return ;
}
}
log_very_verbose ( " Locking memory " ) ;
if ( ! _memlock_maps ( cmd , LVM_MLOCK , & _mstats ) )
stack ;
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
errno = 0 ;
if ( ( ( _priority = getpriority ( PRIO_PROCESS , 0 ) ) = = - 1 ) & & errno )
log_sys_error ( " getpriority " , " " ) ;
else
if ( setpriority ( PRIO_PROCESS , 0 , _default_priority ) )
2008-05-29 03:12:45 +04:00
log_error ( " setpriority %d failed: %s " ,
2003-07-05 02:34:56 +04:00
_default_priority , strerror ( errno ) ) ;
}
2010-03-05 17:48:33 +03:00
static void _unlock_mem ( struct cmd_context * cmd )
2003-07-05 02:34:56 +04:00
{
2010-03-30 18:41:58 +04:00
size_t unlock_mstats ;
2010-03-09 06:16:11 +03:00
log_very_verbose ( " Unlocking memory " ) ;
if ( ! _memlock_maps ( cmd , LVM_MUNLOCK , & unlock_mstats ) )
stack ;
2010-03-05 17:48:33 +03:00
2010-03-09 06:16:11 +03:00
if ( ! _use_mlockall ) {
2010-09-30 15:32:40 +04:00
if ( close ( _maps_fd ) )
log_sys_error ( " close " , _procselfmaps ) ;
dm_free ( _maps_buffer ) ;
_maps_buffer = NULL ;
2011-02-18 17:51:04 +03:00
if ( _mstats < unlock_mstats ) {
2011-03-06 20:52:07 +03:00
if ( ( _mstats + lvm_getpagesize ( ) ) < unlock_mstats )
2011-02-18 17:51:04 +03:00
log_error ( INTERNAL_ERROR
2012-01-12 22:29:07 +04:00
" Reserved memory (%ld) not enough: used %ld. Increase activation/reserved_memory? " ,
2011-02-18 17:51:04 +03:00
( long ) _mstats , ( long ) unlock_mstats ) ;
else
2011-04-29 04:21:13 +04:00
/* FIXME Believed due to incorrect use of yes_no_prompt while locks held */
2013-01-08 02:30:29 +04:00
log_debug_mem ( " Suppressed internal error: Maps lock %ld < unlock %ld, a one-page difference. " ,
( long ) _mstats , ( long ) unlock_mstats ) ;
2011-02-18 17:51:04 +03:00
}
2010-03-09 06:16:11 +03:00
}
2010-03-05 17:48:33 +03:00
2003-07-05 02:34:56 +04:00
if ( setpriority ( PRIO_PROCESS , 0 , _priority ) )
log_error ( " setpriority %u failed: %s " , _priority ,
strerror ( errno ) ) ;
2010-03-30 18:41:23 +04:00
_release_memory ( ) ;
2003-07-05 02:34:56 +04:00
}
2010-03-30 18:41:23 +04:00
static void _lock_mem_if_needed ( struct cmd_context * cmd )
{
2013-01-08 02:30:29 +04:00
log_debug_mem ( " Lock: Memlock counters: locked:%d critical:%d daemon:%d suspended:%d " ,
_mem_locked , _critical_section , _memlock_count_daemon , dm_get_suspended_counter ( ) ) ;
2011-02-18 17:16:11 +03:00
if ( ! _mem_locked & &
2011-06-13 07:32:45 +04:00
( ( _critical_section + _memlock_count_daemon ) = = 1 ) ) {
2011-02-18 17:16:11 +03:00
_mem_locked = 1 ;
2010-03-05 17:48:33 +03:00
_lock_mem ( cmd ) ;
2011-02-18 17:16:11 +03:00
}
2009-11-19 04:11:57 +03:00
}
2010-03-30 18:41:23 +04:00
static void _unlock_mem_if_possible ( struct cmd_context * cmd )
{
2013-01-08 02:30:29 +04:00
log_debug_mem ( " Unlock: Memlock counters: locked:%d critical:%d daemon:%d suspended:%d " ,
_mem_locked , _critical_section , _memlock_count_daemon , dm_get_suspended_counter ( ) ) ;
2011-02-18 17:16:11 +03:00
if ( _mem_locked & &
2011-06-13 07:32:45 +04:00
! _critical_section & &
2011-02-18 17:16:11 +03:00
! _memlock_count_daemon ) {
2010-03-05 17:48:33 +03:00
_unlock_mem ( cmd ) ;
2011-02-18 17:16:11 +03:00
_mem_locked = 0 ;
}
2009-11-19 04:11:57 +03:00
}
2011-06-11 04:03:06 +04:00
void critical_section_inc ( struct cmd_context * cmd , const char * reason )
2003-07-05 02:34:56 +04:00
{
2011-06-13 07:32:45 +04:00
if ( ! _critical_section ) {
_critical_section = 1 ;
2013-01-08 02:30:29 +04:00
log_debug_mem ( " Entering critical section (%s). " , reason ) ;
2011-06-13 07:32:45 +04:00
}
2010-03-05 17:48:33 +03:00
_lock_mem_if_needed ( cmd ) ;
2003-07-05 02:34:56 +04:00
}
2011-06-11 04:03:06 +04:00
void critical_section_dec ( struct cmd_context * cmd , const char * reason )
2003-07-05 02:34:56 +04:00
{
2011-06-13 07:32:45 +04:00
if ( _critical_section & & ! dm_get_suspended_counter ( ) ) {
_critical_section = 0 ;
2013-01-08 02:30:29 +04:00
log_debug_mem ( " Leaving critical section (%s). " , reason ) ;
2011-06-13 07:32:45 +04:00
}
2011-02-18 17:16:11 +03:00
}
int critical_section ( void )
{
2011-06-13 07:32:45 +04:00
return _critical_section ;
2003-07-05 02:34:56 +04:00
}
2009-11-19 04:11:57 +03:00
/*
* The memlock_ * _daemon functions will force the mlockall ( ) call that we need
* to stay in memory , but they will have no effect on device scans ( unlike
2011-02-18 17:16:11 +03:00
* normal critical_section_inc / dec ) . Memory is kept locked as long as either
* of critical_section or memlock_daemon is in effect .
2009-11-19 04:11:57 +03:00
*/
2010-03-05 17:48:33 +03:00
void memlock_inc_daemon ( struct cmd_context * cmd )
2009-11-19 04:11:57 +03:00
{
+ + _memlock_count_daemon ;
2011-06-13 07:32:45 +04:00
if ( _memlock_count_daemon = = 1 & & _critical_section > 0 )
2011-02-18 17:16:11 +03:00
log_error ( INTERNAL_ERROR " _memlock_inc_daemon used in critical section. " ) ;
2013-01-08 02:30:29 +04:00
log_debug_mem ( " memlock_count_daemon inc to %d " , _memlock_count_daemon ) ;
2011-02-18 17:16:11 +03:00
_lock_mem_if_needed ( cmd ) ;
2009-11-19 04:11:57 +03:00
}
2010-03-05 17:48:33 +03:00
void memlock_dec_daemon ( struct cmd_context * cmd )
2009-11-19 04:11:57 +03:00
{
if ( ! _memlock_count_daemon )
2009-12-16 22:22:11 +03:00
log_error ( INTERNAL_ERROR " _memlock_count_daemon has dropped below 0. " ) ;
2009-11-19 04:11:57 +03:00
- - _memlock_count_daemon ;
2013-01-08 02:30:29 +04:00
log_debug_mem ( " memlock_count_daemon dec to %d " , _memlock_count_daemon ) ;
2011-02-18 17:16:11 +03:00
_unlock_mem_if_possible ( cmd ) ;
2003-07-05 02:34:56 +04:00
}
void memlock_init ( struct cmd_context * cmd )
{
2011-12-09 01:24:08 +04:00
/* When threaded, caller already limited stack size so just use the default. */
2012-06-21 14:59:14 +04:00
_size_stack = 1024ULL * ( cmd - > threaded ? DEFAULT_RESERVED_STACK :
2013-03-05 20:00:43 +04:00
find_config_tree_int ( cmd , activation_reserved_stack_CFG ) ) ;
_size_malloc_tmp = find_config_tree_int ( cmd , activation_reserved_memory_CFG ) * 1024ULL ;
_default_priority = find_config_tree_int ( cmd , activation_process_priority_CFG ) ;
2003-07-05 02:34:56 +04:00
}
2011-02-18 17:16:11 +03:00
void memlock_reset ( void )
{
2013-01-08 02:30:29 +04:00
log_debug_mem ( " memlock reset. " ) ;
2011-02-18 17:16:11 +03:00
_mem_locked = 0 ;
2011-06-13 07:32:45 +04:00
_critical_section = 0 ;
2011-02-18 17:16:11 +03:00
_memlock_count_daemon = 0 ;
}
/* Public entry point: unlock memory if no user still requires it. */
void memlock_unlock(struct cmd_context *cmd)
{
	_unlock_mem_if_possible(cmd);
}
2003-07-05 02:34:56 +04:00
# endif