/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef VALGRIND_POOL
#include "memcheck.h"
#endif

#include "dmlib.h"

#include <stddef.h>	/* For musl libc */
#include <malloc.h>

struct chunk {
	char *begin, *end;
	struct chunk *prev;
} __attribute__((aligned(8)));
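
/*
 * Each chunk is a single allocation: the struct chunk header sits at the
 * front and the usable arena follows it immediately.  'begin' points at the
 * first free byte and 'end' one past the last usable byte; chunks are
 * stacked through 'prev' with the newest chunk at the head.
 */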

struct dm_pool {
	struct dm_list list;
	struct chunk *chunk, *spare_chunk;  /* spare_chunk is a one entry free
					       list to stop 'bobbling' */
	const char *name;
	size_t chunk_size;
	size_t object_len;
	unsigned object_alignment;
	int locked;
	long crc;
};

static void _align_chunk(struct chunk *c, unsigned alignment);
static struct chunk *_new_chunk(struct dm_pool *p, size_t s);
static void _free_chunk(struct chunk *c);

/* by default things come out aligned for doubles */
#define DEFAULT_ALIGNMENT __alignof__ (double)

struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)
{
	size_t new_size = 1024;
	struct dm_pool *p = dm_zalloc(sizeof(*p));

	if (!p) {
		log_error("Couldn't create memory pool %s (size %"
			  PRIsize_t ")", name, sizeof(*p));
		return 0;
	}

	p->name = name;
	/* round chunk_hint up to the next power of 2 */
	p->chunk_size = chunk_hint + sizeof(struct chunk);
	while (new_size < p->chunk_size)
		new_size <<= 1;
	p->chunk_size = new_size;

	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_add(&_dm_pools, &p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);

	return p;
}
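
/*
 * Example usage (a minimal sketch; the pool name and sizes are illustrative
 * only):
 *
 *	struct dm_pool *mem = dm_pool_create("example", 1024);
 *
 *	if (mem) {
 *		char *buf = dm_pool_alloc(mem, 256);
 *		... use buf; no per-allocation free is required ...
 *		dm_pool_destroy(mem);	releases every chunk at once
 *	}
 */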

void dm_pool_destroy(struct dm_pool *p)
{
	struct chunk *c, *pr;

	_free_chunk(p->spare_chunk);
	c = p->chunk;
	while (c) {
		pr = c->prev;
		_free_chunk(c);
		c = pr;
	}

	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_del(&p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);

	dm_free(p);
}

void *dm_pool_alloc(struct dm_pool *p, size_t s)
{
	return dm_pool_alloc_aligned(p, s, DEFAULT_ALIGNMENT);
}

void *dm_pool_alloc_aligned(struct dm_pool *p, size_t s, unsigned alignment)
{
	struct chunk *c = p->chunk;
	void *r;

	/* realign begin */
	if (c)
		_align_chunk(c, alignment);

	/* have we got room ? */
	if (!c || (c->begin > c->end) || (c->end - c->begin < s)) {
		/* allocate new chunk */
		size_t needed = s + alignment + sizeof(struct chunk);

		c = _new_chunk(p, (needed > p->chunk_size) ?
			       needed : p->chunk_size);
		if (!c)
			return_NULL;

		_align_chunk(c, alignment);
	}

	r = c->begin;
	c->begin += s;

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(r, s);
#endif

	return r;
}
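
/*
 * Allocation is a simple pointer bump within the current chunk; a new chunk
 * is only malloc'd when the aligned request does not fit.  A sketch of
 * requesting an alignment stricter than the default (64 here is just an
 * illustrative cache-line size):
 *
 *	void *table = dm_pool_alloc_aligned(mem, 4096, 64);
 */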

void dm_pool_empty(struct dm_pool *p)
{
	struct chunk *c;

	for (c = p->chunk; c && c->prev; c = c->prev)
		;

	if (c)
		dm_pool_free(p, (char *) (c + 1));
}

void dm_pool_free(struct dm_pool *p, void *ptr)
{
	struct chunk *c = p->chunk;

	while (c) {
		if (((char *) c < (char *) ptr) &&
		    ((char *) c->end > (char *) ptr)) {
			c->begin = ptr;
#ifdef VALGRIND_POOL
			VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
			break;
		}

		if (p->spare_chunk)
			_free_chunk(p->spare_chunk);

		c->begin = (char *) (c + 1);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif

		p->spare_chunk = c;
		c = c->prev;
	}

	if (!c)
		log_error(INTERNAL_ERROR "pool_free asked to free pointer "
			  "not in pool");
	else
		p->chunk = c;
}
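
/*
 * dm_pool_free() has stack discipline: freeing a pointer also releases
 * everything allocated after it.  For instance (a sketch):
 *
 *	void *a = dm_pool_alloc(mem, 16);
 *	void *b = dm_pool_alloc(mem, 16);
 *
 *	dm_pool_free(mem, a);	 b is released as well
 */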

int dm_pool_begin_object(struct dm_pool *p, size_t hint)
{
	struct chunk *c = p->chunk;
	const size_t align = DEFAULT_ALIGNMENT;

	p->object_len = 0;
	p->object_alignment = align;

	if (c)
		_align_chunk(c, align);

	if (!c || (c->begin > c->end) || (c->end - c->begin < hint)) {
		/* allocate a new chunk */
		c = _new_chunk(p,
			       hint > (p->chunk_size - sizeof(struct chunk)) ?
			       hint + sizeof(struct chunk) + align :
			       p->chunk_size);

		if (!c)
			return 0;

		_align_chunk(c, align);
	}

	return 1;
}

int dm_pool_grow_object(struct dm_pool *p, const void *extra, size_t delta)
{
	struct chunk *c = p->chunk, *nc;

	if (!delta)
		delta = strlen(extra);

	if (c->end - (c->begin + p->object_len) < delta) {
		/* move into a new chunk */
		if (p->object_len + delta > (p->chunk_size / 2))
			nc = _new_chunk(p, (p->object_len + delta) * 2);
		else
			nc = _new_chunk(p, p->chunk_size);

		if (!nc)
			return 0;

		_align_chunk(p->chunk, p->object_alignment);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin, p->object_len);
#endif
		memcpy(p->chunk->begin, c->begin, p->object_len);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, p->object_len);
#endif
		c = p->chunk;
	}

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin + p->object_len, delta);
#endif

	memcpy(c->begin + p->object_len, extra, delta);
	p->object_len += delta;

	return 1;
}

void *dm_pool_end_object(struct dm_pool *p)
{
	struct chunk *c = p->chunk;
	void *r = c->begin;

	c->begin += p->object_len;
	p->object_len = 0u;
	p->object_alignment = DEFAULT_ALIGNMENT;

	return r;
}
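
/*
 * The begin/grow/end trio builds one variable-length object in place.  A
 * sketch of concatenating strings into a single pool allocation (passing 0
 * as delta makes dm_pool_grow_object() take strlen() of the input):
 *
 *	char *s = NULL;
 *
 *	if (dm_pool_begin_object(mem, 64) &&
 *	    dm_pool_grow_object(mem, "hello, ", 0) &&
 *	    dm_pool_grow_object(mem, "world", 0) &&
 *	    dm_pool_grow_object(mem, "\0", 1))
 *		s = dm_pool_end_object(mem);
 *	else
 *		dm_pool_abandon_object(mem);
 */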

void dm_pool_abandon_object(struct dm_pool *p)
{
#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_NOACCESS(p->chunk, p->object_len);
#endif
	p->object_len = 0;
	p->object_alignment = DEFAULT_ALIGNMENT;
}

static void _align_chunk(struct chunk *c, unsigned alignment)
{
	c->begin += alignment - ((unsigned long) c->begin & (alignment - 1));
}
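
/*
 * The mask trick above requires a power-of-two alignment.  It advances
 * 'begin' by 1..alignment bytes, so an already-aligned pointer is bumped by
 * a full alignment step; callers compensate by reserving s + alignment
 * bytes when sizing a new chunk.
 */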

static struct chunk *_new_chunk(struct dm_pool *p, size_t s)
{
	struct chunk *c;

	if (p->spare_chunk &&
	    ((p->spare_chunk->end - p->spare_chunk->begin) >= (ptrdiff_t) s)) {
		/* reuse old chunk */
		c = p->spare_chunk;
		p->spare_chunk = 0;
	} else {
#ifdef DEBUG_ENFORCE_POOL_LOCKING
		if (!pagesize) {
			pagesize = getpagesize(); /* lvm_pagesize(); */
			pagesize_mask = pagesize - 1;
		}
		/*
		 * Allocate a page-aligned size so that malloc's internal
		 * bookkeeping writes cannot fault on pages the pool has
		 * write-protected.
		 */
#define aligned_malloc(s)	(posix_memalign((void **) &c, pagesize, \
					ALIGN_ON_PAGE(s)) == 0)
#else
#define aligned_malloc(s)	(c = dm_malloc(s))
#endif /* DEBUG_ENFORCE_POOL_LOCKING */
		if (!aligned_malloc(s)) {
#undef aligned_malloc
			log_error("Out of memory.  Requested %" PRIsize_t
				  " bytes.", s);
			return NULL;
		}

		c->begin = (char *) (c + 1);
		c->end = (char *) c + s;

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
	}

	c->prev = p->chunk;
	p->chunk = c;

	return c;
}

static void _free_chunk(struct chunk *c)
{
#ifdef VALGRIND_POOL
#ifdef DEBUG_MEM
	if (c)
		VALGRIND_MAKE_MEM_UNDEFINED(c + 1, c->end - (char *) (c + 1));
#endif
#endif
#ifdef DEBUG_ENFORCE_POOL_LOCKING
	/* DEBUG_MEM keeps its own allocation list, so memory from
	 * posix_memalign() must be released with plain free() */
	free(c);
#else
	dm_free(c);
#endif
}

/**
 * Calculate a crc/hash over the pool's memory chunks, including the
 * chunks' internal pointers.
 */
static long _pool_crc(const struct dm_pool *p)
{
	long crc_hash = 0;
#ifndef DEBUG_ENFORCE_POOL_LOCKING
	const struct chunk *c;
	const long *ptr, *end;

	for (c = p->chunk; c; c = c->prev) {
		end = (const long *) (c->begin < c->end ?
				      (long) c->begin & ~7 : (long) c->end);
		ptr = (const long *) c;
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_DEFINED(ptr, (end - ptr) * sizeof(*end));
#endif
		while (ptr < end) {
			crc_hash += *ptr++;
			crc_hash += (crc_hash << 10);
			crc_hash ^= (crc_hash >> 6);
		}
	}
#endif /* DEBUG_ENFORCE_POOL_LOCKING */

	return crc_hash;
}
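
/*
 * The mixing steps above follow Jenkins' one-at-a-time hash.  The assumed
 * use (by pool-locking callers elsewhere in the library, not shown in this
 * file): compute the crc when a pool is locked, then compare it on unlock
 * to detect stray writes to memory that should have stayed untouched.
 */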

static int _pool_protect(struct dm_pool *p, int prot)
{
#ifdef DEBUG_ENFORCE_POOL_LOCKING
	struct chunk *c;

	for (c = p->chunk; c; c = c->prev) {
		if (mprotect(c, (size_t) ((c->end - (char *) c) - 1), prot) != 0) {
			log_sys_error("mprotect", "");
			return 0;
		}
	}
#endif
	return 1;
}