/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

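/*
 * A minimal usage sketch (not part of this header): these flags are
 * or'd into the suspend_flags argument, assuming the dm_suspend()
 * interface declared in <linux/device-mapper.h>.  The function names
 * below are hypothetical.
 */
#if 0
static int example_suspend_lockfs(struct mapped_device *md)
{
	/* Suspend after flushing queued I/O and freezing the filesystem. */
	return dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
}

static int example_suspend_noflush(struct mapped_device *md)
{
	/* Suspend without flushing outstanding I/O. */
	return dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
}
#endif
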
/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

struct dm_table;

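/*
 * Illustrative sketch (an assumption for clarity, not code from this
 * header) of walking a table's device list via dm_table_get_devices(),
 * declared further down.  The function name is hypothetical and
 * printk() assumes <linux/kernel.h>.
 */
#if 0
static void example_list_devices(struct dm_table *t)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, dm_table_get_devices(t), list)
		printk(KERN_DEBUG "dm: table uses %s\n", dd->dm_dev.name);
}
#endif
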
/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
/*
 * Table reference counting:
 *
 * A table always has exactly one structural reference, from either
 * mapped_device->map or hash_cell->new_map.  That reference is not
 * counted in table->holders; tables are created and destroyed with
 * dm_table_create()/dm_table_destroy().
 *
 * Temporary references from other code (e.g. the dm_any_congested,
 * dm_merge_bvec and dm_unplug_all upcalls) increment table->holders
 * via dm_table_get()/dm_table_put().  dm_table_destroy() waits for
 * holders to drop to zero, actively with msleep(1), since this
 * happens rarely and device removal is not a performance-critical
 * task.  The destructor therefore runs only at this predetermined
 * point, never from whatever kernel context happens to drop the last
 * reference, which avoids calling it under a spinlock, deadlocking
 * on a lock the caller already holds, or keeping underlying devices
 * open after a successful "dmsetup remove".
 */
void dm_table_destroy(struct dm_table *t);

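/*
 * A minimal sketch (an assumption, not code from this header) of the
 * scheme above: transient users bracket access with
 * dm_table_get()/dm_table_put() from <linux/device-mapper.h>, while
 * the structural owner drops the table with dm_table_destroy().  The
 * example function names are hypothetical.
 */
#if 0
static void example_transient_access(struct dm_table *t)
{
	dm_table_get(t);	/* holders++ */
	/* ... short-lived use, e.g. from an upcall ... */
	dm_table_put(t);	/* holders-- */
}

static void example_teardown(struct dm_table *t)
{
	/* Waits for holders to reach zero, then runs the destructor. */
	dm_table_destroy(t);
}
#endif
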
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

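/*
 * Typical check (a usage sketch; the function name, "map", "sector" and
 * the error code are illustrative only):
 */
#if 0
static int example_check_target(struct dm_table *map, sector_t sector)
{
	struct dm_target *ti = dm_table_find_target(map, sector);

	if (!dm_target_is_valid(ti))
		return -EIO;	/* sector lies beyond the end of the table */
	return 0;
}
#endif
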
/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * The device-mapper can be driven through one of two interfaces;
 * ioctl or filesystem, depending which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);

void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

#endif