2023-01-25 23:00:44 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2012-10-13 00:02:13 +04:00
/*
* Copyright ( C ) 2012 Red Hat , Inc .
*
* This file is released under the GPL .
*/
# include "dm.h"
2016-10-21 17:06:40 +03:00
# include "dm-bio-prison-v1.h"
# include "dm-bio-prison-v2.h"
2012-10-13 00:02:13 +04:00
# include <linux/spinlock.h>
# include <linux/mempool.h>
# include <linux/module.h>
# include <linux/slab.h>
/*----------------------------------------------------------------*/
2014-10-07 00:30:06 +04:00
# define MIN_CELLS 1024
2014-06-05 18:23:09 +04:00
/*
 * A prison tracks "cells": in-flight regions keyed by dm_cell_key.  A bio
 * that hits a region already owned by another bio is queued inside the
 * owning cell instead of proceeding.
 */
struct dm_bio_prison {
	spinlock_t lock;	/* protects @cells */
	struct rb_root cells;	/* active cells, ordered by cmp_keys() */
	mempool_t cell_pool;	/* backing allocator for cell structs */
};

/* Slab cache shared by all prisons; created in dm_bio_prison_init_v1(). */
static struct kmem_cache *_cell_cache;
2014-10-07 00:30:06 +04:00
/*----------------------------------------------------------------*/
2014-06-05 18:23:09 +04:00
2012-10-13 00:02:13 +04:00
/*
* @ nr_cells should be the number of cells you want in use _concurrently_ .
* Don ' t confuse it with the number of distinct keys .
*/
2014-10-07 00:30:06 +04:00
struct dm_bio_prison * dm_bio_prison_create ( void )
2012-10-13 00:02:13 +04:00
{
2018-06-05 12:26:33 +03:00
struct dm_bio_prison * prison = kzalloc ( sizeof ( * prison ) , GFP_KERNEL ) ;
2018-05-21 01:25:53 +03:00
int ret ;
2012-10-13 00:02:13 +04:00
if ( ! prison )
return NULL ;
2014-10-07 00:30:06 +04:00
spin_lock_init ( & prison - > lock ) ;
2018-05-21 01:25:53 +03:00
ret = mempool_init_slab_pool ( & prison - > cell_pool , MIN_CELLS , _cell_cache ) ;
if ( ret ) {
2012-10-13 00:02:13 +04:00
kfree ( prison ) ;
return NULL ;
}
2014-10-07 00:30:06 +04:00
prison - > cells = RB_ROOT ;
2012-10-13 00:02:13 +04:00
return prison ;
}
EXPORT_SYMBOL_GPL ( dm_bio_prison_create ) ;
/*
 * Tear down a prison created by dm_bio_prison_create().  Caller must
 * ensure no cells are still live.
 */
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
2013-03-02 02:45:50 +04:00
/*
 * Allocate an uninitialised cell from the prison's mempool.  May return
 * NULL depending on @gfp.  Free with dm_bio_prison_free_cell().
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
/*
 * Return a cell obtained from dm_bio_prison_alloc_cell() to the mempool.
 */
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
2014-10-07 00:30:06 +04:00
static void __setup_new_cell ( struct dm_cell_key * key ,
struct bio * holder ,
struct dm_bio_prison_cell * cell )
2012-10-13 00:02:13 +04:00
{
2014-10-07 00:30:06 +04:00
memcpy ( & cell - > key , key , sizeof ( cell - > key ) ) ;
cell - > holder = holder ;
bio_list_init ( & cell - > bios ) ;
2012-10-13 00:02:13 +04:00
}
2014-10-07 00:30:06 +04:00
static int cmp_keys ( struct dm_cell_key * lhs ,
struct dm_cell_key * rhs )
2012-10-13 00:02:13 +04:00
{
2014-10-07 00:30:06 +04:00
if ( lhs - > virtual < rhs - > virtual )
return - 1 ;
2012-10-13 00:02:13 +04:00
2014-10-07 00:30:06 +04:00
if ( lhs - > virtual > rhs - > virtual )
return 1 ;
2014-06-05 18:23:09 +04:00
2014-10-07 00:30:06 +04:00
if ( lhs - > dev < rhs - > dev )
return - 1 ;
2012-10-13 00:02:13 +04:00
2014-10-07 00:30:06 +04:00
if ( lhs - > dev > rhs - > dev )
return 1 ;
2012-10-13 00:02:13 +04:00
2014-09-17 13:17:39 +04:00
if ( lhs - > block_end < = rhs - > block_begin )
2014-10-07 00:30:06 +04:00
return - 1 ;
2012-10-13 00:02:13 +04:00
2014-09-17 13:17:39 +04:00
if ( lhs - > block_begin > = rhs - > block_end )
2014-10-07 00:30:06 +04:00
return 1 ;
return 0 ;
2013-03-02 02:45:50 +04:00
}
2012-10-13 00:02:13 +04:00
2014-10-07 00:30:06 +04:00
/*
 * Find or create the cell for @key.
 *
 * Walks the rb-tree comparing @key against each cell (cmp_keys() treats
 * overlapping ranges as equal).  If a matching cell already exists,
 * @inmate (when non-NULL) is queued on it, *cell_result points at the
 * existing cell, and 1 is returned.  Otherwise @cell_prealloc is
 * initialised with @inmate as holder, inserted, returned via
 * *cell_result, and 0 is returned.
 *
 * Caller must hold prison->lock.
 */
static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			rb_entry(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			/* Region already owned: wait inside the cell. */
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	/* No collision: claim the preallocated cell and link it in. */
	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}
2012-10-13 00:02:13 +04:00
2013-03-02 02:45:50 +04:00
/*
 * Locked wrapper around __bio_detain().  Takes prison->lock with IRQs
 * disabled.
 */
static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
2013-03-02 02:45:50 +04:00
/*
 * Detain @inmate under @key.  Returns 1 if the region was already held
 * (the bio has been queued), 0 if @inmate is now the holder of a new cell.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
2013-03-02 02:45:51 +04:00
/*
 * Like dm_bio_detain() but with no bio: grab (or find) the cell for @key
 * without queueing anything.  Return value as for dm_bio_detain().
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
2012-10-13 00:02:13 +04:00
/*
* @ inmates must have been initialised prior to this call
*/
2014-10-07 00:30:06 +04:00
static void __cell_release ( struct dm_bio_prison * prison ,
struct dm_bio_prison_cell * cell ,
2013-03-02 02:45:50 +04:00
struct bio_list * inmates )
2012-10-13 00:02:13 +04:00
{
2014-10-07 00:30:06 +04:00
rb_erase ( & cell - > node , & prison - > cells ) ;
2012-10-13 00:02:13 +04:00
if ( inmates ) {
2013-03-02 02:45:50 +04:00
if ( cell - > holder )
bio_list_add ( inmates , cell - > holder ) ;
2012-10-13 00:02:13 +04:00
bio_list_merge ( inmates , & cell - > bios ) ;
}
}
2013-03-02 02:45:50 +04:00
/*
 * Release a cell: unlink it and move its holder plus queued bios onto
 * @bios.  @bios must be initialised by the caller.
 */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	spin_lock_irq(&prison->lock);
	__cell_release(prison, cell, bios);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
 * Sometimes we don't want the holder, just the additional bios.
 * Unlinks @cell and moves only its queued bios onto @inmates; the holder
 * is left to the caller.  Caller must hold prison->lock.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}
2013-03-02 02:45:50 +04:00
/*
 * Release a cell's queued bios onto @inmates without touching the holder.
 *
 * NOTE(review): this uses spin_lock_irqsave() while the neighbouring
 * lockers use plain spin_lock_irq() — presumably it can be entered with
 * interrupts already disabled (e.g. from an endio path); confirm against
 * callers before converting.
 */
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
2013-03-02 02:45:50 +04:00
void dm_cell_error ( struct dm_bio_prison * prison ,
2017-06-03 10:38:06 +03:00
struct dm_bio_prison_cell * cell , blk_status_t error )
2012-10-13 00:02:13 +04:00
{
struct bio_list bios ;
struct bio * bio ;
bio_list_init ( & bios ) ;
2014-06-05 18:23:09 +04:00
dm_cell_release ( prison , cell , & bios ) ;
2012-10-13 00:02:13 +04:00
2015-07-20 16:29:37 +03:00
while ( ( bio = bio_list_pop ( & bios ) ) ) {
2017-06-03 10:38:06 +03:00
bio - > bi_status = error ;
2015-07-20 16:29:37 +03:00
bio_endio ( bio ) ;
}
2012-10-13 00:02:13 +04:00
}
EXPORT_SYMBOL_GPL ( dm_cell_error ) ;
2014-10-10 18:27:16 +04:00
/*
 * Call @visit_fn on @cell and then unlink it, all under prison->lock so
 * the visit and the release are one atomic step with respect to other
 * prison operations.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	spin_lock_irq(&prison->lock);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
2015-05-15 17:23:35 +03:00
static int __promote_or_release ( struct dm_bio_prison * prison ,
struct dm_bio_prison_cell * cell )
{
if ( bio_list_empty ( & cell - > bios ) ) {
rb_erase ( & cell - > node , & prison - > cells ) ;
return 1 ;
}
cell - > holder = bio_list_pop ( & cell - > bios ) ;
return 0 ;
}
/*
 * Locked wrapper around __promote_or_release().  Returns 1 if the cell
 * was empty and has been removed, 0 if a waiter was promoted to holder.
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote_or_release(prison, cell);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
2012-10-13 00:02:13 +04:00
/*----------------------------------------------------------------*/
#define DEFERRED_SET_SIZE 64

/* One slot in the deferred-set ring. */
struct dm_deferred_entry {
	struct dm_deferred_set *ds;	/* back pointer to owning set */
	unsigned count;			/* in-flight operations in this slot */
	struct list_head work_items;	/* work waiting for the slot to drain */
};

/*
 * A ring of entries.  New operations join the entry at @current_entry;
 * @sweeper trails behind, flushing the work_items of entries whose count
 * has dropped to zero.
 */
struct dm_deferred_set {
	spinlock_t lock;		/* protects everything below */
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
struct dm_deferred_set * dm_deferred_set_create ( void )
{
int i ;
struct dm_deferred_set * ds ;
ds = kmalloc ( sizeof ( * ds ) , GFP_KERNEL ) ;
if ( ! ds )
return NULL ;
spin_lock_init ( & ds - > lock ) ;
ds - > current_entry = 0 ;
ds - > sweeper = 0 ;
for ( i = 0 ; i < DEFERRED_SET_SIZE ; i + + ) {
ds - > entries [ i ] . ds = ds ;
ds - > entries [ i ] . count = 0 ;
INIT_LIST_HEAD ( & ds - > entries [ i ] . work_items ) ;
}
return ds ;
}
EXPORT_SYMBOL_GPL ( dm_deferred_set_create ) ;
void dm_deferred_set_destroy ( struct dm_deferred_set * ds )
{
kfree ( ds ) ;
}
EXPORT_SYMBOL_GPL ( dm_deferred_set_destroy ) ;
struct dm_deferred_entry * dm_deferred_entry_inc ( struct dm_deferred_set * ds )
{
unsigned long flags ;
struct dm_deferred_entry * entry ;
spin_lock_irqsave ( & ds - > lock , flags ) ;
entry = ds - > entries + ds - > current_entry ;
entry - > count + + ;
spin_unlock_irqrestore ( & ds - > lock , flags ) ;
return entry ;
}
EXPORT_SYMBOL_GPL ( dm_deferred_entry_inc ) ;
/* Next slot in the ring, wrapping at DEFERRED_SET_SIZE. */
static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}
/*
 * Advance the sweeper over every drained entry (count == 0), splicing
 * their work_items onto @head.  Caller must hold ds->lock.
 */
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	/* The loop stops at current_entry; flush it too if it has drained. */
	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}
/*
 * Leave @entry (drop its in-flight count) and sweep: any work items from
 * entries that are now fully drained are moved onto @head for the caller
 * to run.  @head must be initialised by the caller.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);	/* unbalanced inc/dec is a driver bug */
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
/*
 * Returns 1 if deferred or 0 if no pending items to delay job.
 *
 * If the set is quiescent (sweeper has caught up and the current entry
 * has no in-flight operations) the caller may run @work immediately.
 * Otherwise @work is queued on the current entry and will be handed back
 * by a later dm_deferred_entry_dec() sweep.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned next_entry;

	spin_lock_irq(&ds->lock);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		/* Advance the ring only if the next slot has drained. */
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irq(&ds->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
/*----------------------------------------------------------------*/
2016-10-21 17:06:40 +03:00
/* Create the shared cell slab cache for the v1 prison. */
static int __init dm_bio_prison_init_v1(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);

	return _cell_cache ? 0 : -ENOMEM;
}
2016-10-21 17:06:40 +03:00
/* Destroy the shared cell slab cache (NULL-safe). */
static void dm_bio_prison_exit_v1(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}
2016-10-21 17:06:40 +03:00
/*
 * Init/exit tables for the v1 and v2 prisons.  _exits[i] undoes
 * _inits[i], so partial-failure rollback can walk _exits backwards.
 */
static int (*_inits[])(void) __initdata = {
	dm_bio_prison_init_v1,
	dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
	dm_bio_prison_exit_v1,
	dm_bio_prison_exit_v2,
};
/*
 * Run every entry of _inits[]; on failure undo the ones that succeeded
 * (in reverse order) and propagate the error.
 */
static int __init dm_bio_prison_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto rollback;
	}

	return 0;

rollback:
	/* Undo successful inits, most recent first. */
	while (i--)
		_exits[i]();

	return r;
}
/* Run every _exits[] entry in reverse registration order. */
static void __exit dm_bio_prison_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i)
		_exits[--i]();
}
2012-10-13 00:02:13 +04:00
/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");