2018-04-03 20:23:33 +03:00
// SPDX-License-Identifier: GPL-2.0
2011-09-13 14:29:12 +04:00
/*
* Copyright ( C ) 2011 STRATO AG
* written by Arne Jansen < sensille @ gmx . net >
*/
# include <linux/slab.h>
2022-10-19 17:50:49 +03:00
# include "messages.h"
2011-09-13 14:29:12 +04:00
# include "ulist.h"
2014-01-28 20:25:34 +04:00
# include "ctree.h"
2011-09-13 14:29:12 +04:00
/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */
2022-10-27 15:21:42 +03:00
/*
* Freshly initialize a ulist .
*
2011-09-13 14:29:12 +04:00
* @ ulist : the ulist to initialize
*
* Note : don ' t use this function to init an already used ulist , use
* ulist_reinit instead .
*/
void ulist_init ( struct ulist * ulist )
{
2014-01-28 20:25:34 +04:00
INIT_LIST_HEAD ( & ulist - > nodes ) ;
2013-04-12 16:12:17 +04:00
ulist - > root = RB_ROOT ;
2014-01-28 20:25:34 +04:00
ulist - > nnodes = 0 ;
2011-09-13 14:29:12 +04:00
}
2022-10-27 15:21:42 +03:00
/*
* Free up additionally allocated memory for the ulist .
*
2011-09-13 14:29:12 +04:00
* @ ulist : the ulist from which to free the additional memory
*
* This is useful in cases where the base ' struct ulist ' has been statically
* allocated .
*/
2017-02-15 18:47:36 +03:00
void ulist_release ( struct ulist * ulist )
2011-09-13 14:29:12 +04:00
{
2014-01-28 20:25:34 +04:00
struct ulist_node * node ;
struct ulist_node * next ;
list_for_each_entry_safe ( node , next , & ulist - > nodes , list ) {
kfree ( node ) ;
}
2013-04-12 16:12:17 +04:00
ulist - > root = RB_ROOT ;
2014-01-28 20:25:34 +04:00
INIT_LIST_HEAD ( & ulist - > nodes ) ;
2011-09-13 14:29:12 +04:00
}
/*
 * Prepare a ulist for reuse.
 *
 * @ulist:	ulist to be reused
 *
 * Drops all element allocations and returns the ulist to its freshly
 * initialized state.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}
2022-10-27 15:21:42 +03:00
/*
* Dynamically allocate a ulist .
*
2011-09-13 14:29:12 +04:00
* @ gfp_mask : allocation flags to for base allocation
*
* The allocated ulist will be returned in an initialized state .
*/
2012-04-25 20:37:14 +04:00
struct ulist * ulist_alloc ( gfp_t gfp_mask )
2011-09-13 14:29:12 +04:00
{
struct ulist * ulist = kmalloc ( sizeof ( * ulist ) , gfp_mask ) ;
if ( ! ulist )
return NULL ;
ulist_init ( ulist ) ;
return ulist ;
}
/*
 * Free a dynamically allocated ulist.
 *
 * @ulist:	ulist to free, may be NULL
 *
 * All element memory is released first, so it is not necessary to call
 * ulist_release() before.
 */
void ulist_free(struct ulist *ulist)
{
	if (ulist) {
		ulist_release(ulist);
		kfree(ulist);
	}
}
2013-04-12 16:12:17 +04:00
static struct ulist_node * ulist_rbtree_search ( struct ulist * ulist , u64 val )
{
struct rb_node * n = ulist - > root . rb_node ;
struct ulist_node * u = NULL ;
while ( n ) {
u = rb_entry ( n , struct ulist_node , rb_node ) ;
if ( u - > val < val )
n = n - > rb_right ;
else if ( u - > val > val )
n = n - > rb_left ;
else
return u ;
}
return NULL ;
}
2015-04-20 04:26:02 +03:00
static void ulist_rbtree_erase ( struct ulist * ulist , struct ulist_node * node )
{
rb_erase ( & node - > rb_node , & ulist - > root ) ;
list_del ( & node - > list ) ;
kfree ( node ) ;
BUG_ON ( ulist - > nnodes = = 0 ) ;
ulist - > nnodes - - ;
}
2013-04-12 16:12:17 +04:00
static int ulist_rbtree_insert ( struct ulist * ulist , struct ulist_node * ins )
{
struct rb_node * * p = & ulist - > root . rb_node ;
struct rb_node * parent = NULL ;
struct ulist_node * cur = NULL ;
while ( * p ) {
parent = * p ;
cur = rb_entry ( parent , struct ulist_node , rb_node ) ;
if ( cur - > val < ins - > val )
p = & ( * p ) - > rb_right ;
else if ( cur - > val > ins - > val )
p = & ( * p ) - > rb_left ;
else
return - EEXIST ;
}
rb_link_node ( & ins - > rb_node , parent , p ) ;
rb_insert_color ( & ins - > rb_node , & ulist - > root ) ;
return 0 ;
}
2022-10-27 15:21:42 +03:00
/*
* Add an element to the ulist .
*
2011-09-13 14:29:12 +04:00
* @ ulist : ulist to add the element to
* @ val : value to add to ulist
* @ aux : auxiliary value to store along with val
* @ gfp_mask : flags to use for allocation
*
* Note : locking must be provided by the caller . In case of rwlocks write
* locking is needed
*
* Add an element to a ulist . The @ val will only be added if it doesn ' t
* already exist . If it is added , the auxiliary value @ aux is stored along with
* it . In case @ val already exists in the ulist , @ aux is ignored , even if
* it differs from the already stored value .
*
* ulist_add returns 0 if @ val already exists in ulist and 1 if @ val has been
* inserted .
* In case of allocation failure - ENOMEM is returned and the ulist stays
* unaltered .
*/
2012-07-28 18:18:58 +04:00
int ulist_add ( struct ulist * ulist , u64 val , u64 aux , gfp_t gfp_mask )
2012-05-30 20:05:21 +04:00
{
return ulist_add_merge ( ulist , val , aux , NULL , gfp_mask ) ;
}
2012-07-28 18:18:58 +04:00
int ulist_add_merge ( struct ulist * ulist , u64 val , u64 aux ,
u64 * old_aux , gfp_t gfp_mask )
2011-09-13 14:29:12 +04:00
{
2014-01-28 20:25:34 +04:00
int ret ;
struct ulist_node * node ;
2013-04-12 16:12:17 +04:00
node = ulist_rbtree_search ( ulist , val ) ;
if ( node ) {
if ( old_aux )
* old_aux = node - > aux ;
return 0 ;
2011-09-13 14:29:12 +04:00
}
2014-01-28 20:25:34 +04:00
node = kmalloc ( sizeof ( * node ) , gfp_mask ) ;
if ( ! node )
return - ENOMEM ;
2011-09-13 14:29:12 +04:00
2014-01-28 20:25:34 +04:00
node - > val = val ;
node - > aux = aux ;
2011-09-13 14:29:12 +04:00
2014-01-28 20:25:34 +04:00
ret = ulist_rbtree_insert ( ulist , node ) ;
ASSERT ( ! ret ) ;
list_add_tail ( & node - > list , & ulist - > nodes ) ;
ulist - > nnodes + + ;
2011-09-13 14:29:12 +04:00
return 1 ;
}
2015-04-20 04:26:02 +03:00
/*
2023-09-08 02:09:25 +03:00
* Delete one node from ulist .
*
2015-04-20 04:26:02 +03:00
* @ ulist : ulist to remove node from
* @ val : value to delete
* @ aux : aux to delete
*
* The deletion will only be done when * BOTH * val and aux matches .
* Return 0 for successful delete .
* Return > 0 for not found .
*/
int ulist_del ( struct ulist * ulist , u64 val , u64 aux )
{
struct ulist_node * node ;
node = ulist_rbtree_search ( ulist , val ) ;
/* Not found */
if ( ! node )
return 1 ;
if ( node - > aux ! = aux )
return 1 ;
/* Found and delete */
ulist_rbtree_erase ( ulist , node ) ;
return 0 ;
}
2022-10-27 15:21:42 +03:00
/*
* Iterate ulist .
*
2011-09-13 14:29:12 +04:00
* @ ulist : ulist to iterate
2012-05-22 16:56:50 +04:00
* @ uiter : iterator variable , initialized with ULIST_ITER_INIT ( & iterator )
2011-09-13 14:29:12 +04:00
*
* Note : locking must be provided by the caller . In case of rwlocks only read
* locking is needed
*
2012-05-22 16:56:50 +04:00
* This function is used to iterate an ulist .
* It returns the next element from the ulist or % NULL when the
2011-09-13 14:29:12 +04:00
* end is reached . No guarantee is made with respect to the order in which
* the elements are returned . They might neither be returned in order of
* addition nor in ascending order .
* It is allowed to call ulist_add during an enumeration . Newly added items
* are guaranteed to show up in the running enumeration .
*/
2022-11-01 19:15:49 +03:00
struct ulist_node * ulist_next ( const struct ulist * ulist , struct ulist_iterator * uiter )
2011-09-13 14:29:12 +04:00
{
2014-01-28 20:25:34 +04:00
struct ulist_node * node ;
if ( list_empty ( & ulist - > nodes ) )
2011-09-13 14:29:12 +04:00
return NULL ;
2014-01-28 20:25:34 +04:00
if ( uiter - > cur_list & & uiter - > cur_list - > next = = & ulist - > nodes )
2011-09-13 14:29:12 +04:00
return NULL ;
2014-01-28 20:25:34 +04:00
if ( uiter - > cur_list ) {
uiter - > cur_list = uiter - > cur_list - > next ;
} else {
uiter - > cur_list = ulist - > nodes . next ;
}
node = list_entry ( uiter - > cur_list , struct ulist_node , list ) ;
return node ;
2011-09-13 14:29:12 +04:00
}