// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * The trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
 * In the extreme case, the code reads the on-disk bitmap to find free clusters.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * Maximum number of extents in tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

struct rb_node_key {
	struct rb_node node;
	size_t key;
};

struct e_node {
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */
};

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
{
	ntfs_enode_cachep = kmem_cache_create("ntfs3_enode_cache",
					      sizeof(struct e_node), 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;
}

void ntfs3_exit_bitmap(void)
{
	kmem_cache_destroy(ntfs_enode_cachep);
}

/*
 * wnd_scan
 *
 * Scan the range [wpos, wend) of the bitmap window @buf for @to_alloc free bits.
 * @b_pos + @b_len track the biggest free fragment seen so far.
 *
 * Return: -1 if not found.
 */
static size_t wnd_scan(const void *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		       size_t *b_len)
{
	while (wpos < wend) {
		size_t free_len;
		u32 free_bits, end;
		u32 used = find_next_zero_bit_le(buf, wend, wpos);

		if (used >= wend) {
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}
			*prev_tail = 0;
			return -1;
		}

		if (used > wpos) {
			wpos = used;
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}
			*prev_tail = 0;
		}

		/*
		 * Now we have a fragment [wpos, wend) starting with 0.
		 */
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit_le(buf, min(end, wend), wpos);

		free_len = *prev_tail + free_bits - wpos;

		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
			*b_len = free_len;
		}

		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;

		if (free_bits >= wend) {
			*prev_tail += free_bits - wpos;
			return -1;
		}

		wpos = free_bits + 1;
		*prev_tail = 0;
	}

	return -1;
}

/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
	struct rb_node *node, *next;

	kvfree(wnd->free_bits);
	wnd->free_bits = NULL;
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);

	while (node) {
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));
		node = next;
	}
}
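
/*
 * rb_lookup - Find the node with the largest key that is <= @v.
 *
 * Return: NULL if every key in @root is greater than @v.
 */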
static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;

	while (*p) {
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		if (v < k->key) {
			p = &(*p)->rb_left;
		} else if (v > k->key) {
			r = &k->node;
			p = &(*p)->rb_right;
		} else {
			return &k->node;
		}
	}

	return r;
}

/*
 * rb_insert_count - Helper function to insert into the special 'count' tree
 *		     (sorted by extent length, ties broken by start).
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k =
			rb_entry(parent = *p, struct e_node, count.node);

		if (e_ckey > k->count.key) {
			p = &(*p)->rb_left;
		} else if (e_ckey < k->count.key) {
			p = &(*p)->rb_right;
		} else if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
	return true;
}

/*
 * rb_insert_start - Helper function to insert into the special 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k;

		parent = *p;

		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);
	return true;
}

/*
 * wnd_add_free_ext - Adds a new extent of free space.
 * @build:	1 when building tree.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
			     bool build)
{
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;
	struct rb_node *n;

	if (build) {
		/* Use extent_min to filter too short extents. */
		if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
		    len <= wnd->extent_min) {
			wnd->uptodated = -1;
			return;
		}
	} else {
		/* Try to find extent before 'bit'. */
		n = rb_lookup(&wnd->start_tree, bit);

		if (!n) {
			n = rb_first(&wnd->start_tree);
		} else {
			e = rb_entry(n, struct e_node, start.node);
			n = rb_next(n);
			if (e->start.key + e->count.key == bit) {
				/* Remove left. */
				bit = e->start.key;
				len += e->count.key;
				rb_erase(&e->start.node, &wnd->start_tree);
				rb_erase(&e->count.node, &wnd->count_tree);
				wnd->count -= 1;
				e0 = e;
			}
		}

		while (n) {
			size_t next_end;

			e = rb_entry(n, struct e_node, start.node);
			next_end = e->start.key + e->count.key;
			if (e->start.key > end_in)
				break;

			/* Remove right. */
			n = rb_next(n);
			len += next_end - end_in;
			end_in = next_end;
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;

			if (!e0)
				e0 = e;
			else
				kmem_cache_free(ntfs_enode_cachep, e);
		}

		if (wnd->uptodated != 1) {
			/* Check bits before 'bit'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     bit < wnd->zone_end ? 0 : wnd->zone_end;

			while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
				bit -= 1;
				len += 1;
			}

			/* Check bits after 'end_in'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit ? wnd->nbits : wnd->zone_bit;

			while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
				end_in += 1;
				len += 1;
			}
		}
	}
	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		if (e0)
			kmem_cache_free(ntfs_enode_cachep, e0);

		wnd->uptodated = -1;

		/* Compare with smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */

		if (build) {
			struct e_node *e2;

			n = rb_prev(n);
			e2 = rb_entry(n, struct e_node, count.node);
			/* Smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		}

		/* Replace smallest fragment by new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}

		if (build && len <= wnd->extent_min)
			wnd->extent_min = len;
	}
	e->start.key = bit;
	e->count.key = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;

	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:;
}

/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		return;

	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;

	new_key = new_len = 0;
	len = e->count.key;

	/* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
		;
	else if (end_in <= end) {
		/* Range [bit,end_in) inside 'e'. */
		new_key = end_in;
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		bool bmax = false;

		n3 = rb_next(n);

		while (n3) {
			e3 = rb_entry(n3, struct e_node, start.node);
			if (e3->start.key >= end_in)
				break;

			if (e3->count.key == wnd->extent_max)
				bmax = true;

			end3 = e3->start.key + e3->count.key;
			if (end3 > end_in) {
				e3->start.key = end_in;
				rb_erase(&e3->count.node, &wnd->count_tree);
				e3->count.key = end3 - end_in;
				rb_insert_count(&wnd->count_tree, e3);
				break;
			}

			n3 = rb_next(n3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e3);
		}
		if (!bmax)
			return;

		n3 = rb_first(&wnd->count_tree);
		wnd->extent_max =
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
			     0;
		return;
	}

	if (e->count.key != wnd->extent_max) {
		;
	} else if (rb_prev(&e->count.node)) {
		;
	} else {
		n3 = rb_next(&e->count.node);
		max_new_len = max(len, new_len);
		if (!n3) {
			wnd->extent_max = max_new_len;
		} else {
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		}
	}

	if (!len) {
		if (new_len) {
			e->start.key = new_key;
			rb_erase(&e->count.node, &wnd->count_tree);
			e->count.key = new_len;
			rb_insert_count(&wnd->count_tree, e);
		} else {
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e);
		}
		goto out;
	}
	rb_erase(&e->count.node, &wnd->count_tree);
	e->count.key = len;
	rb_insert_count(&wnd->count_tree, e);

	if (!new_len)
		goto out;

	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		wnd->uptodated = -1;

		/* Get minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
			     count.node);
		if (e->count.key > new_len)
			goto out;

		/* Replace minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e)
			wnd->uptodated = -1;
	}

	if (e) {
		e->start.key = new_key;
		e->count.key = new_len;
		rb_insert_start(&wnd->start_tree, e);
		rb_insert_count(&wnd->count_tree, e);
		wnd->count += 1;
	}

out:
	if (!wnd->count && 1 != wnd->uptodated)
		wnd_rescan(wnd);
}

/*
 * wnd_rescan - Scan the whole bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
	int err = 0;
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 lbo, len = 0;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	u32 used, frb;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;
	CLST lcn, clen;

	wnd->uptodated = 0;
	wnd->extent_max = 0;
	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	vbo = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (wnd->inited) {
			if (!wnd->free_bits[iw]) {
				/* All ones. */
				if (prev_tail) {
					wnd_add_free_ext(wnd,
							 vbo * 8 - prev_tail,
							 prev_tail, true);
					prev_tail = 0;
				}
				goto next_wnd;
			}
			if (wbits == wnd->free_bits[iw]) {
				/* All zeroes. */
				prev_tail += wbits;
				wnd->total_zeroes += wbits;
				goto next_wnd;
			}
		}

		if (!len) {
			u32 off = vbo & sbi->cluster_mask;

			if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
					      &lcn, &clen, NULL)) {
				err = -ENOENT;
				goto out;
			}

			lbo = ((u64)lcn << cluster_bits) + off;
			len = ((u64)clen << cluster_bits) - off;
		}

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh) {
			err = -EIO;
			goto out;
		}

		used = ntfs_bitmap_weight_le(bh->b_data, wbits);
		if (used < wbits) {
			frb = wbits - used;
			wnd->free_bits[iw] = frb;
			wnd->total_zeroes += frb;
		}

		wpos = 0;
		wbit = vbo * 8;

		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;

		do {
			used = find_next_zero_bit_le(bh->b_data, wbits, wpos);

			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 prev_tail, true);
				prev_tail = 0;
			}

			wpos = used;

			if (wpos >= wbits) {
				/* No free blocks. */
				prev_tail = 0;
				break;
			}

			frb = find_next_bit_le(bh->b_data, wbits, wpos);
			if (frb >= wbits) {
				/* Keep last free block. */
				prev_tail += frb - wpos;
				break;
			}

			wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
					 frb + prev_tail - wpos, true);

			/* Skip free block and first '1'. */
			wpos = frb + 1;
			/* Reset previous tail. */
			prev_tail = 0;
		} while (wpos < wbits);

next_wnd:
		if (bh)
			put_bh(bh);
		bh = NULL;

		vbo += blocksize;
		if (len) {
			len -= blocksize;
			lbo += blocksize;
		}
	}

	/* Add last block. */
	if (prev_tail)
		wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle wnd->uptodated was 0.
	 * If any errors or limits occurred during initialization then
	 * wnd->uptodated will be -1.
	 * If 'uptodated' is still 0 then the tree is really up to date.
	 */
	if (!wnd->uptodated)
		wnd->uptodated = 1;

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;

		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);
	}

out:
	return err;
}
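
/*
 * wnd_init - Initialize the in-memory bitmap for @nbits bits backed by @sb.
 *
 * Allocates the per-window free-bit counters and performs the initial rescan.
 */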
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
	int err;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);

	wnd->sb = sb;
	wnd->nbits = nbits;
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;

	wnd->free_bits =
		kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
	if (!wnd->free_bits)
		return -ENOMEM;

	err = wnd_rescan(wnd);
	if (err)
		return err;

	wnd->inited = true;

	return 0;
}

/*
 * wnd_map - Call sb_bread for requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
	size_t vbo;
	CLST lcn, clen;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;
	u64 lbo;

	sbi = sb->s_fs_info;
	vbo = (u64)iw << sb->s_blocksize_bits;

	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
			      NULL)) {
		return ERR_PTR(-ENOENT);
	}

	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
	if (!bh)
		return ERR_PTR(-EIO);

	return bh;
}

/*
 * wnd_set_free - Mark the bits range from bit to bit + bits as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, wbit, op);

		wnd->free_bits[iw] += op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes += op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	wnd_add_free_ext(wnd, bit, bits0, false);

	return err;
}

/*
 * wnd_set_used - Mark the bits range from bit to bit + bits as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_set_le(bh->b_data, wbit, op);
		wnd->free_bits[iw] -= op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes -= op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);

	return err;
}

/*
 * wnd_set_used_safe - Mark the bits range from bit to bit + bits as used.
 *
 * Unlike wnd_set_used/wnd_set_free, this function does not trust the range:
 * it scans every bit in the bitmap and marks only the free bits as used.
 * @done - how many bits were marked as used.
 *
 * NOTE: normally *done should be 0.
 */
int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
		      size_t *done)
{
	size_t i, from = 0, len = 0;
	int err = 0;

	*done = 0;
	for (i = 0; i < bits; i++) {
		if (wnd_is_free(wnd, bit + i, 1)) {
			if (!len)
				from = bit + i;
			len += 1;
		} else if (len) {
			err = wnd_set_used(wnd, from, len);
			*done += len;
			len = 0;
			if (err)
				break;
		}
	}

	if (len) {
		/* Last fragment. */
		err = wnd_set_used(wnd, from, len);
		*done += len;
	}
	return err;
}

/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wbits != wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				return false;

			ret = are_bits_clear(bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				return false;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}

	return true;
}

/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret;
	struct rb_node *n;
	size_t end;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);

	end = e->start.key + e->count.key;

	if (bit < end && bit + bits <= end)
		return true;

use_wnd:
	ret = wnd_is_free_hlp(wnd, bit, bits);

	return ret;
}

/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret = false;
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	size_t end;
	struct rb_node *n;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	end = bit + bits;
	n = rb_lookup(&wnd->start_tree, end - 1);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)
		return false;

use_wnd:
	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				goto out;

			ret = are_bits_set(bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				goto out;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}
	ret = true;

out:
	return ret;
}

/*
 * wnd_find - Look for free space.
 *
 * - flags - BITMAP_FIND_XXX flags
 *
 * Return: 0 if not found.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
{
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	u8 log2_bits;
	bool fbits_valid;
	struct buffer_head *bh;

	/* Fast checking for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);

		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
			goto no_space;

		if (to_alloc0 > wnd->extent_max)
			goto no_space;
	} else {
		if (to_alloc > wnd->extent_max)
			to_alloc = wnd->extent_max;
	}

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	b_len = b_pos = 0;

	if (hint >= max_alloc)
		hint = 0;

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is updated -> No free space. */
			goto no_space;
		}
		goto scan_bitmap;
	}

	e = NULL;
	if (!hint)
		goto allocate_biggest;

	/* Use hint: Enumerate extents by start >= hint. */
	pr = NULL;
	cr = wnd->start_tree.rb_node;

	for (;;) {
		e = rb_entry(cr, struct e_node, start.node);

		if (e->start.key == hint)
			break;

		if (e->start.key < hint) {
			pr = cr;
			cr = cr->rb_right;
			if (!cr)
				break;
			continue;
		}

		cr = cr->rb_left;
		if (!cr) {
			e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
			break;
		}
	}

	if (!e)
		goto allocate_biggest;

	if (e->start.key + e->count.key > hint) {
		/* We have found an extent with 'hint' inside. */
		size_t len = e->start.key + e->count.key - hint;

		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
			fnd = hint;
			goto found;
		}

		if (!(flags & BITMAP_FIND_FULL)) {
			if (len > to_alloc)
				len = to_alloc;

			if (hint + len <= max_alloc) {
				fnd = hint;
				to_alloc = len;
				goto found;
			}
		}
	}

allocate_biggest:
	/* Allocate from biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
			;
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* Biggest free block is less than requested. */
				goto no_space;
			}
			to_alloc = e->count.key;
		} else if (-1 != wnd->uptodated) {
			to_alloc = e->count.key;
		} else {
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));

			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
			     op++) {
				if (!wnd_is_free(wnd, op, 1))
					break;
			}
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;
		}

		/* Prepare to return. */
		fnd = e->start.key;
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;
		goto found;
	}

	if (wnd->uptodated == 1) {
		/* Extents tree is updated -> no free space. */
		goto no_space;
	}

	b_len = e->count.key;
	b_pos = e->start.key;

scan_bitmap:
	sb = wnd->sb;
	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges [hint, max_alloc) + [0, hint). */
Again:

	/* TODO: Optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	prev_tail = 0;
	fbits_valid = true;

	if (max_alloc == wnd->nbits) {
		nwnd = wnd->nwnd;
	} else {
		size_t t = max_alloc + wbits - 1;

		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
	}

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;

		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
				b_len = prev_tail;
			}

			/* Skip fully used window. */
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
			} else {
				size_t t = max_alloc & (wbits - 1);

				if (t) {
					wbits = t;
					fbits_valid = false;
				}
			}
		}

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
			if (zend <= zbit) {
				/* Zone does not overlap window. */
			} else {
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan the window in two ranges: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);

				if (IS_ERR(bh)) {
					/* TODO: Error */
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan range [wbit, zbit). */
				if (wpos < wzbit) {
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(bh->b_data, wbit, wpos,
						       wzbit, to_alloc,
						       &prev_tail, &b_pos,
						       &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				prev_tail = 0;

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(bh->b_data, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				wpos = 0;
				put_bh(bh);
				continue;
			}
		}

		/* Current window does not overlap zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
				goto found;
			}

			/* Increase 'prev_tail' and process next window. */
			prev_tail += wbits;
			wpos = 0;
			continue;
		}

		/* Read window. */
		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			// TODO: Error.
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(bh->b_data, wbit, wpos, wbits, to_alloc,
			       &prev_tail, &b_pos, &b_len);
		put_bh(bh);
		if (fnd != MINUS_ONE_T)
			goto found;
	}

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_len = prev_tail;
		b_pos = max_alloc - prev_tail;
	}

	if (hint) {
		/*
		 * We have scanned range [hint, max_alloc).
		 * Prepare to scan range [0, hint + to_alloc).
		 */
		size_t nextmax = hint + to_alloc;

		if (likely(nextmax >= hint) && nextmax < max_alloc)
			max_alloc = nextmax;
		hint = 0;
		goto Again;
	}

	if (!b_len)
		goto no_space;

	wnd->extent_max = b_len;

	if (flags & BITMAP_FIND_FULL)
		goto no_space;

	fnd = b_pos;
	to_alloc = b_len;

found:
	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: Optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
			goto no_space;
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
	}

	*allocated = fnd;
	return to_alloc;

no_space:
	return 0;
}

/*
 * wnd_extend - Extend bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
	int err;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	u32 b0, new_last;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;
	u16 *new_free;

	if (new_bits <= old_bits)
		return -EINVAL;

	/* Align to 8 byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);
	if (!new_last)
		new_last = wbits;

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
		if (!new_free)
			return -ENOMEM;

		memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(short));
		kvfree(wnd->free_bits);
		wnd->free_bits = new_free;
	}

	/* Zero bits [old_bits,new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u32 op;
		size_t frb;
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;

		if (iw + 1 == new_wnd)
			wbits = new_last;

		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;

		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		if (err)
			break;

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh)
			return -EIO;

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, b0, blocksize * 8 - b0);
		frb = wbits - ntfs_bitmap_weight_le(bh->b_data, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		/* err = sync_dirty_buffer(bh); */

		b0 = 0;
		bits -= op;
	}

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

	return 0;
}
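
/*
 * wnd_zone_set - Set the reserved zone [lcn, lcn + len).
 *
 * The previous zone is returned to the cached free space and the new
 * zone is removed from it.
 */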
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
	size_t zlen = wnd->zone_end - wnd->zone_bit;

	if (zlen)
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);

	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
}
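
/*
 * ntfs_trim_fs - Discard free clusters in the requested range (FITRIM).
 */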
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);
	CLST lcn_to;

	if (!minlen)
		minlen = 1;

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;

		if (lcn_wnd > lcn_to)
			break;

		if (!wnd->free_bits[iw])
			continue;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		for (; wbit < wbits; wbit++) {
			if (!test_bit_le(wbit, bh->b_data)) {
				if (!len)
					lcn = lcn_wnd + wbit;
				len += 1;
				continue;
			}
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);
				if (err)
					goto out;
				done += len;
			}
			len = 0;
		}
		put_bh(bh);
	}

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);
		if (err)
			goto out;
		done += len;
	}

out:
	range->len = (u64)done << sbi->cluster_bits;

	up_read(&wnd->rw_lock);

	return err;
}
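
/*
 * On-disk NTFS bitmaps are little-endian. The helpers below operate on them
 * word by word, using a word type that matches BITS_PER_LONG.
 */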
#if BITS_PER_LONG == 64
typedef __le64 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le64(x)
#define ul_to_cpu(x) le64_to_cpu(x)
#else
typedef __le32 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le32(x)
#define ul_to_cpu(x) le32_to_cpu(x)
#endif
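
/*
 * ntfs_bitmap_set_le - Set @len bits starting at @start in the
 * little-endian bitmap @map.
 */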
void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_set = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_set &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p |= mask_to_set;
	}
}
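
/*
 * ntfs_bitmap_clear_le - Clear @len bits starting at @start in the
 * little-endian bitmap @map.
 */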
void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_clear = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_clear &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p &= ~mask_to_clear;
	}
}
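
/*
 * ntfs_bitmap_weight_le - Count the set bits among the first @bits bits
 * of the little-endian bitmap @bitmap.
 */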
unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits)
{
	const ulong *bmp = bitmap;
	unsigned int k, lim = bits / BITS_PER_LONG;
	unsigned int w = 0;

	for (k = 0; k < lim; k++)
		w += hweight_long(bmp[k]);

	if (bits % BITS_PER_LONG) {
		w += hweight_long(ul_to_cpu(((bitmap_ulong *)bitmap)[k]) &
				  BITMAP_LAST_WORD_MASK(bits));
	}

	return w;
}