// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is contiguous memory. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};
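
/*
 * Illustrative example (not part of the driver): a file whose first four
 * clusters live at LCN 100, followed by a two-cluster hole and three more
 * clusters at LCN 500, is described by a sorted array of three entries:
 *
 *	{ .vcn = 0, .len = 4, .lcn = 100        }
 *	{ .vcn = 4, .len = 2, .lcn = SPARSE_LCN }
 *	{ .vcn = 6, .len = 3, .lcn = 500        }
 *
 * Entries are kept sorted by vcn and never overlap, which is what lets
 * run_lookup() below use a plain binary search over the array.
 */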

/*
 * run_lookup - Lookup the index of a MCB entry that is first <= vcn.
 *
 * On success it returns a non-zero value and sets the @index parameter
 * to the index of the entry found.
 * If the entry is missing from the list, 'index' is set to point to the
 * insertion position for the entry in question.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check boundary cases specially, 'cause they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
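
/*
 * Illustrative semantics of run_lookup() (using the three-entry example
 * array shown earlier in this file): run_lookup(run, 5, &i) returns true
 * with i == 1, because VCN 5 falls inside the sparse run {4,2};
 * run_lookup(run, 9, &i) returns false with i == 3, i.e. the position
 * where a run starting at VCN 9 would have to be inserted.
 */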

/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * I should merge current run with next
		 * if start of the next run lies inside one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if runs are not aligned one to another. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If range at index overlaps with next one
		 * then I will either adjust its start position
		 * or (if it completely matches) just remove one from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Stop if sparse mode does not match
		 * both current and next runs.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Check if the first volume block
		 * of the next run's lcn does not match
		 * the last volume block of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current are siblings.
		 * Eat/join.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}
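
/*
 * Typical use of run_lookup_entry() elsewhere in the driver (sketch with
 * assumed caller context): translate a file VCN into an on-disk LCN before
 * issuing I/O.
 *
 *	CLST lcn, clen;
 *
 *	if (!run_lookup_entry(&ni->file.run, vcn, &lcn, &clen, NULL))
 *		return -ENOENT;		// VCN is not cached in the tree yet
 *	if (lcn == SPARSE_LCN)
 *		...			// hole: nothing allocated on disk
 *	// otherwise [lcn, lcn + clen) backs [vcn, vcn + clen)
 */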

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If I hit the range then
	 * I have to truncate one.
	 * If the range to be truncated becomes empty,
	 * then it will be removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the position that
	 * should be thrown away (including index itself).
	 * Simple one - just set the limit.
	 */
	run->count = index;

	/* Do not reallocate array 'runs'. Only free if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}

/*
 * run_add_entry
 *
 * Sets location to known state.
 * Run to be added may overlap with existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * A shortcut here is the case where the range was not found
	 * but the one being added continues the previous run.
	 * In this case I can directly make use of
	 * the existing range as my start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first let's check if the range I'm probing is here already.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check allocated space.
		 * If one is not enough to get one more entry
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottle neck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN ?
						   SPARSE_LCN :
						   (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits then we are done.
		 * Otherwise extend the found one and fall back
		 * to the range-merge code below.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from insertion point.
	 * It's possible that no insertion is needed, in the case where the
	 * start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case.
	 * We have to add an extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
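
/*
 * Illustrative behaviour of run_add_entry() (example values): a run that
 * simply continues an existing one is merged rather than stored separately,
 * while an overlapping run with a different LCN splits the existing entry.
 *
 *	run_add_entry(run, 0, 100, 4, false);	// {0,4,100}
 *	run_add_entry(run, 4, 104, 2, false);	// merged -> {0,6,100}
 *	run_add_entry(run, 2, 200, 1, false);	// split  -> {0,2,100}
 *						//           {2,1,200}
 *						//           {3,3,103}
 *
 * The third call hits the "split" path above: the tail {3,3,103} is
 * re-inserted through the recursive call at the end of the function.
 */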

/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse tail of run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}
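
/*
 * Illustrative effect of run_collapse_range() (example values): with runs
 * {0,4,100} {4,2,SPARSE} {6,3,500}, collapsing [4,6) eats the sparse run
 * and shifts the following one down, leaving {0,4,100} {4,3,500}. This
 * mirrors what fallocate(FALLOC_FL_COLLAPSE_RANGE) does to the file's
 * cluster mapping.
 */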

/* run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(insert_range).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}

/*
 * run_get_entry - Return index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}

/*
 * run_packed_size - Calculate the size of packed int64.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Full trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Full trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}

/* Full trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy( run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Full trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy( &v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif
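
/*
 * Worked example for run_packed_size() (illustrative values): 0x12345
 * packs into three bytes (45 23 01), 100 packs into one byte, and -2 also
 * packs into one byte (0xFE, sign-extended again on unpack). run_pack_s64()
 * and run_unpack_s64() then just copy that many low-order bytes in little
 * endian order, matching the on-disk NTFS mapping pairs format.
 */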

/*
 * run_pack - Pack runs into buffer.
 *
 * packed_vcns - How many VCNs (clusters) we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat the cycle above and pack runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes the packed dlcn takes. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of run. */
			run_pack_s64(run_buf, size_size, len);
			run_buf += size_size;

			/* Pack the offset from previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:
	/* Store last zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;
}
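
/*
 * Illustrative packed output (example values): a 0x18-cluster run whose LCN
 * is 0x5634 clusters above the previous one is encoded as the four bytes
 * "21 18 34 56" - header 0x21 means a 1-byte length and a 2-byte signed LCN
 * delta, both little endian. A trailing zero byte terminates the mapping
 * pairs array, which is why run_pack() returns packed_size + 1.
 */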

/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: Error if negative, or real used bytes.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       int run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	if (run_buf_size < 0)
		return -EINVAL;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* Initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing in run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Not the expected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks unpacked runs to be used in bitmap.
 *
 * Return: Error if negative, or real used bytes.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  int run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index;
	bool ok;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;

	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* Check for free blocks. */
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (down_write_trylock(&wnd->rw_lock)) {
			/* Mark all zero bits as used in range [lcn, lcn+len). */
			size_t done;

			err = wnd_set_used_safe(wnd, lcn, len, &done);
			up_write(&wnd->rw_lock);
			if (err)
				return err;
		}
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest VCN from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of run
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}