// SPDX-License-Identifier: GPL-2.0
/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "util.h"

#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)

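/*
 * Parse a human-readable integer: an optional size suffix (k, m, g, t, p,
 * e, z, y) multiplies the value by 1024 per suffix step, with overflow
 * checking. Returns 0 on success, -EINVAL on error.
 */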
#define STRTO_H(name, type)					\
int bch_ ## name ## _h(const char *cp, type *res)		\
{								\
	int u = 0;						\
	char *e;						\
	type i = simple_ ## name(cp, &e, 10);			\
								\
	switch (tolower(*e)) {					\
	default:						\
		return -EINVAL;					\
	case 'y':						\
	case 'z':						\
		u++;						\
		/* fall through */				\
	case 'e':						\
		u++;						\
		/* fall through */				\
	case 'p':						\
		u++;						\
		/* fall through */				\
	case 't':						\
		u++;						\
		/* fall through */				\
	case 'g':						\
		u++;						\
		/* fall through */				\
	case 'm':						\
		u++;						\
		/* fall through */				\
	case 'k':						\
		u++;						\
		if (e++ == cp)					\
			return -EINVAL;				\
		/* fall through */				\
	case '\n':						\
	case '\0':						\
		if (*e == '\n')					\
			e++;					\
	}							\
								\
	if (*e)							\
		return -EINVAL;					\
								\
	while (u--) {						\
		if ((type) ~0 > 0 &&				\
		    (type) ~0 / 1024 <= i)			\
			return -EINVAL;				\
		if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||	\
		    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))	\
			return -EINVAL;				\
		i *= 1024;					\
	}							\
								\
	*res = i;						\
	return 0;						\
}

STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
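
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * one of these helpers might be called, e.g. from a sysfs store path.
 * "nbytes" is an assumed variable name.
 *
 *	unsigned long long nbytes;
 *
 *	if (bch_strtoull_h("16k", &nbytes))
 *		return -EINVAL;
 *
 * nbytes is now 16384: the 'k' suffix multiplies the parsed value by 1024.
 */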

/**
 * bch_hprint - formats @v to human readable string for sysfs.
 * @buf: the (at least 8 byte) buffer to format the result into.
 * @v: signed 64 bit integer
 *
 * Returns the number of bytes used by format.
 */
ssize_t bch_hprint(char *buf, int64_t v)
{
	static const char units[] = "?kMGTPEZY";
	int u = 0, t;
	uint64_t q;

	if (v < 0)
		q = -v;
	else
		q = v;

	/* For as long as the number is more than 3 digits, but at least
	 * once, shift right / divide by 1024. Keep the remainder for
	 * a digit after the decimal point.
	 */
	do {
		u++;

		t = q & ~(~0 << 10);
		q >>= 10;
	} while (q >= 1000);

	if (v < 0)
		/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
		 * yields 8 bytes.
		 */
		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
	else
		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
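
/*
 * Illustrative only: for example, bch_hprint(buf, 1536) writes "1.5k" and
 * bch_hprint(buf, 2097152) writes "2.0M" into buf (1536 = 1.5 * 1024,
 * 2097152 = 2 * 1024 * 1024).
 */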

bool bch_is_zero(const char *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}

int bch_parse_uuid(const char *s, char *uuid)
{
	size_t i, j, x;

	memset(uuid, 0, 16);

	for (i = 0, j = 0;
	     i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
	     i++) {
		x = s[i] | 32;

		switch (x) {
		case '0'...'9':
			x -= '0';
			break;
		case 'a'...'f':
			x -= 'a' - 10;
			break;
		default:
			continue;
		}

		if (!(j & 1))
			x <<= 4;
		uuid[j++ >> 1] |= x;
	}
	return i;
}

void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
	uint64_t now, duration, last;

	spin_lock(&stats->lock);

	now = local_clock();
	duration = time_after64(now, start_time)
		? now - start_time : 0;
	last = time_after64(now, stats->last)
		? now - stats->last : 0;

	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		ewma_add(stats->average_duration, duration, 8, 8);

		if (stats->average_frequency)
			ewma_add(stats->average_frequency, last, 8, 8);
		else
			stats->average_frequency = last << 8;
	} else {
		stats->average_duration = duration << 8;
	}

	stats->last = now ?: 1;

	spin_unlock(&stats->lock);
}

/**
 * bch_next_delay() - update ratelimiting statistics and calculate next delay
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 *
 * Increment @d by the amount of work done, and return how long to delay in
 * jiffies until the next time to do some work.
 */
uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
	uint64_t now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));

	/* Bound the time. Don't let us fall further than 2 seconds behind
	 * (this prevents unnecessary backlog that would make it impossible
	 * to catch up). If we're ahead of the desired writeback rate,
	 * don't let us sleep more than 2.5 seconds (so we can notice/respond
	 * if the control system tells us to speed up!).
	 */
	if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
		d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;

	return time_after64(d->next, now)
		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
		: 0;
}
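
/*
 * Illustrative only: a hypothetical caller reports the work it just did and
 * sleeps for the returned number of jiffies before doing more. "rl" and
 * "sectors" are assumed names, not part of this file.
 *
 *	uint64_t delay = bch_next_delay(&rl, sectors);
 *
 *	if (delay)
 *		schedule_timeout_interruptible(delay);
 */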

/*
 * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
 * the preferred way is bio_add_page(). But in this case, bch_bio_map()
 * assumes that the bvec table is empty, so it is safe to access
 * .bi_vcnt & .bi_io_vec in this way even after multipage bvecs are
 * supported.
 */
void bch_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);

	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		bv->bv_offset = 0;
start:		bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
				   size);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}
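
/*
 * Illustrative only: a hypothetical caller sets the bio's total size first
 * and then maps a contiguous kernel buffer into its bvecs. "buf" and
 * "buf_size" are assumed names.
 *
 *	bio->bi_iter.bi_size = buf_size;
 *	bch_bio_map(bio, buf);
 */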

/**
 * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);

			return -ENOMEM;
		}
	}

	return 0;
}
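
/*
 * Illustrative only: a hypothetical caller sets bi_vcnt and bi_size before
 * asking this helper to back each bvec with a freshly allocated page.
 * "nr_pages" and the error label are assumed names.
 *
 *	bio->bi_vcnt         = nr_pages;
 *	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 *
 *	if (bch_bio_alloc_pages(bio, __GFP_NOWARN | GFP_NOIO))
 *		goto err;
 */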