/* bcachefs IO clock: timers keyed off IO progress (clock->now advances by sectors written) */
// SPDX-License-Identifier: GPL-2.0
# include "bcachefs.h"
# include "clock.h"
# include <linux/freezer.h>
# include <linux/kthread.h>
# include <linux/preempt.h>
2024-05-24 18:29:58 +03:00
static inline bool io_timer_cmp ( const void * l , const void * r , void __always_unused * args )
2017-03-17 09:18:50 +03:00
{
2024-05-24 18:29:58 +03:00
struct io_timer * * _l = ( struct io_timer * * ) l ;
struct io_timer * * _r = ( struct io_timer * * ) r ;
return ( * _l ) - > expire < ( * _r ) - > expire ;
}
/*
 * Min-heap swap callback: exchange two struct io_timer pointers stored
 * in the heap's backing array.
 */
static inline void io_timer_swp(void *l, void *r, void __always_unused *args)
{
	struct io_timer **a = l;
	struct io_timer **b = r;

	swap(*a, *b);
}
void bch2_io_timer_add ( struct io_clock * clock , struct io_timer * timer )
{
2024-05-24 18:29:58 +03:00
const struct min_heap_callbacks callbacks = {
. less = io_timer_cmp ,
. swp = io_timer_swp ,
} ;
2017-03-17 09:18:50 +03:00
spin_lock ( & clock - > timer_lock ) ;
2019-12-19 23:07:51 +03:00
2024-06-30 01:08:20 +03:00
if ( time_after_eq64 ( ( u64 ) atomic64_read ( & clock - > now ) , timer - > expire ) ) {
2019-12-19 23:07:51 +03:00
spin_unlock ( & clock - > timer_lock ) ;
timer - > fn ( timer ) ;
return ;
}
2024-07-22 03:56:22 +03:00
for ( size_t i = 0 ; i < clock - > timers . nr ; i + + )
2017-03-17 09:18:50 +03:00
if ( clock - > timers . data [ i ] = = timer )
goto out ;
2024-05-24 18:29:58 +03:00
BUG_ON ( ! min_heap_push ( & clock - > timers , & timer , & callbacks , NULL ) ) ;
2017-03-17 09:18:50 +03:00
out :
spin_unlock ( & clock - > timer_lock ) ;
}
void bch2_io_timer_del ( struct io_clock * clock , struct io_timer * timer )
{
2024-05-24 18:29:58 +03:00
const struct min_heap_callbacks callbacks = {
. less = io_timer_cmp ,
. swp = io_timer_swp ,
} ;
2017-03-17 09:18:50 +03:00
spin_lock ( & clock - > timer_lock ) ;
2024-07-22 03:56:22 +03:00
for ( size_t i = 0 ; i < clock - > timers . nr ; i + + )
2017-03-17 09:18:50 +03:00
if ( clock - > timers . data [ i ] = = timer ) {
2024-05-24 18:29:58 +03:00
min_heap_del ( & clock - > timers , i , & callbacks , NULL ) ;
2017-03-17 09:18:50 +03:00
break ;
}
spin_unlock ( & clock - > timer_lock ) ;
}
/*
 * On-stack bookkeeping for a task waiting on the IO clock, optionally
 * with a wallclock fallback timeout.
 */
struct io_clock_wait {
	struct io_timer		io_timer;	/* fires when clock->now reaches expire */
	struct timer_list	cpu_timer;	/* wallclock (jiffies) fallback */
	struct task_struct	*task;		/* task to wake when either fires */
	int			expired;	/* set to 1 by either callback */
};
static void io_clock_wait_fn ( struct io_timer * timer )
{
struct io_clock_wait * wait = container_of ( timer ,
struct io_clock_wait , io_timer ) ;
wait - > expired = 1 ;
wake_up_process ( wait - > task ) ;
}
static void io_clock_cpu_timeout ( struct timer_list * timer )
{
struct io_clock_wait * wait = container_of ( timer ,
struct io_clock_wait , cpu_timer ) ;
wait - > expired = 1 ;
wake_up_process ( wait - > task ) ;
}
2024-06-30 01:08:20 +03:00
void bch2_io_clock_schedule_timeout ( struct io_clock * clock , u64 until )
2017-03-17 09:18:50 +03:00
{
2024-06-30 01:08:20 +03:00
struct io_clock_wait wait = {
. io_timer . expire = until ,
. io_timer . fn = io_clock_wait_fn ,
. io_timer . fn2 = ( void * ) _RET_IP_ ,
. task = current ,
} ;
2017-03-17 09:18:50 +03:00
bch2_io_timer_add ( clock , & wait . io_timer ) ;
schedule ( ) ;
bch2_io_timer_del ( clock , & wait . io_timer ) ;
}
/*
 * Sleep until the IO clock reaches @io_until, the wallclock timeout
 * @cpu_timeout (relative jiffies; MAX_SCHEDULE_TIMEOUT disables it)
 * elapses, or — for kthreads — kthread_should_stop() becomes true.
 */
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				u64 io_until, unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait = {
		.io_timer.expire	= io_until,
		.io_timer.fn		= io_clock_wait_fn,
		.io_timer.fn2		= (void *) _RET_IP_,	/* caller IP, shown by bch2_io_timers_to_text() */
		.task			= current,
	};

	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);
	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	/*
	 * Deliberate single-pass do/while(0): schedule() at most once; either
	 * callback (or a pending kthread stop) wakes us and we fall through
	 * to cleanup rather than re-sleeping.
	 */
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	} while (0);

	__set_current_state(TASK_RUNNING);
	/* tear down in reverse: cpu timer first, then the io timer */
	del_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}
2024-06-30 01:08:20 +03:00
static struct io_timer * get_expired_timer ( struct io_clock * clock , u64 now )
2017-03-17 09:18:50 +03:00
{
struct io_timer * ret = NULL ;
2024-05-24 18:29:58 +03:00
const struct min_heap_callbacks callbacks = {
. less = io_timer_cmp ,
. swp = io_timer_swp ,
} ;
2017-03-17 09:18:50 +03:00
2024-05-24 18:29:58 +03:00
if ( clock - > timers . nr & &
2024-07-22 03:56:22 +03:00
time_after_eq64 ( now , clock - > timers . data [ 0 ] - > expire ) ) {
2024-05-24 18:29:58 +03:00
ret = * min_heap_peek ( & clock - > timers ) ;
min_heap_pop ( & clock - > timers , & callbacks , NULL ) ;
}
2017-03-17 09:18:50 +03:00
return ret ;
}
2024-06-30 01:08:20 +03:00
void __bch2_increment_clock ( struct io_clock * clock , u64 sectors )
2017-03-17 09:18:50 +03:00
{
struct io_timer * timer ;
2024-06-30 01:08:20 +03:00
u64 now = atomic64_add_return ( sectors , & clock - > now ) ;
2017-03-17 09:18:50 +03:00
2024-06-30 04:02:17 +03:00
spin_lock ( & clock - > timer_lock ) ;
2019-12-19 23:07:51 +03:00
while ( ( timer = get_expired_timer ( clock , now ) ) )
timer - > fn ( timer ) ;
2024-06-30 04:02:17 +03:00
spin_unlock ( & clock - > timer_lock ) ;
2019-12-19 23:07:51 +03:00
}
2017-03-17 09:18:50 +03:00
2020-07-26 00:06:11 +03:00
void bch2_io_timers_to_text ( struct printbuf * out , struct io_clock * clock )
2019-12-19 23:07:51 +03:00
{
2022-02-25 21:18:19 +03:00
out - > atomic + + ;
2019-12-19 23:07:51 +03:00
spin_lock ( & clock - > timer_lock ) ;
2024-06-30 01:08:20 +03:00
u64 now = atomic64_read ( & clock - > now ) ;
printbuf_tabstop_push ( out , 40 ) ;
prt_printf ( out , " current time: \t %llu \n " , now ) ;
2017-03-17 09:18:50 +03:00
2024-07-22 03:56:22 +03:00
for ( unsigned i = 0 ; i < clock - > timers . nr ; i + + )
2024-06-30 01:08:20 +03:00
prt_printf ( out , " %ps %ps: \t %llu \n " ,
2019-12-19 23:07:51 +03:00
clock - > timers . data [ i ] - > fn ,
2024-06-30 01:08:20 +03:00
clock - > timers . data [ i ] - > fn2 ,
clock - > timers . data [ i ] - > expire ) ;
2019-12-19 23:07:51 +03:00
spin_unlock ( & clock - > timer_lock ) ;
2022-02-25 21:18:19 +03:00
- - out - > atomic ;
2017-03-17 09:18:50 +03:00
}
/* Free resources allocated by bch2_io_clock_init(); safe on a zeroed clock. */
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}
int bch2_io_clock_init ( struct io_clock * clock )
{
2021-01-21 23:28:59 +03:00
atomic64_set ( & clock - > now , 0 ) ;
2017-03-17 09:18:50 +03:00
spin_lock_init ( & clock - > timer_lock ) ;
2019-12-19 23:07:51 +03:00
clock - > max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus ( ) ;
2017-03-17 09:18:50 +03:00
clock - > pcpu_buf = alloc_percpu ( * clock - > pcpu_buf ) ;
if ( ! clock - > pcpu_buf )
2023-03-14 22:35:57 +03:00
return - BCH_ERR_ENOMEM_io_clock_init ;
2017-03-17 09:18:50 +03:00
if ( ! init_heap ( & clock - > timers , NR_IO_TIMERS , GFP_KERNEL ) )
2023-03-14 22:35:57 +03:00
return - BCH_ERR_ENOMEM_io_clock_init ;
2017-03-17 09:18:50 +03:00
return 0 ;
}