2017-03-17 09:18:50 +03:00
// SPDX-License-Identifier: GPL-2.0
# ifndef NO_BCACHEFS_CHARDEV
# include "bcachefs.h"
# include "bcachefs_ioctl.h"
# include "buckets.h"
# include "chardev.h"
2024-01-07 04:29:25 +03:00
# include "disk_accounting.h"
2020-11-16 22:23:06 +03:00
# include "journal.h"
2017-03-17 09:18:50 +03:00
# include "move.h"
2024-03-24 03:07:46 +03:00
# include "recovery_passes.h"
2019-12-17 01:53:59 +03:00
# include "replicas.h"
2017-03-17 09:18:50 +03:00
# include "super.h"
# include "super-io.h"
2023-12-31 18:04:54 +03:00
# include "thread_with_file.h"
2017-03-17 09:18:50 +03:00
# include <linux/cdev.h>
# include <linux/device.h>
# include <linux/fs.h>
# include <linux/ioctl.h>
# include <linux/major.h>
# include <linux/sched/task.h>
# include <linux/slab.h>
# include <linux/uaccess.h>
/*
 * Resolve an ioctl device argument to a bch_dev.
 *
 * With BCH_BY_INDEX, @dev is a member index; otherwise it is a userspace
 * pointer to a path string.  Returns with a ref on ca->ref, or an ERR_PTR.
 */
static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
					  unsigned flags)
{
	if (flags & BCH_BY_INDEX) {
		if (dev >= c->sb.nr_devices)
			return ERR_PTR(-EINVAL);

		struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
		return ca ?: ERR_PTR(-EINVAL);
	}

	/* @dev carries a userspace pointer to a path string */
	char *path = strndup_user((const char __user *)(unsigned long) dev,
				  PATH_MAX);
	if (IS_ERR(path))
		return ERR_CAST(path);

	struct bch_dev *ca = bch2_dev_lookup(c, path);
	kfree(path);
	return ca;
}
#if 0
/*
 * Assemble a filesystem from an array of device paths passed from userspace.
 *
 * Fixes vs. previous version: the kcalloc() result was never checked before
 * writing devs[i], and user_devs was leaked on every path.
 */
static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
{
	struct bch_ioctl_assemble arg;
	struct bch_fs *c;
	u64 *user_devs = NULL;
	char **devs = NULL;
	unsigned i;
	int ret = -EFAULT;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
	if (!user_devs)
		return -ENOMEM;

	devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
	if (!devs) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(user_devs, user_arg->devs,
			   sizeof(u64) * arg.nr_devs))
		goto err;

	for (i = 0; i < arg.nr_devs; i++) {
		/* each entry is a userspace pointer to a path string */
		devs[i] = strndup_user((const char __user *)(unsigned long)
				       user_devs[i],
				       PATH_MAX);
		ret = PTR_ERR_OR_ZERO(devs[i]);
		if (ret)
			goto err;
	}

	c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
	ret = PTR_ERR_OR_ZERO(c);
	if (!ret)
		closure_put(&c->cl);
err:
	if (devs)
		for (i = 0; i < arg.nr_devs; i++)
			kfree(devs[i]);
	kfree(devs);
	kfree(user_devs);
	return ret;
}
/*
 * Incrementally register a single device by path.
 *
 * Fix vs. previous version: @ret was used without being declared, so this
 * would not compile if the surrounding #if 0 were ever re-enabled.
 */
static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
{
	struct bch_ioctl_incremental arg;
	const char *err;
	char *path;
	int ret;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	err = bch2_fs_open_incremental(path);
	kfree(path);

	if (err) {
		pr_err("Could not register bcachefs devices: %s", err);
		return -EINVAL;
	}

	return 0;
}
# endif
2023-07-12 06:23:40 +03:00
/* State for an offline/online fsck worker thread, freed by bch2_fsck_thread_exit() */
struct fsck_thread {
	struct thread_with_stdio thr;	/* must be first: container_of() users rely on it */
	struct bch_fs		*c;
	struct bch_opts		opts;	/* options parsed from the ioctl's opts string */
};
2023-12-31 18:04:54 +03:00
static void bch2_fsck_thread_exit ( struct thread_with_stdio * _thr )
2023-07-12 06:23:40 +03:00
{
2023-12-31 18:04:54 +03:00
struct fsck_thread * thr = container_of ( _thr , struct fsck_thread , thr ) ;
2023-07-12 06:23:40 +03:00
kfree ( thr ) ;
}
2024-02-18 04:49:11 +03:00
/*
 * Worker for BCH_IOCTL_FSCK_OFFLINE: starts the (already-opened) filesystem,
 * reports results over the stdio redirect, then shuts the fs back down.
 *
 * Return value uses fsck-style bit flags: 1 = errors were fixed,
 * 4 = errors remain (presumably mirroring fsck(8) exit codes — confirm).
 */
static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	/* bch2_fs_open() may have stashed an ERR_PTR in thr->c */
	int ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		return ret;

	ret = bch2_fs_start(thr->c);
	if (ret)
		goto err;

	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
		ret |= 1;
	}
	if (test_bit(BCH_FS_error, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
		ret |= 4;
	}
err:
	bch2_fs_stop(c);
	return ret;
}
2024-02-10 22:23:01 +03:00
/* thread_with_stdio callbacks for offline fsck */
static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_offline_thread_fn,
};
2023-07-12 06:23:40 +03:00
/*
 * BCH_IOCTL_FSCK_OFFLINE: open the named devices (filesystem not mounted),
 * then run fsck in a kernel thread whose stdio is redirected back to the
 * calling process.  Returns the fd of that thread's stdio file on success.
 */
static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
{
	struct bch_ioctl_fsck_offline arg;
	struct fsck_thread *thr = NULL;
	darray_str(devs) = {};
	long ret = 0;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Copy in the array of userspace path pointers, duplicating each path: */
	for (size_t i = 0; i < arg.nr_devs; i++) {
		u64 dev_u64;
		ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
		if (ret)
			goto err;

		char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
		ret = PTR_ERR_OR_ZERO(dev_str);
		if (ret)
			goto err;

		ret = darray_push(&devs, dev_str);
		if (ret) {
			kfree(dev_str);
			goto err;
		}
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr);
		if (!IS_ERR(optstr))
			kfree(optstr);

		if (ret)
			goto err;
	}

	/* Route all fs console output to the caller via the stdio file: */
	opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
	opt_set(thr->opts, read_only, 1);

	/* We need request_key() to be called before we punt to kthread: */
	opt_set(thr->opts, nostart, true);

	bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);

	thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);

	/* Don't panic the box while fscking; degrade to read-only instead: */
	if (!IS_ERR(thr->c) &&
	    thr->c->opts.errors == BCH_ON_ERROR_panic)
		thr->c->opts.errors = BCH_ON_ERROR_ro;

	/* On success, ownership of thr passes to the spawned thread */
	ret = __bch2_run_thread_with_stdio(&thr->thr);
out:
	darray_for_each(devs, i)
		kfree(*i);
	darray_exit(&devs);
	return ret;
err:
	if (thr)
		bch2_fsck_thread_exit(&thr->thr);
	pr_err("ret %s", bch2_err_str(ret));
	goto out;
}
2017-03-17 09:18:50 +03:00
/* ioctls on the global /dev/bcachefs-ctl node (no filesystem bound) */
static long bch2_global_ioctl(unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
#if 0
	case BCH_IOCTL_ASSEMBLE:
		return bch2_ioctl_assemble(arg);
	case BCH_IOCTL_INCREMENTAL:
		return bch2_ioctl_incremental(arg);
#endif
	case BCH_IOCTL_FSCK_OFFLINE:
		ret = bch2_ioctl_fsck_offline(arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	/* translate private error codes to standard errnos for userspace */
	return ret < 0 ? bch2_err_class(ret) : ret;
}
/* BCH_IOCTL_QUERY_UUID: copy the filesystem's user-visible UUID to userspace */
static long bch2_ioctl_query_uuid(struct bch_fs *c,
				  struct bch_ioctl_query_uuid __user *user_arg)
{
	return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
				    sizeof(c->sb.user_uuid));
}
#if 0
static long bch2_ioctl_start ( struct bch_fs * c , struct bch_ioctl_start arg )
{
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( arg . flags | | arg . pad )
return - EINVAL ;
2019-04-18 01:21:19 +03:00
return bch2_fs_start ( c ) ;
2017-03-17 09:18:50 +03:00
}
/* BCH_IOCTL_STOP: tear the filesystem down */
static long bch2_ioctl_stop(struct bch_fs *c)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	bch2_fs_stop(c);
	return 0;
}
# endif
static long bch2_ioctl_disk_add ( struct bch_fs * c , struct bch_ioctl_disk arg )
{
char * path ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( arg . flags | | arg . pad )
return - EINVAL ;
path = strndup_user ( ( const char __user * ) ( unsigned long ) arg . dev , PATH_MAX ) ;
2023-09-20 08:19:53 +03:00
ret = PTR_ERR_OR_ZERO ( path ) ;
if ( ret )
return ret ;
2017-03-17 09:18:50 +03:00
ret = bch2_dev_add ( c , path ) ;
2024-06-23 00:22:24 +03:00
if ( ! IS_ERR ( path ) )
kfree ( path ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
static long bch2_ioctl_disk_remove ( struct bch_fs * c , struct bch_ioctl_disk arg )
{
struct bch_dev * ca ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( ( arg . flags & ~ ( BCH_FORCE_IF_DATA_LOST |
BCH_FORCE_IF_METADATA_LOST |
BCH_FORCE_IF_DEGRADED |
BCH_BY_INDEX ) ) | |
arg . pad )
return - EINVAL ;
ca = bch2_device_lookup ( c , arg . dev , arg . flags ) ;
if ( IS_ERR ( ca ) )
return PTR_ERR ( ca ) ;
return bch2_dev_remove ( c , ca , arg . flags ) ;
}
static long bch2_ioctl_disk_online ( struct bch_fs * c , struct bch_ioctl_disk arg )
{
char * path ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( arg . flags | | arg . pad )
return - EINVAL ;
path = strndup_user ( ( const char __user * ) ( unsigned long ) arg . dev , PATH_MAX ) ;
2023-09-20 08:19:53 +03:00
ret = PTR_ERR_OR_ZERO ( path ) ;
if ( ret )
return ret ;
2017-03-17 09:18:50 +03:00
ret = bch2_dev_online ( c , path ) ;
kfree ( path ) ;
return ret ;
}
static long bch2_ioctl_disk_offline ( struct bch_fs * c , struct bch_ioctl_disk arg )
{
struct bch_dev * ca ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( ( arg . flags & ~ ( BCH_FORCE_IF_DATA_LOST |
BCH_FORCE_IF_METADATA_LOST |
BCH_FORCE_IF_DEGRADED |
BCH_BY_INDEX ) ) | |
arg . pad )
return - EINVAL ;
ca = bch2_device_lookup ( c , arg . dev , arg . flags ) ;
if ( IS_ERR ( ca ) )
return PTR_ERR ( ca ) ;
ret = bch2_dev_offline ( c , ca , arg . flags ) ;
2024-05-04 00:39:16 +03:00
bch2_dev_put ( ca ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
static long bch2_ioctl_disk_set_state ( struct bch_fs * c ,
struct bch_ioctl_disk_set_state arg )
{
struct bch_dev * ca ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( ( arg . flags & ~ ( BCH_FORCE_IF_DATA_LOST |
BCH_FORCE_IF_METADATA_LOST |
BCH_FORCE_IF_DEGRADED |
BCH_BY_INDEX ) ) | |
2021-07-09 01:15:38 +03:00
arg . pad [ 0 ] | | arg . pad [ 1 ] | | arg . pad [ 2 ] | |
arg . new_state > = BCH_MEMBER_STATE_NR )
2017-03-17 09:18:50 +03:00
return - EINVAL ;
ca = bch2_device_lookup ( c , arg . dev , arg . flags ) ;
if ( IS_ERR ( ca ) )
return PTR_ERR ( ca ) ;
ret = bch2_dev_set_state ( c , ca , arg . new_state , arg . flags ) ;
2022-11-26 02:29:36 +03:00
if ( ret )
bch_err ( c , " Error setting device state: %s " , bch2_err_str ( ret ) ) ;
2017-03-17 09:18:50 +03:00
2024-05-04 00:39:16 +03:00
bch2_dev_put ( ca ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
/* Per-job state for BCH_IOCTL_DATA; lives as long as the returned fd */
struct bch_data_ctx {
	struct thread_with_file		thr;	/* must be first: container_of() users rely on it */
	struct bch_fs			*c;
	struct bch_ioctl_data		arg;	/* job description from userspace */
	struct bch_move_stats		stats;	/* progress, read back via the fd */
};
/* Kernel thread entry point: runs the data job, stashing the result in thr.ret */
static int bch2_data_thread(void *arg)
{
	struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);

	ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
	/* U8_MAX data_type signals completion to bch2_data_job_read() readers */
	ctx->stats.data_type = U8_MAX;
	return 0;
}
/* ->release for the data-job fd: stop the worker thread and free the context */
static int bch2_data_job_release(struct inode *inode, struct file *file)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);

	bch2_thread_with_file_exit(&ctx->thr);
	kfree(ctx);
	return 0;
}
/*
 * ->read for the data-job fd: each read returns one fixed-size progress
 * event snapshot; the buffer must be at least sizeof(event) bytes.
 */
static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
	struct bch_fs *c = ctx->c;
	struct bch_ioctl_data_event e = {
		.type			= BCH_DATA_EVENT_PROGRESS,
		.p.data_type		= ctx->stats.data_type,
		.p.btree_id		= ctx->stats.pos.btree,
		.p.pos			= ctx->stats.pos.pos,
		.p.sectors_done		= atomic64_read(&ctx->stats.sectors_seen),
		/* total is current fs usage, not the job's work estimate */
		.p.sectors_total	= bch2_fs_usage_read_short(c).used,
	};

	if (len < sizeof(e))
		return -EINVAL;

	return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
}
/* fops for the anonymous fd returned by BCH_IOCTL_DATA */
static const struct file_operations bcachefs_data_ops = {
	.release	= bch2_data_job_release,
	.read		= bch2_data_job_read,
	.llseek		= no_llseek,
};
static long bch2_ioctl_data ( struct bch_fs * c ,
struct bch_ioctl_data arg )
{
2023-07-12 07:20:22 +03:00
struct bch_data_ctx * ctx ;
int ret ;
2017-03-17 09:18:50 +03:00
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( arg . op > = BCH_DATA_OP_NR | | arg . flags )
return - EINVAL ;
ctx = kzalloc ( sizeof ( * ctx ) , GFP_KERNEL ) ;
if ( ! ctx )
return - ENOMEM ;
ctx - > c = c ;
ctx - > arg = arg ;
2023-12-31 18:04:54 +03:00
ret = bch2_run_thread_with_file ( & ctx - > thr ,
& bcachefs_data_ops ,
bch2_data_thread ) ;
2017-03-17 09:18:50 +03:00
if ( ret < 0 )
2023-07-12 07:20:22 +03:00
kfree ( ctx ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
2019-12-17 01:53:59 +03:00
/*
 * BCH_IOCTL_FS_USAGE: report filesystem-wide usage plus the per-replicas-entry
 * breakdown.  Userspace supplies the size of its replicas buffer in
 * replica_entries_bytes; we return -ERANGE if it's too small.
 */
static long bch2_ioctl_fs_usage(struct bch_fs *c,
				struct bch_ioctl_fs_usage __user *user_arg)
{
	struct bch_ioctl_fs_usage arg = {};
	darray_char replicas = {};
	u32 replica_entries_bytes;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
		return -EFAULT;

	/* read replicas usage, bounds-check the user buffer, then copy out */
	ret = bch2_fs_replicas_usage_read(c, &replicas) ?:
		(replica_entries_bytes < replicas.nr ? -ERANGE : 0) ?:
		copy_to_user_errcode(&user_arg->replicas, replicas.data, replicas.nr);
	if (ret)
		goto err;

	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
	arg.capacity		= c->capacity;
	arg.used		= u.used;
	arg.online_reserved	= percpu_u64_get(c->online_reserved);
	arg.replica_entries_bytes = replicas.nr;

	/* per-replication-level persistent reservations from the accounting mem cache */
	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct disk_accounting_pos k = {
			.type = BCH_DISK_ACCOUNTING_persistent_reserved,
			.persistent_reserved.nr_replicas = i,
		};

		bch2_accounting_mem_read(c,
					 disk_accounting_pos_to_bpos(&k),
					 &arg.persistent_reserved[i], 1);
	}

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
err:
	darray_exit(&replicas);
	return ret;
}
2024-03-02 02:43:39 +03:00
/*
 * BCH_IOCTL_QUERY_ACCOUNTING: dump selected disk-accounting counters.
 * Userspace passes a type mask and the capacity of its buffer (in u64s);
 * -ERANGE if the buffer is too small for the selected counters.
 */
static long bch2_ioctl_query_accounting(struct bch_fs *c,
			struct bch_ioctl_query_accounting __user *user_arg)
{
	struct bch_ioctl_query_accounting arg;
	darray_char accounting = {};
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	/* read request, gather counters, bounds-check, copy the entries out */
	ret   = copy_from_user_errcode(&arg, user_arg, sizeof(arg)) ?:
		bch2_fs_accounting_read(c, &accounting, arg.accounting_types_mask) ?:
		(arg.accounting_u64s * sizeof(u64) < accounting.nr ? -ERANGE : 0) ?:
		copy_to_user_errcode(&user_arg->accounting, accounting.data, accounting.nr);
	if (ret)
		goto err;

	arg.capacity		= c->capacity;
	arg.used		= bch2_fs_usage_read_short(c).used;
	arg.online_reserved	= percpu_u64_get(c->online_reserved);
	arg.accounting_u64s	= accounting.nr / sizeof(u64);

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
err:
	darray_exit(&accounting);
	return ret;
}
2023-11-24 03:26:27 +03:00
/* obsolete, didn't allow for new data types: */
static long bch2_ioctl_dev_usage(struct bch_fs *c,
				 struct bch_ioctl_dev_usage __user *user_arg)
{
	struct bch_ioctl_dev_usage arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	unsigned i;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	/* takes a ref on ca; dropped below */
	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	/* fixed-size d[] is why this ioctl couldn't grow new data types */
	for (i = 0; i < ARRAY_SIZE(arg.d); i++) {
		arg.d[i].buckets	= src.d[i].buckets;
		arg.d[i].sectors	= src.d[i].sectors;
		arg.d[i].fragmented	= src.d[i].fragmented;
	}

	bch2_dev_put(ca);

	return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}
/*
 * BCH_IOCTL_DEV_USAGE_V2: like dev_usage, but with a caller-sized array of
 * per-data-type entries so new data types can be added without a new ioctl.
 */
static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
				 struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
	struct bch_ioctl_dev_usage_v2 arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	/* takes a ref on ca; dropped at err */
	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	/* clamp to what this kernel knows about, then echo back to the caller */
	arg.nr_data_types	= min(arg.nr_data_types, BCH_DATA_NR);
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
	if (ret)
		goto err;

	/* copy out one entry per data type, directly into the user's array */
	for (unsigned i = 0; i < arg.nr_data_types; i++) {
		struct bch_ioctl_dev_usage_type t = {
			.buckets	= src.d[i].buckets,
			.sectors	= src.d[i].sectors,
			.fragmented	= src.d[i].fragmented,
		};

		ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
		if (ret)
			goto err;
	}
err:
	bch2_dev_put(ca);
	return ret;
}
/*
 * BCH_IOCTL_READ_SUPER: copy the filesystem superblock (or, with
 * BCH_READ_DEV, a specific member device's superblock) to a user buffer.
 */
static long bch2_ioctl_read_super(struct bch_fs *c,
				  struct bch_ioctl_read_super arg)
{
	struct bch_dev *ca = NULL;
	struct bch_sb *sb;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
	    arg.pad)
		return -EINVAL;

	/* sb_lock keeps the superblock stable while we copy it out */
	mutex_lock(&c->sb_lock);

	if (arg.flags & BCH_READ_DEV) {
		ca = bch2_device_lookup(c, arg.dev, arg.flags);
		ret = PTR_ERR_OR_ZERO(ca);
		if (ret)
			goto err_unlock;

		sb = ca->disk_sb.sb;
	} else {
		sb = c->disk_sb.sb;
	}

	if (vstruct_bytes(sb) > arg.size) {
		ret = -ERANGE;
		goto err;
	}

	ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
				   vstruct_bytes(sb));
err:
	bch2_dev_put(ca);	/* NULL-safe when BCH_READ_DEV wasn't set */
err_unlock:
	mutex_unlock(&c->sb_lock);
	return ret;
}
/*
 * BCH_IOCTL_DISK_GET_IDX: map a block device number to its bcachefs member
 * index.
 */
static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
				    struct bch_ioctl_disk_get_idx arg)
{
	dev_t dev = huge_decode_dev(arg.dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	for_each_online_member(c, ca)
		if (ca->dev == dev) {
			/*
			 * Drop the io ref the iteration macro holds before
			 * returning out of the loop early:
			 */
			percpu_ref_put(&ca->io_ref);
			return ca->dev_idx;
		}

	return -BCH_ERR_ENOENT_dev_idx_not_found;
}
static long bch2_ioctl_disk_resize ( struct bch_fs * c ,
struct bch_ioctl_disk_resize arg )
{
struct bch_dev * ca ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2017-03-17 09:18:50 +03:00
if ( ( arg . flags & ~ BCH_BY_INDEX ) | |
arg . pad )
return - EINVAL ;
ca = bch2_device_lookup ( c , arg . dev , arg . flags ) ;
if ( IS_ERR ( ca ) )
return PTR_ERR ( ca ) ;
ret = bch2_dev_resize ( c , ca , arg . nbuckets ) ;
2024-05-04 00:39:16 +03:00
bch2_dev_put ( ca ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
2020-11-16 22:23:06 +03:00
static long bch2_ioctl_disk_resize_journal ( struct bch_fs * c ,
struct bch_ioctl_disk_resize_journal arg )
{
struct bch_dev * ca ;
int ret ;
2021-07-04 22:35:32 +03:00
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2020-11-16 22:23:06 +03:00
if ( ( arg . flags & ~ BCH_BY_INDEX ) | |
arg . pad )
return - EINVAL ;
2023-09-20 05:26:18 +03:00
if ( arg . nbuckets > U32_MAX )
return - EINVAL ;
2020-11-16 22:23:06 +03:00
ca = bch2_device_lookup ( c , arg . dev , arg . flags ) ;
if ( IS_ERR ( ca ) )
return PTR_ERR ( ca ) ;
ret = bch2_set_nr_journal_buckets ( c , ca , arg . nbuckets ) ;
2024-05-04 00:39:16 +03:00
bch2_dev_put ( ca ) ;
2020-11-16 22:23:06 +03:00
return ret ;
}
2024-02-18 04:49:11 +03:00
/*
 * Worker for BCH_IOCTL_FSCK_ONLINE: runs recovery passes on a mounted fs,
 * temporarily overriding c->opts and redirecting console output to the
 * caller's stdio file.  Releases the refs/mutex taken by the ioctl handler.
 */
static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	/* only redirect output generated by this thread */
	c->stdio_filter = current;
	c->stdio = &thr->thr.stdio;

	/*
	 * XXX: can we figure out a way to do this without mucking with c->opts?
	 */
	unsigned old_fix_errors = c->opts.fix_errors;
	if (opt_defined(thr->opts, fix_errors))
		c->opts.fix_errors = thr->opts.fix_errors;
	else
		c->opts.fix_errors = FSCK_FIX_ask;	/* interactive over stdio */

	c->opts.fsck = true;
	set_bit(BCH_FS_fsck_running, &c->flags);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
	int ret = bch2_run_online_recovery_passes(c);

	clear_bit(BCH_FS_fsck_running, &c->flags);
	bch_err_fn(c, ret);

	/* restore redirection and options before releasing the fs */
	c->stdio = NULL;
	c->stdio_filter = NULL;
	c->opts.fix_errors = old_fix_errors;

	/* taken by bch2_ioctl_fsck_online() */
	up(&c->online_fsck_mutex);
	bch2_ro_ref_put(c);
	return ret;
}
2024-02-10 22:23:01 +03:00
/* thread_with_stdio callbacks for online fsck */
static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_online_thread_fn,
};
2023-12-04 21:45:33 +03:00
/*
 * BCH_IOCTL_FSCK_ONLINE: run fsck on a mounted filesystem in a kernel
 * thread.  Only one online fsck may run at a time (online_fsck_mutex);
 * the ro ref keeps the fs from being torn down under the worker.
 */
static long bch2_ioctl_fsck_online(struct bch_fs *c,
				   struct bch_ioctl_fsck_online arg)
{
	struct fsck_thread *thr = NULL;
	long ret = 0;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!bch2_ro_ref_tryget(c))
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->c = c;
	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(c, &thr->opts, NULL, optstr);
		if (!IS_ERR(optstr))
			kfree(optstr);

		if (ret)
			goto err;
	}

	/* on success the thread owns thr and releases the mutex/ref itself */
	ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
err:
	if (ret < 0) {
		bch_err_fn(c, ret);
		if (thr)
			bch2_fsck_thread_exit(&thr->thr);
		up(&c->online_fsck_mutex);
		bch2_ro_ref_put(c);
	}
	return ret;
}
2017-03-17 09:18:50 +03:00
/*
 * Helper for bch2_fs_ioctl(): copy the by-value argument struct in from
 * userspace, dispatch to bch2_ioctl_<name>(), then jump to the common
 * error-class translation at out.  Relies on `arg`, `c`, `ret` and the
 * `out` label in the enclosing function.
 */
#define BCH_IOCTL(_name, _argtype)					\
do {									\
	_argtype i;							\
									\
	if (copy_from_user(&i, arg, sizeof(i)))				\
		return -EFAULT;						\
	ret = bch2_ioctl_##_name(c, i);					\
	goto out;							\
} while (0)
/*
 * Dispatch for per-filesystem ioctls (both the chardev and, via callers
 * elsewhere, the mounted fs).  The first switch handles commands legal
 * before the fs is started; everything after the BCH_FS_started check
 * requires a running filesystem.  Cases that return directly skip the
 * bch2_err_class() translation done at out.
 */
long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
	case BCH_IOCTL_QUERY_UUID:
		return bch2_ioctl_query_uuid(c, arg);
	case BCH_IOCTL_FS_USAGE:
		return bch2_ioctl_fs_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE:
		return bch2_ioctl_dev_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE_V2:
		return bch2_ioctl_dev_usage_v2(c, arg);
#if 0
	case BCH_IOCTL_START:
		BCH_IOCTL(start, struct bch_ioctl_start);
	case BCH_IOCTL_STOP:
		return bch2_ioctl_stop(c);
#endif
	case BCH_IOCTL_READ_SUPER:
		BCH_IOCTL(read_super, struct bch_ioctl_read_super);
	case BCH_IOCTL_DISK_GET_IDX:
		BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
	}

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	switch (cmd) {
	case BCH_IOCTL_DISK_ADD:
		BCH_IOCTL(disk_add, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_REMOVE:
		BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_ONLINE:
		BCH_IOCTL(disk_online, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_OFFLINE:
		BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_SET_STATE:
		BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
	case BCH_IOCTL_DATA:
		BCH_IOCTL(data, struct bch_ioctl_data);
	case BCH_IOCTL_DISK_RESIZE:
		BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
	case BCH_IOCTL_DISK_RESIZE_JOURNAL:
		BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
	case BCH_IOCTL_FSCK_ONLINE:
		BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
	case BCH_IOCTL_QUERY_ACCOUNTING:
		return bch2_ioctl_query_accounting(c, arg);
	default:
		return -ENOTTY;
	}
out:
	if (ret < 0)
		ret = bch2_err_class(ret);
	return ret;
}
static DEFINE_IDR ( bch_chardev_minor ) ;
/*
 * Top-level ioctl entry: per-fs control nodes have a minor registered in
 * bch_chardev_minor; minor U8_MAX is the global bcachefs-ctl node.
 */
static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
{
	unsigned minor = iminor(file_inode(filp));
	void __user *arg = (void __user *) v;
	struct bch_fs *c = NULL;

	if (minor < U8_MAX)
		c = idr_find(&bch_chardev_minor, minor);

	if (c)
		return bch2_fs_ioctl(c, cmd, arg);

	return bch2_global_ioctl(cmd, arg);
}
/* fops shared by the global and per-fs control nodes */
static const struct file_operations bch_chardev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= bch2_chardev_ioctl,
	.open		= nonseekable_open,
};
static int bch_chardev_major ;
2024-03-08 15:12:47 +03:00
/* device class for all bcachefs control nodes */
static const struct class bch_chardev_class = {
	.name = "bcachefs",
};
2017-03-17 09:18:50 +03:00
static struct device * bch_chardev ;
/* Tear down a filesystem's control node; safe if init failed partway */
void bch2_fs_chardev_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->chardev))
		device_unregister(c->chardev);
	if (c->minor >= 0)
		idr_remove(&bch_chardev_minor, c->minor);
}
int bch2_fs_chardev_init ( struct bch_fs * c )
{
c - > minor = idr_alloc ( & bch_chardev_minor , c , 0 , 0 , GFP_KERNEL ) ;
if ( c - > minor < 0 )
return c - > minor ;
2024-03-08 15:12:47 +03:00
c - > chardev = device_create ( & bch_chardev_class , NULL ,
2017-03-17 09:18:50 +03:00
MKDEV ( bch_chardev_major , c - > minor ) , c ,
" bcachefs%u-ctl " , c - > minor ) ;
if ( IS_ERR ( c - > chardev ) )
return PTR_ERR ( c - > chardev ) ;
return 0 ;
}
/* Module-level teardown: destroy the global ctl node, class, and major */
void bch2_chardev_exit(void)
{
	/* minor U8_MAX is the global bcachefs-ctl node */
	device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
	class_unregister(&bch_chardev_class);
	if (bch_chardev_major > 0)
		unregister_chrdev(bch_chardev_major, "bcachefs");
}
/*
 * Module-level setup: register the char major, the device class, and the
 * global /dev/bcachefs-ctl node (minor U8_MAX).  Unwinds in reverse order
 * on failure.
 */
int __init bch2_chardev_init(void)
{
	int ret;

	bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
	if (bch_chardev_major < 0)
		return bch_chardev_major;

	ret = class_register(&bch_chardev_class);
	if (ret)
		goto major_out;

	bch_chardev = device_create(&bch_chardev_class, NULL,
				    MKDEV(bch_chardev_major, U8_MAX),
				    NULL, "bcachefs-ctl");
	if (IS_ERR(bch_chardev)) {
		ret = PTR_ERR(bch_chardev);
		goto class_out;
	}

	return 0;

class_out:
	class_unregister(&bch_chardev_class);
major_out:
	unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
	return ret;
}
# endif /* NO_BCACHEFS_CHARDEV */