2013-05-29 15:21:46 +04:00
/*
   Unix SMB/CIFS implementation.

   Wrap GlusterFS GFAPI calls in vfs functions.

   Copyright (c) 2013 Anand Avati <avati@redhat.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
2013-12-12 22:27:10 +04:00
/**
 * @file   vfs_glusterfs.c
 * @author Anand Avati <avati@redhat.com>
 * @date   May 2013
 * @brief  Samba VFS module for glusterfs
 *
 * @todo
 *   - sendfile/recvfile support
 *
 * A Samba VFS module for GlusterFS, based on Gluster's libgfapi.
 * This is a "bottom" vfs module (not something to be stacked on top of
 * another module), and translates (most) calls to the closest actions
 * available in libgfapi.
 */
2013-05-29 15:21:46 +04:00
# include "includes.h"
# include "smbd/smbd.h"
# include <stdio.h>
2017-10-31 17:52:49 +03:00
# include <glusterfs/api/glfs.h>
2013-05-29 15:21:46 +04:00
# include "lib/util/dlinklist.h"
2014-12-11 05:05:10 +03:00
# include "lib/util/tevent_unix.h"
# include "smbd/globals.h"
2015-10-12 16:57:34 +03:00
# include "lib/util/sys_rw.h"
2016-03-28 10:20:22 +03:00
# include "smbprofile.h"
2016-03-21 05:42:20 +03:00
# include "modules/posixacl_xattr.h"
2019-07-24 12:45:33 +03:00
# include "lib/pthreadpool/pthreadpool_tevent.h"
2013-05-29 15:21:46 +04:00
# define DEFAULT_VOLFILE_SERVER "localhost"
2019-06-03 17:25:46 +03:00
# define GLUSTER_NAME_MAX 255
2013-05-29 15:21:46 +04:00
2013-12-12 22:29:20 +04:00
/**
 * Helper to convert a POSIX struct stat to Samba's struct stat_ex.
 *
 * Fields not provided by struct stat (birthtime, file id, itime) are
 * synthesized from the available data and flagged as calculated.
 * Nanosecond timestamps are copied only when the platform exposes them
 * (STAT_HAVE_NSEC); otherwise they remain zero from ZERO_STRUCTP().
 */
static void smb_stat_ex_from_stat(struct stat_ex *dst, const struct stat *src)
{
	ZERO_STRUCTP(dst);

	dst->st_ex_dev = src->st_dev;
	dst->st_ex_ino = src->st_ino;
	dst->st_ex_mode = src->st_mode;
	dst->st_ex_nlink = src->st_nlink;
	dst->st_ex_uid = src->st_uid;
	dst->st_ex_gid = src->st_gid;
	dst->st_ex_rdev = src->st_rdev;
	dst->st_ex_size = src->st_size;
	dst->st_ex_atime.tv_sec = src->st_atime;
	dst->st_ex_mtime.tv_sec = src->st_mtime;
	dst->st_ex_ctime.tv_sec = src->st_ctime;
	/* struct stat carries no birthtime; fall back to mtime. */
	dst->st_ex_btime.tv_sec = src->st_mtime;
	dst->st_ex_blksize = src->st_blksize;
	dst->st_ex_blocks = src->st_blocks;
	/* No native file id available; derive it from the inode number. */
	dst->st_ex_file_id = dst->st_ex_ino;
	dst->st_ex_iflags |= ST_EX_IFLAG_CALCULATED_FILE_ID;
#ifdef STAT_HAVE_NSEC
	dst->st_ex_atime.tv_nsec = src->st_atime_nsec;
	dst->st_ex_mtime.tv_nsec = src->st_mtime_nsec;
	dst->st_ex_ctime.tv_nsec = src->st_ctime_nsec;
	dst->st_ex_btime.tv_nsec = src->st_mtime_nsec;
#endif
	/* Instantiation time is not provided either; derive from btime. */
	dst->st_ex_itime = dst->st_ex_btime;
	dst->st_ex_iflags |= ST_EX_IFLAG_CALCULATED_ITIME;
}
/*
 * Registry of pre-opened glfs_t handles so that multiple tree connects
 * to the same (volume, connectpath) pair share one gfapi connection.
 * Entries are reference counted; see glfs_set_preopened(),
 * glfs_find_preopened() and glfs_clear_preopened().
 */
static struct glfs_preopened {
	char *volume;		/* gluster volume name */
	char *connectpath;	/* share connect path this handle serves */
	glfs_t *fs;		/* the shared gfapi handle */
	int ref;		/* reference count; entry freed at zero */
	struct glfs_preopened *next, *prev;
} *glfs_preopened;
2014-12-10 09:56:34 +03:00
/*
 * Register an initialized glfs_t handle for (volume, connectpath) so
 * later tree connects can reuse it. Takes an initial reference.
 * Returns 0 on success, -1 with errno set to ENOMEM on failure.
 */
static int glfs_set_preopened(const char *volume, const char *connectpath, glfs_t *fs)
{
	struct glfs_preopened *entry = talloc_zero(NULL, struct glfs_preopened);

	if (entry == NULL) {
		goto nomem;
	}

	entry->volume = talloc_strdup(entry, volume);
	if (entry->volume == NULL) {
		goto nomem_free;
	}

	entry->connectpath = talloc_strdup(entry, connectpath);
	if (entry->connectpath == NULL) {
		goto nomem_free;
	}

	entry->fs = fs;
	entry->ref = 1;

	DLIST_ADD(glfs_preopened, entry);

	return 0;

nomem_free:
	talloc_free(entry);
nomem:
	errno = ENOMEM;
	return -1;
}
2014-12-10 09:56:34 +03:00
static glfs_t * glfs_find_preopened ( const char * volume , const char * connectpath )
2013-05-29 15:21:46 +04:00
{
struct glfs_preopened * entry = NULL ;
for ( entry = glfs_preopened ; entry ; entry = entry - > next ) {
2014-12-10 09:56:34 +03:00
if ( strcmp ( entry - > volume , volume ) = = 0 & &
strcmp ( entry - > connectpath , connectpath ) = = 0 )
{
2013-05-29 15:21:46 +04:00
entry - > ref + + ;
return entry - > fs ;
}
}
return NULL ;
}
/*
 * Drop one reference on a pre-opened glfs_t. When the last reference
 * goes away the entry is unlinked, the gfapi handle shut down via
 * glfs_fini() and the entry freed.
 *
 * BUGFIX: the original loop kept iterating via entry->next after
 * talloc_free(entry), a use-after-free. Since glfs_find_preopened()
 * bumps the refcount instead of adding duplicates, each fs appears in
 * the list at most once, so we can simply return after freeing.
 */
static void glfs_clear_preopened(glfs_t *fs)
{
	struct glfs_preopened *entry = NULL;

	for (entry = glfs_preopened; entry; entry = entry->next) {
		if (entry->fs == fs) {
			if (--entry->ref) {
				return;
			}

			DLIST_REMOVE(glfs_preopened, entry);

			glfs_fini(entry->fs);
			talloc_free(entry);
			return; /* entry is gone; must not touch entry->next */
		}
	}
}
2016-08-25 13:33:55 +03:00
/*
 * Parse the "glusterfs:volfile_server" option (a whitespace-separated
 * list of servers) and register each one with gfapi.
 *
 * Accepted forms per entry:
 *   unix+/path/to/socket
 *   [tcp+]host[:port]          (IPv6 hosts enclosed in [])
 *
 * Returns 0 if at least one server was registered (partial failures
 * are logged), -1 if the list was empty or a fatal error occurred.
 *
 * IDIOM FIX: use strchr() instead of the legacy index(), which was
 * removed from POSIX.1-2008.
 */
static int vfs_gluster_set_volfile_servers(glfs_t *fs,
					   const char *volfile_servers)
{
	char *server = NULL;
	size_t server_count = 0;
	size_t server_success = 0;
	int ret = -1;
	TALLOC_CTX *frame = talloc_stackframe();

	DBG_INFO("servers list %s\n", volfile_servers);

	while (next_token_talloc(frame, &volfile_servers, &server, " \t")) {
		char *transport = NULL;
		char *host = NULL;
		int port = 0;

		server_count++;
		DBG_INFO("server %zu %s\n", server_count, server);

		/* Determine the transport type */
		if (strncmp(server, "unix+", 5) == 0) {
			port = 0;
			transport = talloc_strdup(frame, "unix");
			if (!transport) {
				errno = ENOMEM;
				goto out;
			}
			host = talloc_strdup(frame, server + 5);
			if (!host) {
				errno = ENOMEM;
				goto out;
			}
		} else {
			char *p = NULL;
			char *port_index = NULL;

			if (strncmp(server, "tcp+", 4) == 0) {
				server += 4;
			}

			/* IPv6 is enclosed in []
			 * ':' before ']' is part of IPv6
			 * ':' after ']' indicates port
			 */
			p = server;
			if (server[0] == '[') {
				server++;
				p = strchr(server, ']');
				if (p == NULL) {
					/* Malformed IPv6 */
					continue;
				}
				p[0] = '\0';
				p++;
			}

			port_index = strchr(p, ':');
			if (port_index == NULL) {
				port = 0;
			} else {
				port = atoi(port_index + 1);
				port_index[0] = '\0';
			}

			transport = talloc_strdup(frame, "tcp");
			if (!transport) {
				errno = ENOMEM;
				goto out;
			}
			host = talloc_strdup(frame, server);
			if (!host) {
				errno = ENOMEM;
				goto out;
			}
		}

		DBG_INFO("Calling set volfile server with params "
			 "transport=%s, host=%s, port=%d\n", transport,
			 host, port);

		ret = glfs_set_volfile_server(fs, transport, host, port);
		if (ret < 0) {
			DBG_WARNING("Failed to set volfile_server "
				    "transport=%s, host=%s, port=%d (%s)\n",
				    transport, host, port, strerror(errno));
		} else {
			server_success++;
		}
	}

out:
	if (server_count == 0) {
		ret = -1;
	} else if (server_success < server_count) {
		DBG_WARNING("Failed to set %zu out of %zu servers parsed\n",
			    server_count - server_success, server_count);
		ret = 0;
	}

	TALLOC_FREE(frame);
	return ret;
}
2013-05-29 15:21:46 +04:00
/* Disk Operations */
2020-11-02 14:30:36 +03:00
/*
 * Refuse the connection if the volume's volfile still contains the
 * write-behind translator (i.e. "volume <name>-write-behind" appears
 * as a line in the volfile).
 *
 * Returns 0 if the translator is absent, -1 on any error or if the
 * translator is present.
 *
 * Memory note: on success of file_lines_parse(), buf becomes a talloc
 * child of lines, and both hang off mem_ctx, which the caller frees.
 */
static int check_for_write_behind_translator(TALLOC_CTX *mem_ctx,
					     glfs_t *fs,
					     const char *volume)
{
	char *buf = NULL;
	char **lines = NULL;
	int numlines = 0;
	int i;
	char *option;
	bool write_behind_present = false;
	size_t newlen;
	int ret;

	/*
	 * Probe with a zero-sized buffer: glfs_get_volfile() returns the
	 * negated required length when the buffer is too small.
	 */
	ret = glfs_get_volfile(fs, NULL, 0);
	if (ret == 0) {
		DBG_ERR("%s: Failed to get volfile for "
			"volume (%s): No volfile\n",
			volume,
			strerror(errno));
		return -1;
	}
	if (ret > 0) {
		/* A positive return here is unexpected for a NULL buffer. */
		DBG_ERR("%s: Invalid return %d for glfs_get_volfile for "
			"volume (%s): No volfile\n",
			volume,
			ret,
			strerror(errno));
		return -1;
	}

	/* ret is negative; negate to get the needed buffer size. */
	newlen = 0 - ret;

	buf = talloc_zero_array(mem_ctx, char, newlen);
	if (buf == NULL) {
		return -1;
	}

	ret = glfs_get_volfile(fs, buf, newlen);
	/* NOTE(review): int vs size_t comparison; ret is expected to be
	 * positive and equal to newlen on success. */
	if (ret != newlen) {
		TALLOC_FREE(buf);
		DBG_ERR("%s: Failed to get volfile for volume (%s)\n",
			volume, strerror(errno));
		return -1;
	}

	/* The volfile line that marks the translator's presence. */
	option = talloc_asprintf(mem_ctx, "volume %s-write-behind", volume);
	if (option == NULL) {
		TALLOC_FREE(buf);
		return -1;
	}

	/*
	 * file_lines_parse() plays horrible tricks with
	 * the passed-in talloc pointers and the hierarchy
	 * which makes freeing hard to get right.
	 *
	 * As we know mem_ctx is freed by the caller, after
	 * this point don't free on exit and let the caller
	 * handle it. This violates good Samba coding practice
	 * but we know we're not leaking here.
	 */
	lines = file_lines_parse(buf,
				 newlen,
				 &numlines,
				 mem_ctx);
	if (lines == NULL || numlines <= 0) {
		return -1;
	}
	/* On success, buf is now a talloc child of lines!! */

	for (i = 0; i < numlines; i++) {
		if (strequal(lines[i], option)) {
			write_behind_present = true;
			break;
		}
	}

	if (write_behind_present) {
		DBG_ERR("Write behind translator is enabled for "
			"volume (%s), refusing to connect! "
			"Please turn off the write behind translator by calling "
			"'gluster volume set %s performance.write-behind off' "
			"on the commandline. "
			"Check the vfs_glusterfs(8) manpage for "
			"further details.\n",
			volume, volume);
		return -1;
	}

	return 0;
}
2013-05-29 15:21:46 +04:00
/*
 * SMB_VFS_CONNECT: set up (or reuse) a gfapi connection for this share.
 *
 * The volume name comes from "glusterfs:volume" (falling back to the
 * service name) and the server list from "glusterfs:volfile_server"
 * (falling back to DEFAULT_VOLFILE_SERVER). An already-initialized
 * glfs_t for the same (volume, connectpath) is reused via the
 * preopened registry; otherwise a new handle is created, configured,
 * initialized and registered.
 *
 * Returns 0 on success, -1 on failure (the partially set up glfs_t is
 * torn down with glfs_fini() in that case).
 */
static int vfs_gluster_connect(struct vfs_handle_struct *handle,
			       const char *service,
			       const char *user)
{
	const struct loadparm_substitution *lp_sub =
		loadparm_s3_global_substitution();
	const char *volfile_servers;
	const char *volume;
	char *logfile;
	int loglevel;
	glfs_t *fs = NULL;
	TALLOC_CTX *tmp_ctx;
	int ret = 0;
	bool write_behind_pass_through_set = false;

	tmp_ctx = talloc_new(NULL);
	if (tmp_ctx == NULL) {
		ret = -1;
		goto done;
	}
	logfile = lp_parm_substituted_string(tmp_ctx,
					     lp_sub,
					     SNUM(handle->conn),
					     "glusterfs",
					     "logfile",
					     NULL);

	loglevel = lp_parm_int(SNUM(handle->conn), "glusterfs", "loglevel", -1);

	volfile_servers = lp_parm_substituted_string(tmp_ctx,
						     lp_sub,
						     SNUM(handle->conn),
						     "glusterfs",
						     "volfile_server",
						     NULL);
	if (volfile_servers == NULL) {
		volfile_servers = DEFAULT_VOLFILE_SERVER;
	}

	volume = lp_parm_const_string(SNUM(handle->conn), "glusterfs", "volume",
				      NULL);
	if (volume == NULL) {
		volume = service;
	}

	/* Reuse an existing connection for this volume/connectpath. */
	fs = glfs_find_preopened(volume, handle->conn->connectpath);
	if (fs) {
		goto done;
	}

	fs = glfs_new(volume);
	if (fs == NULL) {
		ret = -1;
		goto done;
	}

	ret = vfs_gluster_set_volfile_servers(fs, volfile_servers);
	if (ret < 0) {
		DBG_ERR("Failed to set volfile_servers from list %s\n",
			volfile_servers);
		goto done;
	}

	ret = glfs_set_xlator_option(fs, "*-md-cache", "cache-posix-acl",
				     "true");
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to set xlator options\n", volume));
		goto done;
	}

	ret = glfs_set_xlator_option(fs, "*-md-cache", "cache-selinux",
				     "true");
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to set xlator options\n", volume));
		goto done;
	}

	/*
	 * Let the snapview client know the share's entry path so
	 * snapshot directories resolve correctly.
	 */
	ret = glfs_set_xlator_option(fs, "*-snapview-client",
				     "snapdir-entry-path",
				     handle->conn->connectpath);
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to set xlator option: "
			  "snapdir-entry-path\n", volume));
		goto done;
	}

#ifdef HAVE_GFAPI_VER_7_9
	/*
	 * Newer gfapi can put write-behind into pass-through mode
	 * directly; then the volfile check below can be skipped.
	 */
	ret = glfs_set_xlator_option(fs, "*-write-behind", "pass-through",
				     "true");
	if (ret < 0) {
		DBG_ERR("%s: Failed to set xlator option: pass-through\n",
			volume);
		goto done;
	}
	write_behind_pass_through_set = true;
#endif

	ret = glfs_set_logging(fs, logfile, loglevel);
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to set logfile %s loglevel %d\n",
			  volume, logfile, loglevel));
		goto done;
	}

	ret = glfs_init(fs);
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to initialize volume (%s)\n",
			  volume, strerror(errno)));
		goto done;
	}

	if (!write_behind_pass_through_set) {
		ret = check_for_write_behind_translator(tmp_ctx, fs, volume);
		if (ret < 0) {
			goto done;
		}
	}

	ret = glfs_set_preopened(volume, handle->conn->connectpath, fs);
	if (ret < 0) {
		DEBUG(0, ("%s: Failed to register volume (%s)\n",
			  volume, strerror(errno)));
		goto done;
	}

	/*
	 * The shadow_copy2 module will fail to export subdirectories
	 * of a gluster volume unless we specify the mount point,
	 * because the detection fails if the file system is not
	 * locally mounted:
	 * https://bugzilla.samba.org/show_bug.cgi?id=13091
	 */
	lp_do_parameter(SNUM(handle->conn), "shadow:mountpoint", "/");

	/*
	 * Unless we have an async implementation of getxattrat turn this off.
	 */
	lp_do_parameter(SNUM(handle->conn), "smbd async dosmode", "false");

done:
	if (ret < 0) {
		if (fs)
			glfs_fini(fs);
	} else {
		/* NOTE(review): logged at ERR level although this is the
		 * success path — presumably to always record the volume
		 * initialization; confirm intended level. */
		DBG_ERR("%s: Initialized volume from servers %s\n",
			volume, volfile_servers);
		handle->data = fs;
	}
	talloc_free(tmp_ctx);
	return ret;
}
static void vfs_gluster_disconnect ( struct vfs_handle_struct * handle )
{
glfs_t * fs = NULL ;
fs = handle - > data ;
glfs_clear_preopened ( fs ) ;
}
static uint64_t vfs_gluster_disk_free ( struct vfs_handle_struct * handle ,
2017-05-23 20:40:47 +03:00
const struct smb_filename * smb_fname ,
uint64_t * bsize_p ,
uint64_t * dfree_p ,
uint64_t * dsize_p )
2013-05-29 15:21:46 +04:00
{
struct statvfs statvfs = { 0 , } ;
int ret ;
2017-05-23 20:40:47 +03:00
ret = glfs_statvfs ( handle - > data , smb_fname - > base_name , & statvfs ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
return - 1 ;
}
if ( bsize_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* bsize_p = ( uint64_t ) statvfs . f_bsize ; /* Block size */
2013-05-29 15:21:46 +04:00
}
if ( dfree_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* dfree_p = ( uint64_t ) statvfs . f_bavail ; /* Available Block units */
2013-05-29 15:21:46 +04:00
}
if ( dsize_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* dsize_p = ( uint64_t ) statvfs . f_blocks ; /* Total Block units */
2013-05-29 15:21:46 +04:00
}
2013-08-07 01:45:06 +04:00
return ( uint64_t ) statvfs . f_bavail ;
2013-05-29 15:21:46 +04:00
}
/* SMB_VFS_GET_QUOTA: quota reporting is not implemented for gfapi. */
static int vfs_gluster_get_quota(struct vfs_handle_struct *handle,
				 const struct smb_filename *smb_fname,
				 enum SMB_QUOTA_TYPE qtype,
				 unid_t id,
				 SMB_DISK_QUOTA *qt)
{
	errno = ENOSYS;
	return -1;
}
/* SMB_VFS_SET_QUOTA: quota setting is not implemented for gfapi. */
static int
vfs_gluster_set_quota(struct vfs_handle_struct *handle,
		      enum SMB_QUOTA_TYPE qtype, unid_t id, SMB_DISK_QUOTA *qt)
{
	errno = ENOSYS;
	return -1;
}
static int vfs_gluster_statvfs ( struct vfs_handle_struct * handle ,
2017-06-03 01:26:06 +03:00
const struct smb_filename * smb_fname ,
struct vfs_statvfs_struct * vfs_statvfs )
2013-05-29 15:21:46 +04:00
{
struct statvfs statvfs = { 0 , } ;
int ret ;
2017-06-03 01:26:06 +03:00
ret = glfs_statvfs ( handle - > data , smb_fname - > base_name , & statvfs ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
DEBUG ( 0 , ( " glfs_statvfs(%s) failed: %s \n " ,
2017-06-03 01:26:06 +03:00
smb_fname - > base_name , strerror ( errno ) ) ) ;
2013-05-29 15:21:46 +04:00
return - 1 ;
}
ZERO_STRUCTP ( vfs_statvfs ) ;
vfs_statvfs - > OptimalTransferSize = statvfs . f_frsize ;
vfs_statvfs - > BlockSize = statvfs . f_bsize ;
vfs_statvfs - > TotalBlocks = statvfs . f_blocks ;
vfs_statvfs - > BlocksAvail = statvfs . f_bfree ;
vfs_statvfs - > UserBlocksAvail = statvfs . f_bavail ;
vfs_statvfs - > TotalFileNodes = statvfs . f_files ;
vfs_statvfs - > FreeFileNodes = statvfs . f_ffree ;
vfs_statvfs - > FsIdentifier = statvfs . f_fsid ;
vfs_statvfs - > FsCapabilities =
FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES ;
return ret ;
}
/*
 * SMB_VFS_FS_CAPABILITIES: advertise filesystem capabilities. Sparse
 * file support requires gfapi >= 6; nanosecond timestamp resolution is
 * advertised only when the platform stat provides nanoseconds.
 */
static uint32_t vfs_gluster_fs_capabilities(struct vfs_handle_struct *handle,
					    enum timestamp_set_resolution *p_ts_res)
{
	uint32_t caps = FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES;

#ifdef HAVE_GFAPI_VER_6
	caps |= FILE_SUPPORTS_SPARSE_FILES;
#endif

#ifdef STAT_HAVE_NSEC
	*p_ts_res = TIMESTAMP_SET_NT_OR_BETTER;
#endif

	return caps;
}
2018-10-10 18:32:25 +03:00
/*
 * Retrieve the glfs_fd_t stored in the fsp's VFS extension by
 * vfs_gluster_openat(). Returns NULL if the extension is missing or
 * holds no fd.
 */
static glfs_fd_t *vfs_gluster_fetch_glfd(struct vfs_handle_struct *handle,
					 files_struct *fsp)
{
	glfs_fd_t **glfd_p = (glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp);

	if (glfd_p == NULL) {
		DBG_INFO("Failed to fetch fsp extension\n");
		return NULL;
	}
	if (*glfd_p == NULL) {
		DBG_INFO("Empty glfs_fd_t pointer\n");
		return NULL;
	}

	return *glfd_p;
}
2013-05-29 15:21:46 +04:00
/*
 * SMB_VFS_FDOPENDIR: the directory was already opened with
 * glfs_opendir() in vfs_gluster_openat(), so just hand back the stored
 * glfs_fd_t cast to DIR*. mask/attributes are unused.
 */
static DIR *vfs_gluster_fdopendir(struct vfs_handle_struct *handle,
				  files_struct *fsp, const char *mask,
				  uint32_t attributes)
{
	glfs_fd_t *glfd = vfs_gluster_fetch_glfd(handle, fsp);

	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	return (DIR *)glfd;
}
/* SMB_VFS_CLOSEDIR: close the gfapi directory handle. */
static int vfs_gluster_closedir(struct vfs_handle_struct *handle, DIR *dirp)
{
	int result;

	START_PROFILE(syscall_closedir);
	result = glfs_closedir((void *)dirp);
	END_PROFILE(syscall_closedir);

	return result;
}
/*
 * SMB_VFS_READDIR: read the next directory entry. When the caller
 * wants stat data (sbuf != NULL), use glfs_readdirplus_r() and fill
 * sbuf — except for symlinks, whose stat is left invalid so the caller
 * re-stats through the proper path.
 *
 * NOTE: the entry is returned in a static buffer, mirroring classic
 * readdir() semantics — not safe for concurrent use from one process.
 */
static struct dirent *vfs_gluster_readdir(struct vfs_handle_struct *handle,
					  struct files_struct *dirfsp,
					  DIR *dirp,
					  SMB_STRUCT_STAT *sbuf)
{
	static char direntbuf[512];
	struct stat st;
	struct dirent *result = NULL;
	int ret;

	START_PROFILE(syscall_readdir);

	if (sbuf != NULL) {
		ret = glfs_readdirplus_r((void *)dirp, &st, (void *)direntbuf,
					 &result);
	} else {
		ret = glfs_readdir_r((void *)dirp, (void *)direntbuf, &result);
	}

	if ((ret < 0) || (result == NULL)) {
		END_PROFILE(syscall_readdir);
		return NULL;
	}

	if (sbuf != NULL) {
		SET_STAT_INVALID(*sbuf);
		if (!S_ISLNK(st.st_mode)) {
			smb_stat_ex_from_stat(sbuf, &st);
		}
	}

	END_PROFILE(syscall_readdir);
	return result;
}
/* SMB_VFS_TELLDIR: report the current directory stream position. */
static long vfs_gluster_telldir(struct vfs_handle_struct *handle, DIR *dirp)
{
	long pos;

	START_PROFILE(syscall_telldir);
	pos = glfs_telldir((void *)dirp);
	END_PROFILE(syscall_telldir);

	return pos;
}
static void vfs_gluster_seekdir ( struct vfs_handle_struct * handle , DIR * dirp ,
long offset )
{
2019-08-05 08:15:01 +03:00
START_PROFILE ( syscall_seekdir ) ;
2013-05-29 15:21:46 +04:00
glfs_seekdir ( ( void * ) dirp , offset ) ;
2019-08-05 08:15:01 +03:00
END_PROFILE ( syscall_seekdir ) ;
2013-05-29 15:21:46 +04:00
}
static void vfs_gluster_rewinddir ( struct vfs_handle_struct * handle , DIR * dirp )
{
2019-08-05 08:15:01 +03:00
START_PROFILE ( syscall_rewinddir ) ;
2013-05-29 15:21:46 +04:00
glfs_seekdir ( ( void * ) dirp , 0 ) ;
2019-08-05 08:15:01 +03:00
END_PROFILE ( syscall_rewinddir ) ;
2013-05-29 15:21:46 +04:00
}
2019-09-05 19:33:32 +03:00
/*
 * SMB_VFS_MKDIRAT: create a directory. gfapi has no mkdirat(), so
 * only cwd-relative creation is supported (asserted below).
 */
static int vfs_gluster_mkdirat(struct vfs_handle_struct *handle,
			       struct files_struct *dirfsp,
			       const struct smb_filename *smb_fname,
			       mode_t mode)
{
	int result;

	START_PROFILE(syscall_mkdirat);
	SMB_ASSERT(dirfsp == dirfsp->conn->cwd_fsp);
	result = glfs_mkdir(handle->data, smb_fname->base_name, mode);
	END_PROFILE(syscall_mkdirat);

	return result;
}
2020-05-20 22:32:30 +03:00
/*
 * SMB_VFS_OPENAT: open a file/directory through gfapi and stash the
 * resulting glfs_fd_t in the fsp's VFS extension (there is no real OS
 * fd to return). Returns a sentinel fd value on success, -1 on error.
 */
static int vfs_gluster_openat(struct vfs_handle_struct *handle,
			      const struct files_struct *dirfsp,
			      const struct smb_filename *smb_fname,
			      files_struct *fsp,
			      int flags,
			      mode_t mode)
{
	bool became_root = false;
	glfs_fd_t *glfd;
	glfs_fd_t **p_tmp;

	START_PROFILE(syscall_openat);

	/*
	 * Looks like glfs API doesn't have openat(); only cwd-relative
	 * opens are supported.
	 */
	SMB_ASSERT(fsp_get_pathref_fd(dirfsp) == AT_FDCWD);

	p_tmp = VFS_ADD_FSP_EXTENSION(handle, fsp, glfs_fd_t *, NULL);
	if (p_tmp == NULL) {
		END_PROFILE(syscall_openat);
		errno = ENOMEM;
		return -1;
	}

	if (fsp->fsp_flags.is_pathref) {
		/*
		 * gfapi doesn't support O_PATH so we have to fallback to
		 * become_root(). (Original comment said "ceph" — evidently
		 * copy/pasted from vfs_ceph.)
		 */
		become_root();
		became_root = true;
	}

	if (flags & O_DIRECTORY) {
		glfd = glfs_opendir(handle->data, smb_fname->base_name);
	} else if (flags & O_CREAT) {
		glfd = glfs_creat(handle->data, smb_fname->base_name, flags,
				  mode);
	} else {
		glfd = glfs_open(handle->data, smb_fname->base_name, flags);
	}

	if (became_root) {
		unbecome_root();
	}

	/* There is no kernel fd behind this open. */
	fsp->fsp_flags.have_proc_fds = false;

	if (glfd == NULL) {
		END_PROFILE(syscall_openat);
		/* no extension destroy_fn, so no need to save errno */
		VFS_REMOVE_FSP_EXTENSION(handle, fsp);
		return -1;
	}

	*p_tmp = glfd;

	END_PROFILE(syscall_openat);
	/* An arbitrary value for error reporting, so you know its us. */
	return 13371337;
}
2013-05-29 15:21:46 +04:00
/*
 * SMB_VFS_CLOSE: close the stored glfs_fd_t and remove the fsp
 * extension that held it.
 */
static int vfs_gluster_close(struct vfs_handle_struct *handle,
			     files_struct *fsp)
{
	glfs_fd_t *glfd = NULL;
	int result;

	START_PROFILE(syscall_close);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_close);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	VFS_REMOVE_FSP_EXTENSION(handle, fsp);

	result = glfs_close(glfd);
	END_PROFILE(syscall_close);

	return result;
}
/*
 * SMB_VFS_PREAD (synchronous): positional read via glfs_pread().
 * gfapi >= 7.6 added a trailing poststat argument to glfs_pread().
 */
static ssize_t vfs_gluster_pread(struct vfs_handle_struct *handle,
				 files_struct *fsp, void *data, size_t n,
				 off_t offset)
{
	glfs_fd_t *glfd = NULL;
	ssize_t nread;

	START_PROFILE_BYTES(syscall_pread, n);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE_BYTES(syscall_pread);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

#ifdef HAVE_GFAPI_VER_7_6
	nread = glfs_pread(glfd, data, n, offset, 0, NULL);
#else
	nread = glfs_pread(glfd, data, n, offset, 0);
#endif

	END_PROFILE_BYTES(syscall_pread);
	return nread;
}
2019-07-24 12:45:33 +03:00
struct vfs_gluster_pread_state {
2014-12-11 05:05:10 +03:00
ssize_t ret ;
2019-07-24 12:45:33 +03:00
glfs_fd_t * fd ;
void * buf ;
size_t count ;
off_t offset ;
2016-02-26 12:54:01 +03:00
struct vfs_aio_state vfs_aio_state ;
2019-08-05 08:15:01 +03:00
SMBPROFILE_BYTES_ASYNC_STATE ( profile_bytes ) ;
2014-12-11 05:05:10 +03:00
} ;
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pread_do ( void * private_data ) ;
static void vfs_gluster_pread_done ( struct tevent_req * subreq ) ;
static int vfs_gluster_pread_state_destructor ( struct vfs_gluster_pread_state * state ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
/*
 * Kick off an async pread: the actual glfs_pread() runs in the
 * connection's pthreadpool via vfs_gluster_pread_do().
 */
static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
						 *handle, TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 files_struct *fsp,
						 void *data, size_t n,
						 off_t offset)
{
	struct vfs_gluster_pread_state *state = NULL;
	struct tevent_req *req = NULL;
	struct tevent_req *subreq = NULL;
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct vfs_gluster_pread_state);
	if (req == NULL) {
		return NULL;
	}

	*state = (struct vfs_gluster_pread_state) {
		.ret = -1,
		.fd = glfd,
		.buf = data,
		.count = n,
		.offset = offset,
	};

	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pread, profile_p,
				     state->profile_bytes, n);
	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);

	subreq = pthreadpool_tevent_job_send(
		state, ev, handle->conn->sconn->pool,
		vfs_gluster_pread_do, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, vfs_gluster_pread_done, req);

	/* Refuse talloc_free() while the job may still run in a worker. */
	talloc_set_destructor(state, vfs_gluster_pread_state_destructor);

	return req;
}
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pread_do ( void * private_data )
2014-12-11 05:05:10 +03:00
{
2019-07-24 12:45:33 +03:00
struct vfs_gluster_pread_state * state = talloc_get_type_abort (
private_data , struct vfs_gluster_pread_state ) ;
struct timespec start_time ;
struct timespec end_time ;
2015-01-20 07:08:17 +03:00
2019-07-24 12:45:33 +03:00
SMBPROFILE_BYTES_ASYNC_SET_BUSY ( state - > profile_bytes ) ;
2015-01-23 01:14:31 +03:00
2019-07-24 12:45:33 +03:00
PROFILE_TIMESTAMP ( & start_time ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
do {
# ifdef HAVE_GFAPI_VER_7_6
state - > ret = glfs_pread ( state - > fd , state - > buf , state - > count ,
state - > offset , 0 , NULL ) ;
# else
state - > ret = glfs_pread ( state - > fd , state - > buf , state - > count ,
state - > offset , 0 ) ;
# endif
} while ( ( state - > ret = = - 1 ) & & ( errno = = EINTR ) ) ;
2015-01-20 07:08:17 +03:00
2019-07-24 12:45:33 +03:00
if ( state - > ret = = - 1 ) {
state - > vfs_aio_state . error = errno ;
2015-11-18 19:09:06 +03:00
}
2019-07-24 12:45:33 +03:00
PROFILE_TIMESTAMP ( & end_time ) ;
2015-12-17 17:19:22 +03:00
2019-07-24 12:45:33 +03:00
state - > vfs_aio_state . duration = nsec_time_diff ( & end_time , & start_time ) ;
SMBPROFILE_BYTES_ASYNC_SET_IDLE ( state - > profile_bytes ) ;
2014-12-11 05:05:10 +03:00
}
2019-07-24 12:45:33 +03:00
/*
 * Veto talloc_free() of the state while the threadpool job is in
 * flight; the done callback clears this destructor again.
 */
static int vfs_gluster_pread_state_destructor(struct vfs_gluster_pread_state *state)
{
	return -1;
}
2015-01-20 07:08:17 +03:00
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pread_done ( struct tevent_req * subreq )
{
2020-03-13 20:37:30 +03:00
struct tevent_req * req = tevent_req_callback_data (
subreq , struct tevent_req ) ;
struct vfs_gluster_pread_state * state = tevent_req_data (
req , struct vfs_gluster_pread_state ) ;
2019-07-24 12:45:33 +03:00
int ret ;
ret = pthreadpool_tevent_job_recv ( subreq ) ;
TALLOC_FREE ( subreq ) ;
SMBPROFILE_BYTES_ASYNC_END ( state - > profile_bytes ) ;
talloc_set_destructor ( state , NULL ) ;
if ( ret ! = 0 ) {
if ( ret ! = EAGAIN ) {
tevent_req_error ( req , ret ) ;
return ;
}
2014-12-11 05:05:10 +03:00
/*
2019-07-24 12:45:33 +03:00
* If we get EAGAIN from pthreadpool_tevent_job_recv ( ) this
* means the lower level pthreadpool failed to create a new
* thread . Fallback to sync processing in that case to allow
* some progress for the client .
2014-12-11 05:05:10 +03:00
*/
2019-07-24 12:45:33 +03:00
vfs_gluster_pread_do ( state ) ;
2014-12-11 05:05:10 +03:00
}
2019-07-24 12:45:33 +03:00
tevent_req_done ( req ) ;
2014-12-11 05:05:10 +03:00
}
2019-07-24 12:45:33 +03:00
static ssize_t vfs_gluster_pread_recv ( struct tevent_req * req ,
struct vfs_aio_state * vfs_aio_state )
2013-05-29 15:21:46 +04:00
{
2019-07-24 12:45:33 +03:00
struct vfs_gluster_pread_state * state = tevent_req_data (
req , struct vfs_gluster_pread_state ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
if ( tevent_req_is_unix_error ( req , & vfs_aio_state - > error ) ) {
return - 1 ;
2014-12-11 05:05:10 +03:00
}
2019-07-24 12:45:33 +03:00
* vfs_aio_state = state - > vfs_aio_state ;
return state - > ret ;
}
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
struct vfs_gluster_pwrite_state {
ssize_t ret ;
glfs_fd_t * fd ;
const void * buf ;
size_t count ;
off_t offset ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
struct vfs_aio_state vfs_aio_state ;
SMBPROFILE_BYTES_ASYNC_STATE ( profile_bytes ) ;
} ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pwrite_do ( void * private_data ) ;
static void vfs_gluster_pwrite_done ( struct tevent_req * subreq ) ;
static int vfs_gluster_pwrite_state_destructor ( struct vfs_gluster_pwrite_state * state ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
static struct tevent_req * vfs_gluster_pwrite_send ( struct vfs_handle_struct
2015-11-18 19:09:06 +03:00
* handle , TALLOC_CTX * mem_ctx ,
struct tevent_context * ev ,
files_struct * fsp ,
2019-07-24 12:45:33 +03:00
const void * data , size_t n ,
2015-11-18 19:09:06 +03:00
off_t offset )
{
2019-07-24 12:45:33 +03:00
struct tevent_req * req , * subreq ;
struct vfs_gluster_pwrite_state * state ;
2018-10-10 18:32:25 +03:00
2019-07-24 12:45:33 +03:00
glfs_fd_t * glfd = vfs_gluster_fetch_glfd ( handle , fsp ) ;
2018-10-10 18:32:25 +03:00
if ( glfd = = NULL ) {
DBG_ERR ( " Failed to fetch gluster fd \n " ) ;
return NULL ;
}
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
req = tevent_req_create ( mem_ctx , & state , struct vfs_gluster_pwrite_state ) ;
if ( req = = NULL ) {
2015-11-18 19:09:06 +03:00
return NULL ;
}
2019-07-24 12:45:33 +03:00
state - > ret = - 1 ;
state - > fd = glfd ;
state - > buf = data ;
state - > count = n ;
state - > offset = offset ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
SMBPROFILE_BYTES_ASYNC_START ( syscall_asys_pwrite , profile_p ,
2019-08-05 08:15:01 +03:00
state - > profile_bytes , n ) ;
2019-07-24 12:45:33 +03:00
SMBPROFILE_BYTES_ASYNC_SET_IDLE ( state - > profile_bytes ) ;
subreq = pthreadpool_tevent_job_send (
state , ev , handle - > conn - > sconn - > pool ,
vfs_gluster_pwrite_do , state ) ;
if ( tevent_req_nomem ( subreq , req ) ) {
2014-12-11 05:05:10 +03:00
return tevent_req_post ( req , ev ) ;
}
2020-03-13 20:36:39 +03:00
tevent_req_set_callback ( subreq , vfs_gluster_pwrite_done , req ) ;
2019-07-24 12:45:33 +03:00
talloc_set_destructor ( state , vfs_gluster_pwrite_state_destructor ) ;
2014-12-11 05:05:10 +03:00
return req ;
2013-05-29 15:21:46 +04:00
}
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pwrite_do ( void * private_data )
2013-05-29 15:21:46 +04:00
{
2019-07-24 12:45:33 +03:00
struct vfs_gluster_pwrite_state * state = talloc_get_type_abort (
private_data , struct vfs_gluster_pwrite_state ) ;
struct timespec start_time ;
struct timespec end_time ;
2014-12-11 05:05:10 +03:00
2019-07-24 12:45:33 +03:00
SMBPROFILE_BYTES_ASYNC_SET_BUSY ( state - > profile_bytes ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
PROFILE_TIMESTAMP ( & start_time ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
do {
# ifdef HAVE_GFAPI_VER_7_6
state - > ret = glfs_pwrite ( state - > fd , state - > buf , state - > count ,
state - > offset , 0 , NULL , NULL ) ;
# else
state - > ret = glfs_pwrite ( state - > fd , state - > buf , state - > count ,
state - > offset , 0 ) ;
# endif
} while ( ( state - > ret = = - 1 ) & & ( errno = = EINTR ) ) ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
if ( state - > ret = = - 1 ) {
state - > vfs_aio_state . error = errno ;
2014-12-11 05:05:10 +03:00
}
2015-12-11 15:37:53 +03:00
2019-07-24 12:45:33 +03:00
PROFILE_TIMESTAMP ( & end_time ) ;
2018-05-23 09:53:47 +03:00
2019-07-24 12:45:33 +03:00
state - > vfs_aio_state . duration = nsec_time_diff ( & end_time , & start_time ) ;
2015-12-11 15:37:53 +03:00
2019-07-24 12:45:33 +03:00
SMBPROFILE_BYTES_ASYNC_SET_IDLE ( state - > profile_bytes ) ;
2013-05-29 15:21:46 +04:00
}
2019-07-24 12:45:33 +03:00
/*
 * Veto talloc_free() of the state while the threadpool job is in
 * flight; the done callback clears this destructor again.
 */
static int vfs_gluster_pwrite_state_destructor(struct vfs_gluster_pwrite_state *state)
{
	return -1;
}
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
static void vfs_gluster_pwrite_done ( struct tevent_req * subreq )
{
2020-03-13 20:36:39 +03:00
struct tevent_req * req = tevent_req_callback_data (
subreq , struct tevent_req ) ;
struct vfs_gluster_pwrite_state * state = tevent_req_data (
req , struct vfs_gluster_pwrite_state ) ;
2019-07-24 12:45:33 +03:00
int ret ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
ret = pthreadpool_tevent_job_recv ( subreq ) ;
TALLOC_FREE ( subreq ) ;
SMBPROFILE_BYTES_ASYNC_END ( state - > profile_bytes ) ;
talloc_set_destructor ( state , NULL ) ;
if ( ret ! = 0 ) {
if ( ret ! = EAGAIN ) {
tevent_req_error ( req , ret ) ;
return ;
}
/*
* If we get EAGAIN from pthreadpool_tevent_job_recv ( ) this
* means the lower level pthreadpool failed to create a new
* thread . Fallback to sync processing in that case to allow
* some progress for the client .
*/
vfs_gluster_pwrite_do ( state ) ;
2015-11-18 19:09:06 +03:00
}
2019-07-24 12:45:33 +03:00
tevent_req_done ( req ) ;
}
static ssize_t vfs_gluster_pwrite_recv ( struct tevent_req * req ,
struct vfs_aio_state * vfs_aio_state )
{
struct vfs_gluster_pwrite_state * state = tevent_req_data (
req , struct vfs_gluster_pwrite_state ) ;
2014-12-11 05:05:10 +03:00
2016-02-26 12:54:01 +03:00
if ( tevent_req_is_unix_error ( req , & vfs_aio_state - > error ) ) {
2014-12-11 05:05:10 +03:00
return - 1 ;
}
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
* vfs_aio_state = state - > vfs_aio_state ;
2015-11-18 19:09:06 +03:00
2019-07-24 12:45:33 +03:00
return state - > ret ;
2013-05-29 15:21:46 +04:00
}
2015-12-11 14:27:17 +03:00
/* Synchronous positional write via libgfapi. */
static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
				  files_struct *fsp, const void *data,
				  size_t n, off_t offset)
{
	ssize_t nwritten;
	glfs_fd_t *glfd = NULL;

	START_PROFILE_BYTES(syscall_pwrite, n);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE_BYTES(syscall_pwrite);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	/* gfapi >= 7.6 grew extra pre/post stat arguments. */
#ifdef HAVE_GFAPI_VER_7_6
	nwritten = glfs_pwrite(glfd, data, n, offset, 0, NULL, NULL);
#else
	nwritten = glfs_pwrite(glfd, data, n, offset, 0);
#endif

	END_PROFILE_BYTES(syscall_pwrite);
	return nwritten;
}
2013-05-29 15:21:46 +04:00
/* Reposition the glfs fd; semantics follow lseek(2). */
static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
			       files_struct *fsp, off_t offset, int whence)
{
	off_t result = 0;
	glfs_fd_t *glfd = NULL;

	START_PROFILE(syscall_lseek);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_lseek);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	result = glfs_lseek(glfd, offset, whence);
	END_PROFILE(syscall_lseek);
	return result;
}
/* sendfile is not supported through libgfapi; callers fall back. */
static ssize_t vfs_gluster_sendfile(struct vfs_handle_struct *handle, int tofd,
				    files_struct *fromfsp,
				    const DATA_BLOB *hdr,
				    off_t offset, size_t n)
{
	errno = ENOTSUP;
	return -1;
}
/* recvfile is not supported through libgfapi; callers fall back. */
static ssize_t vfs_gluster_recvfile(struct vfs_handle_struct *handle,
				    int fromfd, files_struct *tofsp,
				    off_t offset, size_t n)
{
	errno = ENOTSUP;
	return -1;
}
2019-08-10 01:02:35 +03:00
/*
 * Rename via glfs_rename(). The dirfsp arguments are not consulted;
 * paths are resolved relative to the glfs working directory.
 */
static int vfs_gluster_renameat(struct vfs_handle_struct *handle,
				files_struct *srcfsp,
				const struct smb_filename *smb_fname_src,
				files_struct *dstfsp,
				const struct smb_filename *smb_fname_dst)
{
	int ret;

	START_PROFILE(syscall_renameat);
	ret = glfs_rename(handle->data, smb_fname_src->base_name,
			  smb_fname_dst->base_name);
	END_PROFILE(syscall_renameat);
	return ret;
}
2019-07-24 12:45:33 +03:00
struct vfs_gluster_fsync_state {
ssize_t ret ;
glfs_fd_t * fd ;
struct vfs_aio_state vfs_aio_state ;
SMBPROFILE_BYTES_ASYNC_STATE ( profile_bytes ) ;
} ;
static void vfs_gluster_fsync_do ( void * private_data ) ;
static void vfs_gluster_fsync_done ( struct tevent_req * subreq ) ;
static int vfs_gluster_fsync_state_destructor ( struct vfs_gluster_fsync_state * state ) ;
2013-05-29 15:21:46 +04:00
/*
 * Kick off an async fsync: glfs_fsync() runs in the connection's
 * pthreadpool via vfs_gluster_fsync_do().
 */
static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
						 *handle, TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 files_struct *fsp)
{
	struct tevent_req *req = NULL;
	struct tevent_req *subreq = NULL;
	struct vfs_gluster_fsync_state *state = NULL;
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state, struct vfs_gluster_fsync_state);
	if (req == NULL) {
		return NULL;
	}

	state->ret = -1;
	state->fd = glfd;

	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
				     state->profile_bytes, 0);
	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);

	subreq = pthreadpool_tevent_job_send(
		state, ev, handle->conn->sconn->pool, vfs_gluster_fsync_do, state);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, vfs_gluster_fsync_done, req);

	/* Refuse talloc_free() while the job may still run in a worker. */
	talloc_set_destructor(state, vfs_gluster_fsync_state_destructor);

	return req;
}
2019-07-24 12:45:33 +03:00
static void vfs_gluster_fsync_do ( void * private_data )
{
struct vfs_gluster_fsync_state * state = talloc_get_type_abort (
private_data , struct vfs_gluster_fsync_state ) ;
struct timespec start_time ;
struct timespec end_time ;
SMBPROFILE_BYTES_ASYNC_SET_BUSY ( state - > profile_bytes ) ;
PROFILE_TIMESTAMP ( & start_time ) ;
do {
# ifdef HAVE_GFAPI_VER_7_6
state - > ret = glfs_fsync ( state - > fd , NULL , NULL ) ;
# else
state - > ret = glfs_fsync ( state - > fd ) ;
# endif
} while ( ( state - > ret = = - 1 ) & & ( errno = = EINTR ) ) ;
if ( state - > ret = = - 1 ) {
state - > vfs_aio_state . error = errno ;
}
PROFILE_TIMESTAMP ( & end_time ) ;
state - > vfs_aio_state . duration = nsec_time_diff ( & end_time , & start_time ) ;
SMBPROFILE_BYTES_ASYNC_SET_IDLE ( state - > profile_bytes ) ;
}
/*
 * Veto talloc_free() of the state while the threadpool job is in
 * flight; the done callback clears this destructor again.
 */
static int vfs_gluster_fsync_state_destructor(struct vfs_gluster_fsync_state *state)
{
	return -1;
}
static void vfs_gluster_fsync_done ( struct tevent_req * subreq )
{
2020-03-13 20:35:44 +03:00
struct tevent_req * req = tevent_req_callback_data (
subreq , struct tevent_req ) ;
struct vfs_gluster_fsync_state * state = tevent_req_data (
req , struct vfs_gluster_fsync_state ) ;
2019-07-24 12:45:33 +03:00
int ret ;
ret = pthreadpool_tevent_job_recv ( subreq ) ;
TALLOC_FREE ( subreq ) ;
SMBPROFILE_BYTES_ASYNC_END ( state - > profile_bytes ) ;
talloc_set_destructor ( state , NULL ) ;
if ( ret ! = 0 ) {
if ( ret ! = EAGAIN ) {
tevent_req_error ( req , ret ) ;
return ;
}
/*
* If we get EAGAIN from pthreadpool_tevent_job_recv ( ) this
* means the lower level pthreadpool failed to create a new
* thread . Fallback to sync processing in that case to allow
* some progress for the client .
*/
vfs_gluster_fsync_do ( state ) ;
}
tevent_req_done ( req ) ;
}
2016-02-26 12:54:01 +03:00
static int vfs_gluster_fsync_recv ( struct tevent_req * req ,
struct vfs_aio_state * vfs_aio_state )
2013-05-29 15:21:46 +04:00
{
2019-07-24 12:45:33 +03:00
struct vfs_gluster_fsync_state * state = tevent_req_data (
req , struct vfs_gluster_fsync_state ) ;
if ( tevent_req_is_unix_error ( req , & vfs_aio_state - > error ) ) {
return - 1 ;
}
* vfs_aio_state = state - > vfs_aio_state ;
return state - > ret ;
2013-05-29 15:21:46 +04:00
}
static int vfs_gluster_stat ( struct vfs_handle_struct * handle ,
struct smb_filename * smb_fname )
{
struct stat st ;
int ret ;
2019-08-05 08:15:01 +03:00
START_PROFILE ( syscall_stat ) ;
2013-05-29 15:21:46 +04:00
ret = glfs_stat ( handle - > data , smb_fname - > base_name , & st ) ;
if ( ret = = 0 ) {
smb_stat_ex_from_stat ( & smb_fname - > st , & st ) ;
}
if ( ret < 0 & & errno ! = ENOENT ) {
DEBUG ( 0 , ( " glfs_stat(%s) failed: %s \n " ,
smb_fname - > base_name , strerror ( errno ) ) ) ;
}
2019-08-05 08:15:01 +03:00
END_PROFILE ( syscall_stat ) ;
2013-05-29 15:21:46 +04:00
return ret ;
}
/* fstat() via glfs on the fd stored in the fsp extension. */
static int vfs_gluster_fstat(struct vfs_handle_struct *handle,
			     files_struct *fsp, SMB_STRUCT_STAT *sbuf)
{
	struct stat st;
	int ret;
	glfs_fd_t *glfd = NULL;

	START_PROFILE(syscall_fstat);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_fstat);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	ret = glfs_fstat(glfd, &st);
	if (ret == 0) {
		smb_stat_ex_from_stat(sbuf, &st);
	}
	if (ret < 0) {
		DEBUG(0, ("glfs_fstat(%d) failed: %s\n",
			  fsp_get_io_fd(fsp), strerror(errno)));
	}

	END_PROFILE(syscall_fstat);
	return ret;
}
static int vfs_gluster_lstat ( struct vfs_handle_struct * handle ,
struct smb_filename * smb_fname )
{
struct stat st ;
int ret ;
2019-08-05 08:15:01 +03:00
START_PROFILE ( syscall_lstat ) ;
2013-05-29 15:21:46 +04:00
ret = glfs_lstat ( handle - > data , smb_fname - > base_name , & st ) ;
if ( ret = = 0 ) {
smb_stat_ex_from_stat ( & smb_fname - > st , & st ) ;
}
if ( ret < 0 & & errno ! = ENOENT ) {
DEBUG ( 0 , ( " glfs_lstat(%s) failed: %s \n " ,
smb_fname - > base_name , strerror ( errno ) ) ) ;
}
2019-08-05 08:15:01 +03:00
END_PROFILE ( syscall_lstat ) ;
2013-05-29 15:21:46 +04:00
return ret ;
}
/*
 * Allocated size in bytes, derived from st_ex_blocks which is counted
 * in 512-byte units (stat(2) convention).
 */
static uint64_t vfs_gluster_get_alloc_size(struct vfs_handle_struct *handle,
					   files_struct *fsp,
					   const SMB_STRUCT_STAT *sbuf)
{
	uint64_t alloc_bytes;

	START_PROFILE(syscall_get_alloc_size);
	alloc_bytes = sbuf->st_ex_blocks * 512;
	END_PROFILE(syscall_get_alloc_size);
	return alloc_bytes;
}
2019-09-12 21:04:18 +03:00
static int vfs_gluster_unlinkat ( struct vfs_handle_struct * handle ,
struct files_struct * dirfsp ,
const struct smb_filename * smb_fname ,
int flags )
{
int ret ;
START_PROFILE ( syscall_unlinkat ) ;
SMB_ASSERT ( dirfsp = = dirfsp - > conn - > cwd_fsp ) ;
if ( flags & AT_REMOVEDIR ) {
ret = glfs_rmdir ( handle - > data , smb_fname - > base_name ) ;
} else {
ret = glfs_unlink ( handle - > data , smb_fname - > base_name ) ;
}
END_PROFILE ( syscall_unlinkat ) ;
return ret ;
}
2013-05-29 15:21:46 +04:00
static int vfs_gluster_chmod ( struct vfs_handle_struct * handle ,
2016-03-02 03:20:25 +03:00
const struct smb_filename * smb_fname ,
mode_t mode )
2013-05-29 15:21:46 +04:00
{
2019-08-05 08:15:01 +03:00
int ret ;
START_PROFILE ( syscall_chmod ) ;
ret = glfs_chmod ( handle - > data , smb_fname - > base_name , mode ) ;
END_PROFILE ( syscall_chmod ) ;
return ret ;
2013-05-29 15:21:46 +04:00
}
/* Handle-based chmod via glfs. */
static int vfs_gluster_fchmod(struct vfs_handle_struct *handle,
			      files_struct *fsp, mode_t mode)
{
	int ret;
	glfs_fd_t *glfd = NULL;

	START_PROFILE(syscall_fchmod);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_fchmod);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	ret = glfs_fchmod(glfd, mode);
	END_PROFILE(syscall_fchmod);
	return ret;
}
/* Handle-based chown via glfs. */
static int vfs_gluster_fchown(struct vfs_handle_struct *handle,
			      files_struct *fsp, uid_t uid, gid_t gid)
{
	int ret;
	glfs_fd_t *glfd = NULL;

	START_PROFILE(syscall_fchown);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_fchown);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	ret = glfs_fchown(glfd, uid, gid);
	END_PROFILE(syscall_fchown);
	return ret;
}
static int vfs_gluster_lchown ( struct vfs_handle_struct * handle ,
2016-03-04 01:34:57 +03:00
const struct smb_filename * smb_fname ,
uid_t uid ,
gid_t gid )
2013-05-29 15:21:46 +04:00
{
2019-08-05 08:15:01 +03:00
int ret ;
START_PROFILE ( syscall_lchown ) ;
ret = glfs_lchown ( handle - > data , smb_fname - > base_name , uid , gid ) ;
END_PROFILE ( syscall_lchown ) ;
return ret ;
2013-05-29 15:21:46 +04:00
}
2017-06-29 21:29:33 +03:00
static int vfs_gluster_chdir ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname )
2013-05-29 15:21:46 +04:00
{
2019-08-05 08:15:01 +03:00
int ret ;
START_PROFILE ( syscall_chdir ) ;
ret = glfs_chdir ( handle - > data , smb_fname - > base_name ) ;
END_PROFILE ( syscall_chdir ) ;
return ret ;
2013-05-29 15:21:46 +04:00
}
2017-06-30 00:32:47 +03:00
/*
 * Return the glfs working directory as a freshly allocated
 * smb_filename on ctx, or NULL on failure.
 */
static struct smb_filename *vfs_gluster_getwd(struct vfs_handle_struct *handle,
					      TALLOC_CTX *ctx)
{
	char *cwd = NULL;
	char *ret = NULL;
	struct smb_filename *smb_fname = NULL;

	START_PROFILE(syscall_getwd);

	cwd = SMB_CALLOC_ARRAY(char, PATH_MAX);
	if (cwd == NULL) {
		END_PROFILE(syscall_getwd);
		return NULL;
	}

	/* Leave room for the terminating NUL. */
	ret = glfs_getcwd(handle->data, cwd, PATH_MAX - 1);
	END_PROFILE(syscall_getwd);
	if (ret == NULL) {
		SAFE_FREE(cwd);
		return NULL;
	}

	smb_fname = synthetic_smb_fname(ctx,
					ret,
					NULL,
					NULL,
					0,
					0);
	SAFE_FREE(cwd);
	return smb_fname;
}
static int vfs_gluster_ntimes ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname ,
struct smb_file_time * ft )
{
2019-08-05 08:15:01 +03:00
int ret = - 1 ;
2013-05-29 15:21:46 +04:00
struct timespec times [ 2 ] ;
2019-08-05 08:15:01 +03:00
START_PROFILE ( syscall_ntimes ) ;
2019-12-02 18:30:50 +03:00
if ( is_omit_timespec ( & ft - > atime ) ) {
2014-01-10 19:26:18 +04:00
times [ 0 ] . tv_sec = smb_fname - > st . st_ex_atime . tv_sec ;
times [ 0 ] . tv_nsec = smb_fname - > st . st_ex_atime . tv_nsec ;
} else {
times [ 0 ] . tv_sec = ft - > atime . tv_sec ;
times [ 0 ] . tv_nsec = ft - > atime . tv_nsec ;
}
2019-12-02 18:30:50 +03:00
if ( is_omit_timespec ( & ft - > mtime ) ) {
2014-01-10 19:26:18 +04:00
times [ 1 ] . tv_sec = smb_fname - > st . st_ex_mtime . tv_sec ;
times [ 1 ] . tv_nsec = smb_fname - > st . st_ex_mtime . tv_nsec ;
} else {
times [ 1 ] . tv_sec = ft - > mtime . tv_sec ;
times [ 1 ] . tv_nsec = ft - > mtime . tv_nsec ;
}
if ( ( timespec_compare ( & times [ 0 ] ,
& smb_fname - > st . st_ex_atime ) = = 0 ) & &
( timespec_compare ( & times [ 1 ] ,
& smb_fname - > st . st_ex_mtime ) = = 0 ) ) {
2019-08-05 08:15:01 +03:00
END_PROFILE ( syscall_ntimes ) ;
2014-01-10 19:26:18 +04:00
return 0 ;
}
2013-05-29 15:21:46 +04:00
2019-08-05 08:15:01 +03:00
ret = glfs_utimens ( handle - > data , smb_fname - > base_name , times ) ;
END_PROFILE ( syscall_ntimes ) ;
return ret ;
2013-05-29 15:21:46 +04:00
}
/* Truncate the open glfs fd to the given length. */
static int vfs_gluster_ftruncate(struct vfs_handle_struct *handle,
				 files_struct *fsp, off_t offset)
{
	int ret;
	glfs_fd_t *glfd = NULL;

	START_PROFILE(syscall_ftruncate);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_ftruncate);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	/* gfapi >= 7.6 grew extra pre/post stat arguments. */
#ifdef HAVE_GFAPI_VER_7_6
	ret = glfs_ftruncate(glfd, offset, NULL, NULL);
#else
	ret = glfs_ftruncate(glfd, offset);
#endif

	END_PROFILE(syscall_ftruncate);
	return ret;
}
/*
 * fallocate via glfs. Only KEEP_SIZE and PUNCH_HOLE are understood;
 * any other mode bit yields ENOTSUP. Without HAVE_GFAPI_VER_6 the
 * whole operation is unsupported.
 */
static int vfs_gluster_fallocate(struct vfs_handle_struct *handle,
				 struct files_struct *fsp,
				 uint32_t mode,
				 off_t offset, off_t len)
{
	int ret;
#ifdef HAVE_GFAPI_VER_6
	glfs_fd_t *glfd = NULL;
	int keep_size, punch_hole;

	START_PROFILE(syscall_fallocate);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		END_PROFILE(syscall_fallocate);
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	keep_size = mode & VFS_FALLOCATE_FL_KEEP_SIZE;
	punch_hole = mode & VFS_FALLOCATE_FL_PUNCH_HOLE;

	/* Reject any flag we don't know how to map onto gfapi. */
	mode &= ~(VFS_FALLOCATE_FL_KEEP_SIZE | VFS_FALLOCATE_FL_PUNCH_HOLE);
	if (mode != 0) {
		END_PROFILE(syscall_fallocate);
		errno = ENOTSUP;
		return -1;
	}

	if (punch_hole) {
		ret = glfs_discard(glfd, offset, len);
		if (ret != 0) {
			/* Best effort: fall through to glfs_fallocate(). */
			DBG_DEBUG("glfs_discard failed: %s\n",
				  strerror(errno));
		}
	}

	ret = glfs_fallocate(glfd, keep_size, offset, len);
	END_PROFILE(syscall_fallocate);
#else
	errno = ENOTSUP;
	ret = -1;
#endif
	return ret;
}
2017-06-30 21:32:59 +03:00
/*
 * Resolve a path to canonical form via glfs_realpath() and wrap the
 * result in an smb_filename allocated on ctx. Returns NULL on error.
 */
static struct smb_filename *vfs_gluster_realpath(struct vfs_handle_struct *handle,
						 TALLOC_CTX *ctx,
						 const struct smb_filename *smb_fname)
{
	char *result = NULL;
	struct smb_filename *result_fname = NULL;
	char *resolved_path = NULL;

	START_PROFILE(syscall_realpath);

	resolved_path = SMB_MALLOC_ARRAY(char, PATH_MAX + 1);
	if (resolved_path == NULL) {
		END_PROFILE(syscall_realpath);
		errno = ENOMEM;
		return NULL;
	}

	/* glfs_realpath() fills resolved_path and returns it on success. */
	result = glfs_realpath(handle->data,
			       smb_fname->base_name,
			       resolved_path);
	if (result != NULL) {
		result_fname = synthetic_smb_fname(ctx,
						   result,
						   NULL,
						   NULL,
						   0,
						   0);
	}

	SAFE_FREE(resolved_path);
	END_PROFILE(syscall_realpath);
	return result_fname;
}
/*
 * POSIX byte-range locking via glfs_posix_lock(). For F_GETLK the
 * return value means "somebody else holds a conflicting lock".
 */
static bool vfs_gluster_lock(struct vfs_handle_struct *handle,
			     files_struct *fsp, int op, off_t offset,
			     off_t count, int type)
{
	struct flock flock = { 0, };
	int ret;
	glfs_fd_t *glfd = NULL;
	bool ok = false;

	START_PROFILE(syscall_fcntl_lock);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		ok = false;
		goto out;
	}

	flock.l_type = type;
	flock.l_whence = SEEK_SET;
	flock.l_start = offset;
	flock.l_len = count;
	flock.l_pid = 0;

	ret = glfs_posix_lock(glfd, op, &flock);

	if (op == F_GETLK) {
		/* lock query, true if someone else has locked */
		if ((ret != -1) &&
		    (flock.l_type != F_UNLCK) &&
		    (flock.l_pid != 0) && (flock.l_pid != getpid())) {
			ok = true;
			goto out;
		}
		/* not me */
		ok = false;
		goto out;
	}

	ok = (ret != -1);
out:
	END_PROFILE(syscall_fcntl_lock);
	return ok;
}
static int vfs_gluster_kernel_flock ( struct vfs_handle_struct * handle ,
2020-01-03 22:41:00 +03:00
files_struct * fsp , uint32_t share_access ,
2013-05-29 15:21:46 +04:00
uint32_t access_mask )
{
errno = ENOSYS ;
return - 1 ;
}
2020-01-21 03:14:38 +03:00
/*
 * Minimal fcntl emulation. There is no real fd to operate on, so we
 * accept only the harmless subset vfs_set_blocking() issues.
 */
static int vfs_gluster_fcntl(vfs_handle_struct *handle,
			     files_struct *fsp, int cmd, va_list cmd_arg)
{
	/*
	 * SMB_VFS_FCNTL() is currently only called by vfs_set_blocking() to
	 * clear O_NONBLOCK, etc for LOCK_MAND and FIFOs. Ignore it.
	 */
	if (cmd == F_GETFL) {
		return 0;
	} else if (cmd == F_SETFL) {
		va_list dup_cmd_arg;
		int opt;

		va_copy(dup_cmd_arg, cmd_arg);
		opt = va_arg(dup_cmd_arg, int);
		va_end(dup_cmd_arg);
		if (opt == 0) {
			return 0;
		}
		DBG_ERR("unexpected fcntl SETFL(%d)\n", opt);
		goto err_out;
	}
	DBG_ERR("unexpected fcntl: %d\n", cmd);
err_out:
	errno = EINVAL;
	return -1;
}
2013-05-29 15:21:46 +04:00
/* Linux-style leases are not available through libgfapi. */
static int vfs_gluster_linux_setlease(struct vfs_handle_struct *handle,
				      files_struct *fsp, int leasetype)
{
	errno = ENOSYS;
	return -1;
}
/*
 * Query an existing POSIX lock (F_GETLK) on the file via libgfapi.
 *
 * On success the conflicting lock (or F_UNLCK) is written back through
 * poffset/pcount/ptype/ppid and true is returned; false on any error.
 *
 * Uses a single goto-out exit so END_PROFILE() runs exactly once on
 * every path, matching the pattern of the lock function above (which
 * previously duplicated END_PROFILE before each early return here).
 */
static bool vfs_gluster_getlock(struct vfs_handle_struct *handle,
				files_struct *fsp, off_t *poffset,
				off_t *pcount, int *ptype, pid_t *ppid)
{
	struct flock flock = { 0, };
	int ret;
	glfs_fd_t *glfd = NULL;
	bool ok = false;

	START_PROFILE(syscall_fcntl_getlock);

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		goto out;
	}

	flock.l_type = *ptype;
	flock.l_whence = SEEK_SET;
	flock.l_start = *poffset;
	flock.l_len = *pcount;
	flock.l_pid = 0;

	ret = glfs_posix_lock(glfd, F_GETLK, &flock);
	if (ret == -1) {
		goto out;
	}

	/* Report the conflicting lock (or F_UNLCK) back to the caller. */
	*ptype = flock.l_type;
	*poffset = flock.l_start;
	*pcount = flock.l_len;
	*ppid = flock.l_pid;
	ok = true;

out:
	END_PROFILE(syscall_fcntl_getlock);
	return ok;
}
2019-08-30 23:49:57 +03:00
static int vfs_gluster_symlinkat ( struct vfs_handle_struct * handle ,
2020-04-30 20:30:50 +03:00
const struct smb_filename * link_target ,
2019-08-30 23:49:57 +03:00
struct files_struct * dirfsp ,
const struct smb_filename * new_smb_fname )
{
int ret ;
START_PROFILE ( syscall_symlinkat ) ;
SMB_ASSERT ( dirfsp = = dirfsp - > conn - > cwd_fsp ) ;
ret = glfs_symlink ( handle - > data ,
2020-04-30 20:30:50 +03:00
link_target - > base_name ,
2019-08-30 23:49:57 +03:00
new_smb_fname - > base_name ) ;
END_PROFILE ( syscall_symlinkat ) ;
return ret ;
}
2019-08-23 00:22:55 +03:00
static int vfs_gluster_readlinkat ( struct vfs_handle_struct * handle ,
2020-10-13 16:19:30 +03:00
const struct files_struct * dirfsp ,
2019-08-23 00:22:55 +03:00
const struct smb_filename * smb_fname ,
char * buf ,
size_t bufsiz )
{
int ret ;
START_PROFILE ( syscall_readlinkat ) ;
SMB_ASSERT ( dirfsp = = dirfsp - > conn - > cwd_fsp ) ;
ret = glfs_readlink ( handle - > data , smb_fname - > base_name , buf , bufsiz ) ;
END_PROFILE ( syscall_readlinkat ) ;
return ret ;
}
2019-08-14 23:01:39 +03:00
static int vfs_gluster_linkat ( struct vfs_handle_struct * handle ,
files_struct * srcfsp ,
const struct smb_filename * old_smb_fname ,
files_struct * dstfsp ,
const struct smb_filename * new_smb_fname ,
int flags )
{
int ret ;
START_PROFILE ( syscall_linkat ) ;
SMB_ASSERT ( srcfsp = = srcfsp - > conn - > cwd_fsp ) ;
SMB_ASSERT ( dstfsp = = dstfsp - > conn - > cwd_fsp ) ;
ret = glfs_link ( handle - > data ,
old_smb_fname - > base_name ,
new_smb_fname - > base_name ) ;
END_PROFILE ( syscall_linkat ) ;
return ret ;
}
2019-08-21 02:54:05 +03:00
static int vfs_gluster_mknodat ( struct vfs_handle_struct * handle ,
files_struct * dirfsp ,
const struct smb_filename * smb_fname ,
mode_t mode ,
SMB_DEV_T dev )
{
int ret ;
START_PROFILE ( syscall_mknodat ) ;
SMB_ASSERT ( dirfsp = = dirfsp - > conn - > cwd_fsp ) ;
ret = glfs_mknod ( handle - > data , smb_fname - > base_name , mode , dev ) ;
END_PROFILE ( syscall_mknodat ) ;
return ret ;
}
/*
 * BSD-style file flags are not supported by libgfapi; always fail
 * with ENOSYS.
 */
static int vfs_gluster_chflags(struct vfs_handle_struct *handle,
			       const struct smb_filename *smb_fname,
			       unsigned int flags)
{
	errno = ENOSYS;
	return -1;
}
/*
 * Resolve the on-disk ("real") spelling of @name inside directory
 * @path using gluster's virtual xattr
 * "glusterfs.get_real_filename:<name>" queried on the parent dir.
 *
 * errno mapping: newer gluster returns ENOATTR (not ENOENT) when the
 * requested entry does not exist, because the getxattr call is made
 * against the parent directory, which does exist.  Translate ENOATTR
 * to ENOENT so callers get the authoritative "no such entry" answer
 * instead of falling back to an expensive directory scan.
 *
 * On success *found_name is a talloc'd copy of the real name under
 * mem_ctx and 0 is returned; -1 with errno set otherwise.
 */
static int vfs_gluster_get_real_filename(struct vfs_handle_struct *handle,
					 const struct smb_filename *path,
					 const char *name,
					 TALLOC_CTX *mem_ctx,
					 char **found_name)
{
	int ret;
	char key_buf[GLUSTER_NAME_MAX + 64];
	char val_buf[GLUSTER_NAME_MAX + 1];

	if (strlen(name) >= GLUSTER_NAME_MAX) {
		errno = ENAMETOOLONG;
		return -1;
	}

	/* Use sizeof(key_buf) so the bound tracks the declaration. */
	snprintf(key_buf, sizeof(key_buf),
		 "glusterfs.get_real_filename:%s", name);

	/*
	 * NOTE(review): assumes the virtual xattr value is
	 * NUL-terminated within val_buf — confirm against the gluster
	 * server implementation.
	 */
	ret = glfs_getxattr(handle->data, path->base_name, key_buf, val_buf,
			    GLUSTER_NAME_MAX + 1);
	if (ret == -1) {
		if (errno == ENOATTR) {
			errno = ENOENT;
		}
		return -1;
	}

	*found_name = talloc_strdup(mem_ctx, val_buf);
	if (*found_name == NULL) {
		errno = ENOMEM;
		return -1;
	}
	return 0;
}
static const char * vfs_gluster_connectpath ( struct vfs_handle_struct * handle ,
2017-06-30 23:37:03 +03:00
const struct smb_filename * smb_fname )
2013-05-29 15:21:46 +04:00
{
return handle - > conn - > connectpath ;
}
/* EA Operations */
static ssize_t vfs_gluster_getxattr ( struct vfs_handle_struct * handle ,
2017-05-26 02:42:04 +03:00
const struct smb_filename * smb_fname ,
const char * name ,
void * value ,
size_t size )
2013-05-29 15:21:46 +04:00
{
2017-05-26 02:42:04 +03:00
return glfs_getxattr ( handle - > data , smb_fname - > base_name ,
2019-08-05 08:15:01 +03:00
name , value , size ) ;
2013-05-29 15:21:46 +04:00
}
/*
 * fd-based variant of getxattr: resolve the gluster fd from the fsp
 * and read extended attribute @name into @value.
 */
static ssize_t vfs_gluster_fgetxattr(struct vfs_handle_struct *handle,
				     files_struct *fsp, const char *name,
				     void *value, size_t size)
{
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	return glfs_fgetxattr(glfd, name, value, size);
}
static ssize_t vfs_gluster_listxattr ( struct vfs_handle_struct * handle ,
2017-05-23 23:12:29 +03:00
const struct smb_filename * smb_fname ,
char * list ,
size_t size )
2013-05-29 15:21:46 +04:00
{
2017-05-23 23:12:29 +03:00
return glfs_listxattr ( handle - > data , smb_fname - > base_name , list , size ) ;
2013-05-29 15:21:46 +04:00
}
/*
 * fd-based variant of listxattr: resolve the gluster fd from the fsp
 * and list extended attribute names into @list.
 */
static ssize_t vfs_gluster_flistxattr(struct vfs_handle_struct *handle,
				      files_struct *fsp, char *list,
				      size_t size)
{
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	return glfs_flistxattr(glfd, list, size);
}
static int vfs_gluster_removexattr ( struct vfs_handle_struct * handle ,
2017-05-24 21:35:50 +03:00
const struct smb_filename * smb_fname ,
const char * name )
2013-05-29 15:21:46 +04:00
{
2017-05-24 21:35:50 +03:00
return glfs_removexattr ( handle - > data , smb_fname - > base_name , name ) ;
2013-05-29 15:21:46 +04:00
}
/*
 * fd-based variant of removexattr: resolve the gluster fd from the
 * fsp and remove extended attribute @name.
 */
static int vfs_gluster_fremovexattr(struct vfs_handle_struct *handle,
				    files_struct *fsp, const char *name)
{
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	return glfs_fremovexattr(glfd, name);
}
/*
 * Set extended attribute @name of a pathname to @value (@size bytes),
 * honouring the XATTR_CREATE/XATTR_REPLACE flags.
 */
static int vfs_gluster_setxattr(struct vfs_handle_struct *handle,
				const struct smb_filename *smb_fname,
				const char *name,
				const void *value, size_t size, int flags)
{
	int ret;

	ret = glfs_setxattr(handle->data, smb_fname->base_name,
			    name, value, size, flags);
	return ret;
}
/*
 * fd-based variant of setxattr: resolve the gluster fd from the fsp
 * and set extended attribute @name to @value.
 */
static int vfs_gluster_fsetxattr(struct vfs_handle_struct *handle,
				 files_struct *fsp, const char *name,
				 const void *value, size_t size, int flags)
{
	glfs_fd_t *glfd = NULL;

	glfd = vfs_gluster_fetch_glfd(handle, fsp);
	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	return glfs_fsetxattr(glfd, name, value, size, flags);
}
/* AIO Operations */

/* Never force AIO for gluster file handles. */
static bool vfs_gluster_aio_force(struct vfs_handle_struct *handle,
				  files_struct *fsp)
{
	return false;
}
2020-01-10 01:48:33 +03:00
static NTSTATUS vfs_gluster_create_dfs_pathat ( struct vfs_handle_struct * handle ,
struct files_struct * dirfsp ,
const struct smb_filename * smb_fname ,
const struct referral * reflist ,
size_t referral_count )
{
TALLOC_CTX * frame = talloc_stackframe ( ) ;
NTSTATUS status = NT_STATUS_NO_MEMORY ;
int ret ;
char * msdfs_link = NULL ;
SMB_ASSERT ( dirfsp = = dirfsp - > conn - > cwd_fsp ) ;
/* Form the msdfs_link contents */
msdfs_link = msdfs_link_string ( frame ,
reflist ,
referral_count ) ;
if ( msdfs_link = = NULL ) {
goto out ;
}
ret = glfs_symlink ( handle - > data ,
msdfs_link ,
smb_fname - > base_name ) ;
if ( ret = = 0 ) {
status = NT_STATUS_OK ;
} else {
status = map_nt_error_from_unix ( errno ) ;
}
out :
TALLOC_FREE ( frame ) ;
return status ;
}
/*
 * Read and return the contents of a DFS redirect given a
 * pathname.  A caller can pass in NULL for ppreflist and
 * preferral_count but still determine if this was a
 * DFS redirect point by getting NT_STATUS_OK back
 * without incurring the overhead of reading and parsing
 * the referral contents.
 */
static NTSTATUS vfs_gluster_read_dfs_pathat(struct vfs_handle_struct *handle,
				TALLOC_CTX *mem_ctx,
				struct files_struct *dirfsp,
				struct smb_filename *smb_fname,
				struct referral **ppreflist,
				size_t *preferral_count)
{
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	size_t bufsize;
	char *link_target = NULL;
	int referral_len;
	bool ok;
#if defined(HAVE_BROKEN_READLINK)
	char link_target_buf[PATH_MAX];
#else
	/* Just big enough to hold the "msdfs:" prefix plus NUL. */
	char link_target_buf[7];
#endif
	struct stat st;
	int ret;

	SMB_ASSERT(dirfsp == dirfsp->conn->cwd_fsp);

	/* A named stream can never be a DFS redirect point. */
	if (is_named_stream(smb_fname)) {
		status = NT_STATUS_OBJECT_NAME_NOT_FOUND;
		goto err;
	}

	if (ppreflist == NULL && preferral_count == NULL) {
		/*
		 * We're only checking if this is a DFS
		 * redirect. We don't need to return data.
		 */
		bufsize = sizeof(link_target_buf);
		link_target = link_target_buf;
	} else {
		/* Full read: allocate a PATH_MAX buffer on mem_ctx. */
		bufsize = PATH_MAX;
		link_target = talloc_array(mem_ctx, char, bufsize);
		if (!link_target) {
			goto err;
		}
	}

	/* Stat first so callers get the link's metadata on success. */
	ret = glfs_lstat(handle->data, smb_fname->base_name, &st);
	if (ret < 0) {
		status = map_nt_error_from_unix(errno);
		goto err;
	}

	referral_len = glfs_readlink(handle->data,
				smb_fname->base_name,
				link_target,
				bufsize - 1);
	if (referral_len < 0) {
		/* EINVAL from readlink means "not a symlink". */
		if (errno == EINVAL) {
			DBG_INFO("%s is not a link.\n", smb_fname->base_name);
			status = NT_STATUS_OBJECT_TYPE_MISMATCH;
		} else {
			status = map_nt_error_from_unix(errno);
			DBG_ERR("Error reading "
				"msdfs link %s: %s\n",
				smb_fname->base_name,
				strerror(errno));
		}
		goto err;
	}
	link_target[referral_len] = '\0';

	DBG_INFO("%s -> %s\n",
			smb_fname->base_name,
			link_target);

	/* Only "msdfs:"-prefixed symlinks are DFS redirects. */
	if (!strnequal(link_target, "msdfs:", 6)) {
		status = NT_STATUS_OBJECT_TYPE_MISMATCH;
		goto err;
	}

	if (ppreflist == NULL && preferral_count == NULL) {
		/* Early return for checking if this is a DFS link. */
		smb_stat_ex_from_stat(&smb_fname->st, &st);
		return NT_STATUS_OK;
	}

	ok = parse_msdfs_symlink(mem_ctx,
			lp_msdfs_shuffle_referrals(SNUM(handle->conn)),
			link_target,
			ppreflist,
			preferral_count);
	if (ok) {
		smb_stat_ex_from_stat(&smb_fname->st, &st);
		status = NT_STATUS_OK;
	} else {
		status = NT_STATUS_NO_MEMORY;
	}

err:
	/* Only free when the talloc path (not the stack buffer) was used. */
	if (link_target != link_target_buf) {
		TALLOC_FREE(link_target);
	}
	return status;
}
/*
 * VFS operation table for the glusterfs backend.  NULL entries fall
 * through to the next module / default implementation.
 */
static struct vfs_fn_pointers glusterfs_fns = {

	/* Disk Operations */

	.connect_fn = vfs_gluster_connect,
	.disconnect_fn = vfs_gluster_disconnect,
	.disk_free_fn = vfs_gluster_disk_free,
	.get_quota_fn = vfs_gluster_get_quota,
	.set_quota_fn = vfs_gluster_set_quota,
	.statvfs_fn = vfs_gluster_statvfs,
	.fs_capabilities_fn = vfs_gluster_fs_capabilities,

	.get_dfs_referrals_fn = NULL,

	/* Directory Operations */

	.fdopendir_fn = vfs_gluster_fdopendir,
	.readdir_fn = vfs_gluster_readdir,
	.seekdir_fn = vfs_gluster_seekdir,
	.telldir_fn = vfs_gluster_telldir,
	.rewind_dir_fn = vfs_gluster_rewinddir,
	.mkdirat_fn = vfs_gluster_mkdirat,
	.closedir_fn = vfs_gluster_closedir,

	/* File Operations */

	.openat_fn = vfs_gluster_openat,
	.create_file_fn = NULL,
	.close_fn = vfs_gluster_close,
	.pread_fn = vfs_gluster_pread,
	.pread_send_fn = vfs_gluster_pread_send,
	.pread_recv_fn = vfs_gluster_pread_recv,
	.pwrite_fn = vfs_gluster_pwrite,
	.pwrite_send_fn = vfs_gluster_pwrite_send,
	.pwrite_recv_fn = vfs_gluster_pwrite_recv,
	.lseek_fn = vfs_gluster_lseek,
	.sendfile_fn = vfs_gluster_sendfile,
	.recvfile_fn = vfs_gluster_recvfile,
	.renameat_fn = vfs_gluster_renameat,
	.fsync_send_fn = vfs_gluster_fsync_send,
	.fsync_recv_fn = vfs_gluster_fsync_recv,

	.stat_fn = vfs_gluster_stat,
	.fstat_fn = vfs_gluster_fstat,
	.lstat_fn = vfs_gluster_lstat,
	.get_alloc_size_fn = vfs_gluster_get_alloc_size,
	.unlinkat_fn = vfs_gluster_unlinkat,

	.chmod_fn = vfs_gluster_chmod,
	.fchmod_fn = vfs_gluster_fchmod,
	.fchown_fn = vfs_gluster_fchown,
	.lchown_fn = vfs_gluster_lchown,
	.chdir_fn = vfs_gluster_chdir,
	.getwd_fn = vfs_gluster_getwd,
	.ntimes_fn = vfs_gluster_ntimes,
	.ftruncate_fn = vfs_gluster_ftruncate,
	.fallocate_fn = vfs_gluster_fallocate,
	.lock_fn = vfs_gluster_lock,
	.kernel_flock_fn = vfs_gluster_kernel_flock,
	.fcntl_fn = vfs_gluster_fcntl,
	.linux_setlease_fn = vfs_gluster_linux_setlease,
	.getlock_fn = vfs_gluster_getlock,
	.symlinkat_fn = vfs_gluster_symlinkat,
	.readlinkat_fn = vfs_gluster_readlinkat,
	.linkat_fn = vfs_gluster_linkat,
	.mknodat_fn = vfs_gluster_mknodat,
	.realpath_fn = vfs_gluster_realpath,
	.chflags_fn = vfs_gluster_chflags,
	.file_id_create_fn = NULL,
	.streaminfo_fn = NULL,
	.get_real_filename_fn = vfs_gluster_get_real_filename,
	.connectpath_fn = vfs_gluster_connectpath,
	.create_dfs_pathat_fn = vfs_gluster_create_dfs_pathat,
	.read_dfs_pathat_fn = vfs_gluster_read_dfs_pathat,

	.brl_lock_windows_fn = NULL,
	.brl_unlock_windows_fn = NULL,
	.strict_lock_check_fn = NULL,
	.translate_name_fn = NULL,
	.fsctl_fn = NULL,

	/* NT ACL Operations */

	.fget_nt_acl_fn = NULL,
	.get_nt_acl_at_fn = NULL,
	.fset_nt_acl_fn = NULL,
	.audit_file_fn = NULL,

	/* Posix ACL Operations */

	/* ACLs are stored/fetched as POSIX-ACL xattrs via the shared
	 * posixacl_xattr helpers. */
	.sys_acl_get_file_fn = posixacl_xattr_acl_get_file,
	.sys_acl_get_fd_fn = posixacl_xattr_acl_get_fd,
	.sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file,
	.sys_acl_blob_get_fd_fn = posix_sys_acl_blob_get_fd,
	.sys_acl_set_file_fn = posixacl_xattr_acl_set_file,
	.sys_acl_set_fd_fn = posixacl_xattr_acl_set_fd,
	.sys_acl_delete_def_file_fn = posixacl_xattr_acl_delete_def_file,

	/* EA Operations */
	.getxattr_fn = vfs_gluster_getxattr,
	.getxattrat_send_fn = vfs_not_implemented_getxattrat_send,
	.getxattrat_recv_fn = vfs_not_implemented_getxattrat_recv,
	.fgetxattr_fn = vfs_gluster_fgetxattr,
	.listxattr_fn = vfs_gluster_listxattr,
	.flistxattr_fn = vfs_gluster_flistxattr,
	.removexattr_fn = vfs_gluster_removexattr,
	.fremovexattr_fn = vfs_gluster_fremovexattr,
	.setxattr_fn = vfs_gluster_setxattr,
	.fsetxattr_fn = vfs_gluster_fsetxattr,

	/* AIO Operations */
	.aio_force_fn = vfs_gluster_aio_force,

	/* Durable handle Operations */
	.durable_cookie_fn = NULL,
	.durable_disconnect_fn = NULL,
	.durable_reconnect_fn = NULL,
};
static_decl_vfs;

/*
 * Module entry point: register this backend as the "glusterfs" VFS
 * object with smbd.
 */
NTSTATUS vfs_glusterfs_init(TALLOC_CTX *ctx)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"glusterfs", &glusterfs_fns);
}