2013-05-29 15:21:46 +04:00
/*
Unix SMB / CIFS implementation .
Wrap GlusterFS GFAPI calls in vfs functions .
Copyright ( c ) 2013 Anand Avati < avati @ redhat . com >
This program is free software ; you can redistribute it and / or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation ; either version 3 of the License , or
( at your option ) any later version .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
You should have received a copy of the GNU General Public License
along with this program . If not , see < http : //www.gnu.org/licenses/>.
*/
2013-12-12 22:27:10 +04:00
/**
* @ file vfs_glusterfs . c
* @ author Anand Avati < avati @ redhat . com >
* @ date May 2013
* @ brief Samba VFS module for glusterfs
*
* @ todo
* - sendfile / recvfile support
*
* A Samba VFS module for GlusterFS , based on Gluster ' s libgfapi .
* This is a " bottom " vfs module ( not something to be stacked on top of
* another module ) , and translates ( most ) calls to the closest actions
* available in libgfapi .
*
*/
2013-05-29 15:21:46 +04:00
# include "includes.h"
# include "smbd/smbd.h"
# include <stdio.h>
2017-10-31 17:52:49 +03:00
# include <glusterfs/api/glfs.h>
2013-05-29 15:21:46 +04:00
# include "lib/util/dlinklist.h"
2014-12-11 05:05:10 +03:00
# include "lib/util/tevent_unix.h"
# include "smbd/globals.h"
2015-10-12 16:57:34 +03:00
# include "lib/util/sys_rw.h"
2016-03-28 10:20:22 +03:00
# include "smbprofile.h"
2016-03-21 05:42:20 +03:00
# include "modules/posixacl_xattr.h"
2013-05-29 15:21:46 +04:00
# define DEFAULT_VOLFILE_SERVER "localhost"
2019-06-03 17:25:46 +03:00
# define GLUSTER_NAME_MAX 255
2013-05-29 15:21:46 +04:00
2015-01-20 07:08:17 +03:00
static int read_fd = - 1 ;
static int write_fd = - 1 ;
2014-12-11 05:05:10 +03:00
static struct tevent_fd * aio_read_event = NULL ;
2013-12-12 22:29:20 +04:00
/**
* Helper to convert struct stat to struct stat_ex .
*/
2013-05-29 15:21:46 +04:00
static void smb_stat_ex_from_stat ( struct stat_ex * dst , const struct stat * src )
{
ZERO_STRUCTP ( dst ) ;
dst - > st_ex_dev = src - > st_dev ;
dst - > st_ex_ino = src - > st_ino ;
dst - > st_ex_mode = src - > st_mode ;
dst - > st_ex_nlink = src - > st_nlink ;
dst - > st_ex_uid = src - > st_uid ;
dst - > st_ex_gid = src - > st_gid ;
dst - > st_ex_rdev = src - > st_rdev ;
dst - > st_ex_size = src - > st_size ;
dst - > st_ex_atime . tv_sec = src - > st_atime ;
dst - > st_ex_mtime . tv_sec = src - > st_mtime ;
dst - > st_ex_ctime . tv_sec = src - > st_ctime ;
dst - > st_ex_btime . tv_sec = src - > st_mtime ;
2013-12-12 22:29:20 +04:00
dst - > st_ex_blksize = src - > st_blksize ;
dst - > st_ex_blocks = src - > st_blocks ;
2013-05-29 15:21:46 +04:00
# ifdef STAT_HAVE_NSEC
2013-12-12 22:29:20 +04:00
dst - > st_ex_atime . tv_nsec = src - > st_atime_nsec ;
dst - > st_ex_mtime . tv_nsec = src - > st_mtime_nsec ;
dst - > st_ex_ctime . tv_nsec = src - > st_ctime_nsec ;
2013-05-29 15:21:46 +04:00
dst - > st_ex_btime . tv_nsec = src - > st_mtime_nsec ;
# endif
}
/* pre-opened glfs_t */
static struct glfs_preopened {
char * volume ;
2014-12-10 09:56:34 +03:00
char * connectpath ;
2013-05-29 15:21:46 +04:00
glfs_t * fs ;
int ref ;
struct glfs_preopened * next , * prev ;
} * glfs_preopened ;
2014-12-10 09:56:34 +03:00
/**
 * Register a freshly initialized glfs_t in the preopened list with an
 * initial reference count of 1.
 *
 * Returns 0 on success, -1 with errno = ENOMEM on allocation failure
 * (in which case ownership of @fs stays with the caller).
 */
static int glfs_set_preopened(const char *volume, const char *connectpath,
			      glfs_t *fs)
{
	struct glfs_preopened *entry;

	entry = talloc_zero(NULL, struct glfs_preopened);
	if (entry == NULL) {
		errno = ENOMEM;
		return -1;
	}

	entry->volume = talloc_strdup(entry, volume);
	if (entry->volume == NULL) {
		talloc_free(entry);
		errno = ENOMEM;
		return -1;
	}

	entry->connectpath = talloc_strdup(entry, connectpath);
	if (entry->connectpath == NULL) {
		talloc_free(entry);
		errno = ENOMEM;
		return -1;
	}

	entry->fs = fs;
	entry->ref = 1;

	DLIST_ADD(glfs_preopened, entry);

	return 0;
}
2014-12-10 09:56:34 +03:00
static glfs_t * glfs_find_preopened ( const char * volume , const char * connectpath )
2013-05-29 15:21:46 +04:00
{
struct glfs_preopened * entry = NULL ;
for ( entry = glfs_preopened ; entry ; entry = entry - > next ) {
2014-12-10 09:56:34 +03:00
if ( strcmp ( entry - > volume , volume ) = = 0 & &
strcmp ( entry - > connectpath , connectpath ) = = 0 )
{
2013-05-29 15:21:46 +04:00
entry - > ref + + ;
return entry - > fs ;
}
}
return NULL ;
}
/**
 * Drop one reference on the preopened entry owning @fs. When the last
 * reference goes away the entry is unlinked, the gluster connection is
 * shut down and the entry freed.
 */
static void glfs_clear_preopened(glfs_t *fs)
{
	struct glfs_preopened *entry = NULL;

	for (entry = glfs_preopened; entry; entry = entry->next) {
		if (entry->fs == fs) {
			if (--entry->ref) {
				return;
			}

			DLIST_REMOVE(glfs_preopened, entry);

			glfs_fini(entry->fs);
			talloc_free(entry);

			/*
			 * BUGFIX: stop here. The original loop continued
			 * and evaluated entry->next on the just-freed
			 * entry — a use-after-free. Only one entry can
			 * match a given fs anyway.
			 */
			return;
		}
	}
}
2016-08-25 13:33:55 +03:00
/**
 * Parse the "glusterfs:volfile_server" option — a space/tab separated
 * list of entries of the form [unix+|tcp+]host[:port] (IPv6 hosts in
 * square brackets) — and register each with gfapi.
 *
 * @return 0 if at least one server was registered (a warning is logged
 *         when only a subset succeeded); -1 when the list was empty or
 *         an allocation failed (errno = ENOMEM).
 */
static int vfs_gluster_set_volfile_servers(glfs_t *fs,
					   const char *volfile_servers)
{
	char *server = NULL;
	size_t server_count = 0;
	size_t server_success = 0;
	int ret = -1;
	TALLOC_CTX *frame = talloc_stackframe();

	DBG_INFO("servers list %s\n", volfile_servers);

	while (next_token_talloc(frame, &volfile_servers, &server, " \t")) {
		char *transport = NULL;
		char *host = NULL;
		int port = 0;

		server_count++;
		DBG_INFO("server %zu %s\n", server_count, server);

		/* Determine the transport type */
		if (strncmp(server, "unix+", 5) == 0) {
			port = 0;
			transport = talloc_strdup(frame, "unix");
			if (!transport) {
				errno = ENOMEM;
				goto out;
			}
			host = talloc_strdup(frame, server + 5);
			if (!host) {
				errno = ENOMEM;
				goto out;
			}
		} else {
			char *p = NULL;
			char *port_index = NULL;

			if (strncmp(server, "tcp+", 4) == 0) {
				server += 4;
			}
			/* IPv6 is enclosed in []
			 * ':' before ']' is part of IPv6
			 * ':' after ']' indicates port
			 */
			p = server;
			if (server[0] == '[') {
				server++;
				p = strchr(server, ']');
				if (p == NULL) {
					/* Malformed IPv6 */
					continue;
				}
				p[0] = '\0';
				p++;
			}

			/*
			 * FIX: use strchr() instead of the legacy index(),
			 * which was removed from POSIX.1-2008. Behavior is
			 * identical.
			 */
			port_index = strchr(p, ':');

			if (port_index == NULL) {
				port = 0;
			} else {
				port = atoi(port_index + 1);
				port_index[0] = '\0';
			}
			transport = talloc_strdup(frame, "tcp");
			if (!transport) {
				errno = ENOMEM;
				goto out;
			}
			host = talloc_strdup(frame, server);
			if (!host) {
				errno = ENOMEM;
				goto out;
			}
		}

		DBG_INFO("Calling set volfile server with params "
			 "transport=%s, host=%s, port=%d\n", transport,
			 host, port);

		ret = glfs_set_volfile_server(fs, transport, host, port);
		if (ret < 0) {
			DBG_WARNING("Failed to set volfile_server "
				    "transport=%s, host=%s, port=%d (%s)\n",
				    transport, host, port, strerror(errno));
		} else {
			server_success++;
		}
	}

out:
	if (server_count == 0) {
		ret = -1;
	} else if (server_success < server_count) {
		DBG_WARNING("Failed to set %zu out of %zu servers parsed\n",
			    server_count - server_success, server_count);
		ret = 0;
	}

	TALLOC_FREE(frame);
	return ret;
}
2013-05-29 15:21:46 +04:00
/* Disk Operations */
static int vfs_gluster_connect ( struct vfs_handle_struct * handle ,
const char * service ,
const char * user )
{
2016-08-25 13:33:55 +03:00
const char * volfile_servers ;
2013-05-29 15:21:46 +04:00
const char * volume ;
2013-11-22 09:04:11 +04:00
char * logfile ;
2013-05-29 15:21:46 +04:00
int loglevel ;
2013-11-22 09:04:11 +04:00
glfs_t * fs = NULL ;
TALLOC_CTX * tmp_ctx ;
int ret = 0 ;
2013-05-29 15:21:46 +04:00
2013-11-22 09:04:11 +04:00
tmp_ctx = talloc_new ( NULL ) ;
if ( tmp_ctx = = NULL ) {
ret = - 1 ;
goto done ;
}
logfile = lp_parm_talloc_string ( tmp_ctx , SNUM ( handle - > conn ) , " glusterfs " ,
2013-05-29 15:21:46 +04:00
" logfile " , NULL ) ;
loglevel = lp_parm_int ( SNUM ( handle - > conn ) , " glusterfs " , " loglevel " , - 1 ) ;
2016-08-25 13:33:55 +03:00
volfile_servers = lp_parm_talloc_string ( tmp_ctx , SNUM ( handle - > conn ) ,
" glusterfs " , " volfile_server " ,
NULL ) ;
if ( volfile_servers = = NULL ) {
volfile_servers = DEFAULT_VOLFILE_SERVER ;
2013-05-29 15:21:46 +04:00
}
volume = lp_parm_const_string ( SNUM ( handle - > conn ) , " glusterfs " , " volume " ,
NULL ) ;
if ( volume = = NULL ) {
volume = service ;
}
2014-12-10 09:56:34 +03:00
fs = glfs_find_preopened ( volume , handle - > conn - > connectpath ) ;
2013-05-29 15:21:46 +04:00
if ( fs ) {
2013-11-22 09:04:11 +04:00
goto done ;
2013-05-29 15:21:46 +04:00
}
fs = glfs_new ( volume ) ;
if ( fs = = NULL ) {
2013-11-22 09:04:11 +04:00
ret = - 1 ;
goto done ;
2013-05-29 15:21:46 +04:00
}
2016-08-25 13:33:55 +03:00
ret = vfs_gluster_set_volfile_servers ( fs , volfile_servers ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
2016-08-25 13:33:55 +03:00
DBG_ERR ( " Failed to set volfile_servers from list %s \n " ,
volfile_servers ) ;
2013-11-22 09:04:11 +04:00
goto done ;
2013-05-29 15:21:46 +04:00
}
ret = glfs_set_xlator_option ( fs , " *-md-cache " , " cache-posix-acl " ,
" true " ) ;
if ( ret < 0 ) {
DEBUG ( 0 , ( " %s: Failed to set xlator options \n " , volume ) ) ;
2013-11-22 09:04:11 +04:00
goto done ;
2013-05-29 15:21:46 +04:00
}
2014-12-10 10:00:10 +03:00
ret = glfs_set_xlator_option ( fs , " *-snapview-client " ,
" snapdir-entry-path " ,
handle - > conn - > connectpath ) ;
if ( ret < 0 ) {
DEBUG ( 0 , ( " %s: Failed to set xlator option: "
" snapdir-entry-path \n " , volume ) ) ;
2016-10-07 13:35:29 +03:00
goto done ;
2014-12-10 10:00:10 +03:00
}
2013-05-29 15:21:46 +04:00
ret = glfs_set_logging ( fs , logfile , loglevel ) ;
if ( ret < 0 ) {
DEBUG ( 0 , ( " %s: Failed to set logfile %s loglevel %d \n " ,
volume , logfile , loglevel ) ) ;
2013-11-22 09:04:11 +04:00
goto done ;
2013-05-29 15:21:46 +04:00
}
ret = glfs_init ( fs ) ;
if ( ret < 0 ) {
DEBUG ( 0 , ( " %s: Failed to initialize volume (%s) \n " ,
volume , strerror ( errno ) ) ) ;
2013-11-22 09:04:11 +04:00
goto done ;
2013-05-29 15:21:46 +04:00
}
2014-12-10 09:56:34 +03:00
ret = glfs_set_preopened ( volume , handle - > conn - > connectpath , fs ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
DEBUG ( 0 , ( " %s: Failed to register volume (%s) \n " ,
volume , strerror ( errno ) ) ) ;
2013-11-22 09:04:11 +04:00
goto done ;
}
2017-10-20 15:55:10 +03:00
/*
* The shadow_copy2 module will fail to export subdirectories
* of a gluster volume unless we specify the mount point ,
* because the detection fails if the file system is not
* locally mounted :
* https : //bugzilla.samba.org/show_bug.cgi?id=13091
*/
lp_do_parameter ( SNUM ( handle - > conn ) , " shadow:mountpoint " , " / " ) ;
smbd: use async dos_mode_at_send in smbd_smb2_query_directory_send()
Finally: use the new dos_mode_at_send() in the directory enumeration
loop. This means that fetching the DOS attributes for directory entries
is done asynchronously with regard to the enumeration loop.
As the DOS attribute is typically read from an extended attribute in the
filesytem, this avoids sequentially blocking on IO. If the IO subsystem
is slow servicing these request, enabling async processing can result in
performance improvements.
A parametric option
smbd:async dosmode = true | false (default: false)
can be used to enable the new async processing.
Simulating slow IO with usleep(5000) in the synchronous and asynchronous
versions of SMB_VFS_GET_DOS_ATTRIBUTES(), the results of enumerating a
directory with 10,000 files are:
smbd:async dosmode = no:
$ time bin/smbclient -U slow%x //localhost/test -c "ls dir\*" > /dev/null
real 0m59.597s
user 0m0.024s
sys 0m0.012s
smbd:async dosmode = yes:
$ time bin/smbclient -U slow%x //localhost/test -c "ls dir\*" > /dev/null
real 0m0.698s
user 0m0.038s
sys 0m0.025s
Performance gains in real world workloads depends on whether the actual
IO requests can be merged and parallelized by the kernel. Without such
wins at the IO layer, the async processing may even be slower then the
sync processing due to the additional overhead.
The following parameters can be used to adapt async processing behaviour
for specific workloads and systems:
aio max threads = X (default: 100)
smbd:max async dosmode = Y (default: "aio max threads" * 2)
By default we have at most twice the number of async requests in flight
as threads provided by the underlying threadpool. This ensures a worker
thread that finishes a job can directly pick up a new one without going
to sleep.
It may be advisable to reduce the number of threads to avoid scheduling
overhead while also increasing "smbd:max async dosmode".
Note that we disable async processing for certain VFS modules in the VFS
connect function to avoid the overhead of triggering the sync fallback
in dos_mode_at_send(). This is done for VFS modules that implement the
sync SMB_VFS_GET_DOS_ATTRIBUTES(), but not the async version (gpfs), and
for VFS modules that don't share a real filesystem where fchdir() can be
used (ceph, gluster). It is disabled for catia, because we realized that
the catia name translation macros used on
fsps (CATIA_FETCH_FSP_[PRE|POST]_NEXT) have a bug (#13547).
We use threadpool = smb_vfs_ev_glue_tp_chdir_safe() and then
pthreadpool_tevent_max_threads(threadpool) to get the number of maximum
worker threads which matches the pool used by the low level
SMB_VFS_GETXATTRAT_[SEND|RECV] implementation in vfs_default.
This is a terrible abstraction leak that should be removed in the future
by maybe making it possible to ask a VFS function which threadpool it
uses, internally suporting chaining so VFS function FOO that internally
uses BAR can forward the question to BAR.
On a hyphotetical system that had a getxattrat(dirfd, path, ...)
syscall and at the same time doesn't support per-thread current working
directories (eg FreeBSD doesn't have the latter) but has support for
per-thread-credentials, pthreadpool_tevent_max_threads() on the
tp_chdir_safe threadpool returns 1.
So when hooking the hyphotetical getxattrat() into the async
SMB_VFS_GETXATTRAT_[SEND|RECV] implementation in an VFS module, the
implementation could use the tp_path_safe threadpool, but the SMB2
layer would use the wrong threadpool in the call to
pthreadpool_tevent_max_threads(), resulting in no parallelism.
Signed-off-by: Ralph Boehme <slow@samba.org>
Reviewed-by: Stefan Metzmacher <metze@samba.org>
2018-07-25 20:14:25 +03:00
/*
* Unless we have an async implementation of getxattrat turn this off .
*/
lp_do_parameter ( SNUM ( handle - > conn ) , " smbd:async dosmode " , " false " ) ;
2013-11-22 09:04:11 +04:00
done :
if ( ret < 0 ) {
if ( fs )
glfs_fini ( fs ) ;
} else {
2016-08-25 13:33:55 +03:00
DBG_ERR ( " %s: Initialized volume from servers %s \n " ,
volume , volfile_servers ) ;
2013-11-22 09:04:11 +04:00
handle - > data = fs ;
2013-05-29 15:21:46 +04:00
}
2016-08-25 13:33:55 +03:00
talloc_free ( tmp_ctx ) ;
return ret ;
2013-05-29 15:21:46 +04:00
}
static void vfs_gluster_disconnect ( struct vfs_handle_struct * handle )
{
glfs_t * fs = NULL ;
fs = handle - > data ;
glfs_clear_preopened ( fs ) ;
}
static uint64_t vfs_gluster_disk_free ( struct vfs_handle_struct * handle ,
2017-05-23 20:40:47 +03:00
const struct smb_filename * smb_fname ,
uint64_t * bsize_p ,
uint64_t * dfree_p ,
uint64_t * dsize_p )
2013-05-29 15:21:46 +04:00
{
struct statvfs statvfs = { 0 , } ;
int ret ;
2017-05-23 20:40:47 +03:00
ret = glfs_statvfs ( handle - > data , smb_fname - > base_name , & statvfs ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
return - 1 ;
}
if ( bsize_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* bsize_p = ( uint64_t ) statvfs . f_bsize ; /* Block size */
2013-05-29 15:21:46 +04:00
}
if ( dfree_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* dfree_p = ( uint64_t ) statvfs . f_bavail ; /* Available Block units */
2013-05-29 15:21:46 +04:00
}
if ( dsize_p ! = NULL ) {
2013-08-07 01:45:06 +04:00
* dsize_p = ( uint64_t ) statvfs . f_blocks ; /* Total Block units */
2013-05-29 15:21:46 +04:00
}
2013-08-07 01:45:06 +04:00
return ( uint64_t ) statvfs . f_bavail ;
2013-05-29 15:21:46 +04:00
}
/* Quota reporting is not implemented for gluster shares. */
static int vfs_gluster_get_quota(struct vfs_handle_struct *handle,
				 const struct smb_filename *smb_fname,
				 enum SMB_QUOTA_TYPE qtype,
				 unid_t id,
				 SMB_DISK_QUOTA *qt)
{
	errno = ENOSYS;
	return -1;
}
/* Quota modification is not implemented for gluster shares. */
static int
vfs_gluster_set_quota(struct vfs_handle_struct *handle,
		      enum SMB_QUOTA_TYPE qtype, unid_t id, SMB_DISK_QUOTA *qt)
{
	errno = ENOSYS;
	return -1;
}
static int vfs_gluster_statvfs ( struct vfs_handle_struct * handle ,
2017-06-03 01:26:06 +03:00
const struct smb_filename * smb_fname ,
struct vfs_statvfs_struct * vfs_statvfs )
2013-05-29 15:21:46 +04:00
{
struct statvfs statvfs = { 0 , } ;
int ret ;
2017-06-03 01:26:06 +03:00
ret = glfs_statvfs ( handle - > data , smb_fname - > base_name , & statvfs ) ;
2013-05-29 15:21:46 +04:00
if ( ret < 0 ) {
DEBUG ( 0 , ( " glfs_statvfs(%s) failed: %s \n " ,
2017-06-03 01:26:06 +03:00
smb_fname - > base_name , strerror ( errno ) ) ) ;
2013-05-29 15:21:46 +04:00
return - 1 ;
}
ZERO_STRUCTP ( vfs_statvfs ) ;
vfs_statvfs - > OptimalTransferSize = statvfs . f_frsize ;
vfs_statvfs - > BlockSize = statvfs . f_bsize ;
vfs_statvfs - > TotalBlocks = statvfs . f_blocks ;
vfs_statvfs - > BlocksAvail = statvfs . f_bfree ;
vfs_statvfs - > UserBlocksAvail = statvfs . f_bavail ;
vfs_statvfs - > TotalFileNodes = statvfs . f_files ;
vfs_statvfs - > FreeFileNodes = statvfs . f_ffree ;
vfs_statvfs - > FsIdentifier = statvfs . f_fsid ;
vfs_statvfs - > FsCapabilities =
FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES ;
return ret ;
}
/**
 * SMB_VFS_FS_CAPABILITIES hook: advertise case sensitivity, sparse-file
 * support (gfapi >= 6) and nanosecond timestamp resolution when the
 * platform's struct stat carries nanoseconds.
 */
static uint32_t vfs_gluster_fs_capabilities(struct vfs_handle_struct *handle,
					    enum timestamp_set_resolution *p_ts_res)
{
	uint32_t caps = FILE_CASE_SENSITIVE_SEARCH | FILE_CASE_PRESERVED_NAMES;

#ifdef HAVE_GFAPI_VER_6
	caps |= FILE_SUPPORTS_SPARSE_FILES;
#endif

#ifdef STAT_HAVE_NSEC
	*p_ts_res = TIMESTAMP_SET_NT_OR_BETTER;
#endif

	return caps;
}
static DIR * vfs_gluster_opendir ( struct vfs_handle_struct * handle ,
2016-03-02 02:18:32 +03:00
const struct smb_filename * smb_fname ,
const char * mask ,
2015-05-03 06:11:02 +03:00
uint32_t attributes )
2013-05-29 15:21:46 +04:00
{
glfs_fd_t * fd ;
2016-03-02 02:18:32 +03:00
fd = glfs_opendir ( handle - > data , smb_fname - > base_name ) ;
2013-05-29 15:21:46 +04:00
if ( fd = = NULL ) {
DEBUG ( 0 , ( " glfs_opendir(%s) failed: %s \n " ,
2016-03-02 02:18:32 +03:00
smb_fname - > base_name , strerror ( errno ) ) ) ;
2013-05-29 15:21:46 +04:00
}
return ( DIR * ) fd ;
}
2018-10-10 18:32:25 +03:00
/**
 * Retrieve the glfs_fd_t stashed on the fsp by vfs_gluster_open().
 * Returns NULL (after logging) when the extension is absent or empty.
 */
static glfs_fd_t *vfs_gluster_fetch_glfd(struct vfs_handle_struct *handle,
					 files_struct *fsp)
{
	glfs_fd_t **glfd = (glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp);

	if (glfd == NULL) {
		DBG_INFO("Failed to fetch fsp extension\n");
		return NULL;
	}
	if (*glfd == NULL) {
		DBG_INFO("Empty glfs_fd_t pointer\n");
		return NULL;
	}

	return *glfd;
}
2013-05-29 15:21:46 +04:00
static DIR * vfs_gluster_fdopendir ( struct vfs_handle_struct * handle ,
files_struct * fsp , const char * mask ,
2015-05-03 06:11:02 +03:00
uint32_t attributes )
2013-05-29 15:21:46 +04:00
{
2018-10-10 18:32:25 +03:00
glfs_fd_t * glfd = vfs_gluster_fetch_glfd ( handle , fsp ) ;
if ( glfd = = NULL ) {
DBG_ERR ( " Failed to fetch gluster fd \n " ) ;
return NULL ;
}
return ( DIR * ) glfd ;
2013-05-29 15:21:46 +04:00
}
/* SMB_VFS_CLOSEDIR hook: the DIR* is really a glfs_fd_t*. */
static int vfs_gluster_closedir(struct vfs_handle_struct *handle, DIR *dirp)
{
	return glfs_closedir((void *)dirp);
}
/**
 * SMB_VFS_READDIR hook: fetch the next entry; when sbuf is wanted, use
 * glfs_readdirplus_r so the stat comes back in the same call.
 *
 * NOTE(review): direntbuf is static, so this is not reentrant —
 * presumably acceptable in smbd's per-connection process model, but
 * confirm before calling from threaded code.
 */
static struct dirent *vfs_gluster_readdir(struct vfs_handle_struct *handle,
					  DIR *dirp, SMB_STRUCT_STAT *sbuf)
{
	static char direntbuf[512];
	int ret;
	struct stat stat;
	struct dirent *dirent = 0;

	if (sbuf != NULL) {
		ret = glfs_readdirplus_r((void *)dirp, &stat, (void *)direntbuf,
					 &dirent);
	} else {
		ret = glfs_readdir_r((void *)dirp, (void *)direntbuf, &dirent);
	}

	if ((ret < 0) || (dirent == NULL)) {
		return NULL;
	}

	if (sbuf != NULL) {
		smb_stat_ex_from_stat(sbuf, &stat);
	}

	return dirent;
}
/* SMB_VFS_TELLDIR hook. */
static long vfs_gluster_telldir(struct vfs_handle_struct *handle, DIR *dirp)
{
	return glfs_telldir((void *)dirp);
}
/* SMB_VFS_SEEKDIR hook. */
static void vfs_gluster_seekdir(struct vfs_handle_struct *handle, DIR *dirp,
				long offset)
{
	glfs_seekdir((void *)dirp, offset);
}
/* SMB_VFS_REWINDDIR hook: rewinding is a seek to offset 0. */
static void vfs_gluster_rewinddir(struct vfs_handle_struct *handle, DIR *dirp)
{
	glfs_seekdir((void *)dirp, 0);
}
2016-02-25 00:58:16 +03:00
static int vfs_gluster_mkdir ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname ,
2013-05-29 15:21:46 +04:00
mode_t mode )
{
2016-02-25 00:58:16 +03:00
return glfs_mkdir ( handle - > data , smb_fname - > base_name , mode ) ;
2013-05-29 15:21:46 +04:00
}
2016-02-25 01:02:45 +03:00
static int vfs_gluster_rmdir ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname )
2013-05-29 15:21:46 +04:00
{
2016-02-25 01:02:45 +03:00
return glfs_rmdir ( handle - > data , smb_fname - > base_name ) ;
2013-05-29 15:21:46 +04:00
}
/**
 * SMB_VFS_OPEN hook: open/create a file or directory via gfapi and
 * stash the resulting glfs_fd_t as an fsp extension for later fd-based
 * calls. Returns a fixed non-negative marker on success (the real
 * handle lives in the extension) or -1 on error.
 */
static int vfs_gluster_open(struct vfs_handle_struct *handle,
			    struct smb_filename *smb_fname, files_struct *fsp,
			    int flags, mode_t mode)
{
	glfs_fd_t *glfd;
	glfs_fd_t **p_tmp;

	p_tmp = VFS_ADD_FSP_EXTENSION(handle, fsp, glfs_fd_t *, NULL);
	if (p_tmp == NULL) {
		errno = ENOMEM;
		return -1;
	}

	if (flags & O_DIRECTORY) {
		glfd = glfs_opendir(handle->data, smb_fname->base_name);
	} else if (flags & O_CREAT) {
		glfd = glfs_creat(handle->data, smb_fname->base_name, flags,
				  mode);
	} else {
		glfd = glfs_open(handle->data, smb_fname->base_name, flags);
	}

	if (glfd == NULL) {
		/* no extension destroy_fn, so no need to save errno */
		VFS_REMOVE_FSP_EXTENSION(handle, fsp);
		return -1;
	}

	*p_tmp = glfd;

	/* An arbitrary value for error reporting, so you know its us. */
	return 13371337;
}
/**
 * SMB_VFS_CLOSE hook: drop the fsp extension first, then close the
 * underlying gluster fd.
 */
static int vfs_gluster_close(struct vfs_handle_struct *handle,
			     files_struct *fsp)
{
	glfs_fd_t *glfd = vfs_gluster_fetch_glfd(handle, fsp);

	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	VFS_REMOVE_FSP_EXTENSION(handle, fsp);

	return glfs_close(glfd);
}
/**
 * SMB_VFS_PREAD hook (synchronous). gfapi >= 7.6 grew extra pre/post
 * stat out-parameters; we pass NULL since Samba doesn't need them.
 */
static ssize_t vfs_gluster_pread(struct vfs_handle_struct *handle,
				 files_struct *fsp, void *data, size_t n,
				 off_t offset)
{
	glfs_fd_t *glfd = vfs_gluster_fetch_glfd(handle, fsp);

	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

#ifdef HAVE_GFAPI_VER_7_6
	return glfs_pread(glfd, data, n, offset, 0, NULL);
#else
	return glfs_pread(glfd, data, n, offset, 0);
#endif
}
2015-11-18 19:09:06 +03:00
struct glusterfs_aio_state ;
struct glusterfs_aio_wrapper {
struct glusterfs_aio_state * state ;
} ;
2014-12-11 05:05:10 +03:00
struct glusterfs_aio_state {
ssize_t ret ;
2015-11-18 19:09:06 +03:00
struct tevent_req * req ;
bool cancelled ;
2016-02-26 12:54:01 +03:00
struct vfs_aio_state vfs_aio_state ;
2016-02-26 13:14:36 +03:00
struct timespec start ;
2014-12-11 05:05:10 +03:00
} ;
2015-12-15 15:20:38 +03:00
static int aio_wrapper_destructor ( struct glusterfs_aio_wrapper * wrap )
2015-11-18 19:09:06 +03:00
{
2016-03-05 02:00:07 +03:00
if ( wrap - > state ! = NULL ) {
wrap - > state - > cancelled = true ;
}
2015-11-18 19:09:06 +03:00
return 0 ;
}
2014-12-11 05:05:10 +03:00
/*
* This function is the callback that will be called on glusterfs
* threads once the async IO submitted is complete . To notify
2015-01-21 22:49:24 +03:00
* Samba of the completion we use a pipe based queue .
2014-12-11 05:05:10 +03:00
*/
2018-03-20 09:02:20 +03:00
# ifdef HAVE_GFAPI_VER_7_6
static void aio_glusterfs_done ( glfs_fd_t * fd , ssize_t ret ,
struct glfs_stat * prestat ,
struct glfs_stat * poststat ,
void * data )
# else
2014-12-11 05:05:10 +03:00
static void aio_glusterfs_done ( glfs_fd_t * fd , ssize_t ret , void * data )
2018-03-20 09:02:20 +03:00
# endif
2014-12-11 05:05:10 +03:00
{
struct glusterfs_aio_state * state = NULL ;
2015-01-20 07:08:17 +03:00
int sts = 0 ;
2016-02-26 13:14:36 +03:00
struct timespec end ;
2014-12-11 05:05:10 +03:00
2015-11-18 19:09:06 +03:00
state = ( struct glusterfs_aio_state * ) data ;
2014-12-11 05:05:10 +03:00
2016-03-28 10:20:22 +03:00
PROFILE_TIMESTAMP ( & end ) ;
2016-02-26 13:14:36 +03:00
2014-12-11 05:05:10 +03:00
if ( ret < 0 ) {
state - > ret = - 1 ;
2016-02-26 12:54:01 +03:00
state - > vfs_aio_state . error = errno ;
2014-12-11 05:05:10 +03:00
} else {
state - > ret = ret ;
}
2016-02-26 13:14:36 +03:00
state - > vfs_aio_state . duration = nsec_time_diff ( & end , & state - > start ) ;
2014-12-11 05:05:10 +03:00
/*
2015-11-18 19:09:06 +03:00
* Write the state pointer to glusterfs_aio_state to the
* pipe , so we can call tevent_req_done ( ) from the main thread ,
* because tevent_req_done ( ) is not designed to be executed in
* the multithread environment , so tevent_req_done ( ) must be
2014-12-11 05:05:10 +03:00
* executed from the smbd main thread .
2015-01-23 01:14:31 +03:00
*
* write ( 2 ) on pipes with sizes under _POSIX_PIPE_BUF
* in size is atomic , without this , the use op pipes in this
* code would not work .
*
* sys_write is a thin enough wrapper around write ( 2 )
* that we can trust it here .
2014-12-11 05:05:10 +03:00
*/
2015-11-18 19:09:06 +03:00
sts = sys_write ( write_fd , & state , sizeof ( struct glusterfs_aio_state * ) ) ;
2015-01-22 00:07:53 +03:00
if ( sts < 0 ) {
DEBUG ( 0 , ( " \n Write to pipe failed (%s) " , strerror ( errno ) ) ) ;
2014-12-11 05:05:10 +03:00
}
2015-01-20 07:08:17 +03:00
2014-12-11 05:05:10 +03:00
return ;
}
2015-01-20 07:08:17 +03:00
/*
* Read each req off the pipe and process it .
*/
2014-12-11 05:05:10 +03:00
static void aio_tevent_fd_done ( struct tevent_context * event_ctx ,
struct tevent_fd * fde ,
2015-05-03 06:11:02 +03:00
uint16_t flags , void * data )
2014-12-11 05:05:10 +03:00
{
struct tevent_req * req = NULL ;
2015-11-18 19:09:06 +03:00
struct glusterfs_aio_state * state = NULL ;
2015-01-20 07:08:17 +03:00
int sts = 0 ;
2015-01-23 01:14:31 +03:00
/*
* read ( 2 ) on pipes is atomic if the needed data is available
* in the pipe , per SUS and POSIX . Because we always write
* to the pipe in sizeof ( struct tevent_req * ) chunks , we can
* always read in those chunks , atomically .
*
* sys_read is a thin enough wrapper around read ( 2 ) that we
* can trust it here .
*/
2015-11-18 19:09:06 +03:00
sts = sys_read ( read_fd , & state , sizeof ( struct glusterfs_aio_state * ) ) ;
2015-01-20 07:08:17 +03:00
if ( sts < 0 ) {
DEBUG ( 0 , ( " \n Read from pipe failed (%s) " , strerror ( errno ) ) ) ;
}
2015-11-18 19:09:06 +03:00
/* if we've cancelled the op, there is no req, so just clean up. */
if ( state - > cancelled = = true ) {
TALLOC_FREE ( state ) ;
return ;
}
2015-12-17 17:19:22 +03:00
req = state - > req ;
2015-01-20 07:08:17 +03:00
if ( req ) {
tevent_req_done ( req ) ;
2014-12-11 05:05:10 +03:00
}
return ;
}
static bool init_gluster_aio ( struct vfs_handle_struct * handle )
{
2015-01-20 07:08:17 +03:00
int fds [ 2 ] ;
int ret = - 1 ;
if ( read_fd ! = - 1 ) {
2014-12-11 05:05:10 +03:00
/*
* Already initialized .
*/
return true ;
}
2015-01-20 07:08:17 +03:00
ret = pipe ( fds ) ;
if ( ret = = - 1 ) {
2014-12-11 05:05:10 +03:00
goto fail ;
}
2015-01-20 07:08:17 +03:00
read_fd = fds [ 0 ] ;
write_fd = fds [ 1 ] ;
2018-12-27 17:26:08 +03:00
aio_read_event = tevent_add_fd ( handle - > conn - > sconn - > ev_ctx ,
2014-12-11 05:05:10 +03:00
NULL ,
2015-01-20 07:08:17 +03:00
read_fd ,
2014-12-11 05:05:10 +03:00
TEVENT_FD_READ ,
aio_tevent_fd_done ,
NULL ) ;
if ( aio_read_event = = NULL ) {
goto fail ;
}
return true ;
fail :
TALLOC_FREE ( aio_read_event ) ;
2015-01-20 07:08:17 +03:00
if ( read_fd ! = - 1 ) {
close ( read_fd ) ;
close ( write_fd ) ;
read_fd = - 1 ;
write_fd = - 1 ;
2014-12-11 05:05:10 +03:00
}
return false ;
}
2015-11-18 19:09:06 +03:00
/**
 * Allocate a tevent_req plus aio state pair. The state deliberately
 * lives on the NULL talloc context: the glusterfs callback thread may
 * outlive the request, so freeing the request only flips the state to
 * "cancelled" (via the wrapper destructor) rather than freeing it.
 *
 * Returns the state (whose ->req is the new request) or NULL on OOM.
 */
static struct glusterfs_aio_state *aio_state_create(TALLOC_CTX *mem_ctx)
{
	struct tevent_req *req = NULL;
	struct glusterfs_aio_state *state = NULL;
	struct glusterfs_aio_wrapper *wrapper = NULL;

	req = tevent_req_create(mem_ctx, &wrapper,
				struct glusterfs_aio_wrapper);
	if (req == NULL) {
		return NULL;
	}

	state = talloc_zero(NULL, struct glusterfs_aio_state);
	if (state == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}

	talloc_set_destructor(wrapper, aio_wrapper_destructor);

	state->cancelled = false;
	state->req = req;
	wrapper->state = state;

	return state;
}
/**
 * SMB_VFS_PREAD_SEND hook: submit an async read via glfs_pread_async().
 * Completion is delivered through the AIO pipe (see aio_glusterfs_done).
 */
static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
						 *handle, TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 files_struct *fsp,
						 void *data, size_t n,
						 off_t offset)
{
	struct glusterfs_aio_state *state = NULL;
	struct tevent_req *req = NULL;
	int ret = 0;
	glfs_fd_t *glfd = vfs_gluster_fetch_glfd(handle, fsp);

	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	state = aio_state_create(mem_ctx);
	if (state == NULL) {
		return NULL;
	}
	req = state->req;

	if (!init_gluster_aio(handle)) {
		tevent_req_error(req, EIO);
		return tevent_req_post(req, ev);
	}

	/*
	 * aio_glusterfs_done and aio_tevent_fd_done()
	 * use the raw tevent context. We need to use
	 * tevent_req_defer_callback() in order to
	 * use the event context we're started with.
	 */
	tevent_req_defer_callback(req, ev);

	PROFILE_TIMESTAMP(&state->start);

	ret = glfs_pread_async(glfd, data, n, offset, 0, aio_glusterfs_done,
			       state);
	if (ret < 0) {
		tevent_req_error(req, -ret);
		return tevent_req_post(req, ev);
	}

	return req;
}
/**
 * SMB_VFS_PWRITE_SEND hook: submit an async write via
 * glfs_pwrite_async(); mirrors vfs_gluster_pread_send().
 */
static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
						  *handle, TALLOC_CTX *mem_ctx,
						  struct tevent_context *ev,
						  files_struct *fsp,
						  const void *data, size_t n,
						  off_t offset)
{
	struct glusterfs_aio_state *state = NULL;
	struct tevent_req *req = NULL;
	int ret = 0;
	glfs_fd_t *glfd = vfs_gluster_fetch_glfd(handle, fsp);

	if (glfd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	state = aio_state_create(mem_ctx);
	if (state == NULL) {
		return NULL;
	}
	req = state->req;

	if (!init_gluster_aio(handle)) {
		tevent_req_error(req, EIO);
		return tevent_req_post(req, ev);
	}

	/*
	 * aio_glusterfs_done and aio_tevent_fd_done()
	 * use the raw tevent context. We need to use
	 * tevent_req_defer_callback() in order to
	 * use the event context we're started with.
	 */
	tevent_req_defer_callback(req, ev);

	PROFILE_TIMESTAMP(&state->start);

	ret = glfs_pwrite_async(glfd, data, n, offset, 0, aio_glusterfs_done,
				state);
	if (ret < 0) {
		tevent_req_error(req, -ret);
		return tevent_req_post(req, ev);
	}

	return req;
}
2016-02-26 12:54:01 +03:00
static ssize_t vfs_gluster_recv ( struct tevent_req * req ,
struct vfs_aio_state * vfs_aio_state )
2013-05-29 15:21:46 +04:00
{
2015-11-18 19:09:06 +03:00
struct glusterfs_aio_wrapper * wrapper = NULL ;
int ret = 0 ;
wrapper = tevent_req_data ( req , struct glusterfs_aio_wrapper ) ;
if ( wrapper = = NULL ) {
return - 1 ;
}
2016-03-05 02:00:07 +03:00
if ( wrapper - > state = = NULL ) {
2014-12-11 05:05:10 +03:00
return - 1 ;
}
2016-02-26 12:54:01 +03:00
if ( tevent_req_is_unix_error ( req , & vfs_aio_state - > error ) ) {
2014-12-11 05:05:10 +03:00
return - 1 ;
}
2015-11-18 19:09:06 +03:00
2016-03-05 02:00:07 +03:00
* vfs_aio_state = wrapper - > state - > vfs_aio_state ;
ret = wrapper - > state - > ret ;
2015-11-18 19:09:06 +03:00
/* Clean up the state, it is in a NULL context. */
2016-03-05 02:00:07 +03:00
TALLOC_FREE ( wrapper - > state ) ;
2015-11-18 19:09:06 +03:00
return ret ;
2013-05-29 15:21:46 +04:00
}
2015-12-11 14:27:17 +03:00
/* Synchronous pwrite, mapped straight onto glfs_pwrite(). */
static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
				  files_struct *fsp, const void *data,
				  size_t n, off_t offset)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	/* gfapi >= 7.6 takes two extra out-parameters; unused here. */
#ifdef HAVE_GFAPI_VER_7_6
	return glfs_pwrite(fd, data, n, offset, 0, NULL, NULL);
#else
	return glfs_pwrite(fd, data, n, offset, 0);
#endif
}
2013-05-29 15:21:46 +04:00
/* Seek within the gluster-backed file, mirroring lseek(2) semantics. */
static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
			       files_struct *fsp, off_t offset, int whence)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_lseek(fd, offset, whence);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
/* sendfile is not provided by gfapi; report ENOTSUP. */
static ssize_t vfs_gluster_sendfile(struct vfs_handle_struct *handle, int tofd,
				    files_struct *fromfsp,
				    const DATA_BLOB *hdr,
				    off_t offset, size_t n)
{
	errno = ENOTSUP;
	return -1;
}
/* recvfile is not provided by gfapi; report ENOTSUP. */
static ssize_t vfs_gluster_recvfile(struct vfs_handle_struct *handle,
				    int fromfd, files_struct *tofsp,
				    off_t offset, size_t n)
{
	errno = ENOTSUP;
	return -1;
}
static int vfs_gluster_rename ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname_src ,
const struct smb_filename * smb_fname_dst )
{
return glfs_rename ( handle - > data , smb_fname_src - > base_name ,
smb_fname_dst - > base_name ) ;
}
/*
 * Asynchronous fsync via glfs_fsync_async(); completion is delivered
 * through aio_glusterfs_done().
 */
static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
						 *handle, TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 files_struct *fsp)
{
	struct tevent_req *req = NULL;
	struct glusterfs_aio_state *state = NULL;
	int ret;
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return NULL;
	}

	state = aio_state_create(mem_ctx);
	if (state == NULL) {
		return NULL;
	}
	req = state->req;

	if (!init_gluster_aio(handle)) {
		tevent_req_error(req, EIO);
		return tevent_req_post(req, ev);
	}

	/*
	 * aio_glusterfs_done() and aio_tevent_fd_done() run on the raw
	 * tevent context; defer the callback so it fires on the event
	 * context we were started with.
	 */
	tevent_req_defer_callback(req, ev);

	PROFILE_TIMESTAMP(&state->start);

	ret = glfs_fsync_async(fd, aio_glusterfs_done, state);
	if (ret < 0) {
		/* glfs_*_async returns a negative errno on failure. */
		tevent_req_error(req, -ret);
		return tevent_req_post(req, ev);
	}

	return req;
}
2016-02-26 12:54:01 +03:00
/*
 * fsync completion: reuse the generic recv helper. The ssize_t result
 * is implicitly narrowed to int, which is safe for fsync (0 or -1).
 */
static int vfs_gluster_fsync_recv(struct tevent_req *req,
				  struct vfs_aio_state *vfs_aio_state)
{
	return vfs_gluster_recv(req, vfs_aio_state);
}
static int vfs_gluster_stat ( struct vfs_handle_struct * handle ,
struct smb_filename * smb_fname )
{
struct stat st ;
int ret ;
ret = glfs_stat ( handle - > data , smb_fname - > base_name , & st ) ;
if ( ret = = 0 ) {
smb_stat_ex_from_stat ( & smb_fname - > st , & st ) ;
}
if ( ret < 0 & & errno ! = ENOENT ) {
DEBUG ( 0 , ( " glfs_stat(%s) failed: %s \n " ,
smb_fname - > base_name , strerror ( errno ) ) ) ;
}
return ret ;
}
/* fstat(2) equivalent on the gluster fd behind fsp. */
static int vfs_gluster_fstat(struct vfs_handle_struct *handle,
			     files_struct *fsp, SMB_STRUCT_STAT *sbuf)
{
	struct stat st;
	int ret;
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	ret = glfs_fstat(fd, &st);
	if (ret == 0) {
		smb_stat_ex_from_stat(sbuf, &st);
	} else {
		DEBUG(0, ("glfs_fstat(%d) failed: %s\n",
			  fsp->fh->fd, strerror(errno)));
	}
	return ret;
}
static int vfs_gluster_lstat ( struct vfs_handle_struct * handle ,
struct smb_filename * smb_fname )
{
struct stat st ;
int ret ;
ret = glfs_lstat ( handle - > data , smb_fname - > base_name , & st ) ;
if ( ret = = 0 ) {
smb_stat_ex_from_stat ( & smb_fname - > st , & st ) ;
}
if ( ret < 0 & & errno ! = ENOENT ) {
DEBUG ( 0 , ( " glfs_lstat(%s) failed: %s \n " ,
smb_fname - > base_name , strerror ( errno ) ) ) ;
}
return ret ;
}
/*
 * Allocated size in bytes: st_ex_blocks counts 512-byte units, as with
 * POSIX st_blocks.
 */
static uint64_t vfs_gluster_get_alloc_size(struct vfs_handle_struct *handle,
					   files_struct *fsp,
					   const SMB_STRUCT_STAT *sbuf)
{
	return (uint64_t)sbuf->st_ex_blocks * 512;
}
static int vfs_gluster_unlink ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname )
{
return glfs_unlink ( handle - > data , smb_fname - > base_name ) ;
}
static int vfs_gluster_chmod ( struct vfs_handle_struct * handle ,
2016-03-02 03:20:25 +03:00
const struct smb_filename * smb_fname ,
mode_t mode )
2013-05-29 15:21:46 +04:00
{
2016-03-02 03:20:25 +03:00
return glfs_chmod ( handle - > data , smb_fname - > base_name , mode ) ;
2013-05-29 15:21:46 +04:00
}
/* Handle-based chmod on the gluster fd behind fsp. */
static int vfs_gluster_fchmod(struct vfs_handle_struct *handle,
			      files_struct *fsp, mode_t mode)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_fchmod(fd, mode);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
static int vfs_gluster_chown ( struct vfs_handle_struct * handle ,
2016-03-03 22:54:23 +03:00
const struct smb_filename * smb_fname ,
uid_t uid ,
gid_t gid )
2013-05-29 15:21:46 +04:00
{
2016-03-03 22:54:23 +03:00
return glfs_chown ( handle - > data , smb_fname - > base_name , uid , gid ) ;
2013-05-29 15:21:46 +04:00
}
/* Handle-based chown on the gluster fd behind fsp. */
static int vfs_gluster_fchown(struct vfs_handle_struct *handle,
			      files_struct *fsp, uid_t uid, gid_t gid)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_fchown(fd, uid, gid);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
static int vfs_gluster_lchown ( struct vfs_handle_struct * handle ,
2016-03-04 01:34:57 +03:00
const struct smb_filename * smb_fname ,
uid_t uid ,
gid_t gid )
2013-05-29 15:21:46 +04:00
{
2016-03-04 01:34:57 +03:00
return glfs_lchown ( handle - > data , smb_fname - > base_name , uid , gid ) ;
2013-05-29 15:21:46 +04:00
}
2017-06-29 21:29:33 +03:00
static int vfs_gluster_chdir ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname )
2013-05-29 15:21:46 +04:00
{
2017-06-29 21:29:33 +03:00
return glfs_chdir ( handle - > data , smb_fname - > base_name ) ;
2013-05-29 15:21:46 +04:00
}
2017-06-30 00:32:47 +03:00
/*
 * Return the current working directory as a freshly allocated
 * smb_filename on ctx, or NULL on allocation/lookup failure.
 */
static struct smb_filename *vfs_gluster_getwd(struct vfs_handle_struct *handle,
					      TALLOC_CTX *ctx)
{
	struct smb_filename *result = NULL;
	char *buf = SMB_CALLOC_ARRAY(char, PATH_MAX);

	if (buf == NULL) {
		return NULL;
	}

	/*
	 * PATH_MAX - 1 leaves the final zeroed byte in place, so the
	 * buffer is always NUL-terminated.
	 */
	if (glfs_getcwd(handle->data, buf, PATH_MAX - 1) != NULL) {
		result = synthetic_smb_fname(ctx,
					     buf,
					     NULL,
					     NULL,
					     0);
	}

	SAFE_FREE(buf);
	return result;
}
static int vfs_gluster_ntimes ( struct vfs_handle_struct * handle ,
const struct smb_filename * smb_fname ,
struct smb_file_time * ft )
{
struct timespec times [ 2 ] ;
2014-01-10 19:26:18 +04:00
if ( null_timespec ( ft - > atime ) ) {
times [ 0 ] . tv_sec = smb_fname - > st . st_ex_atime . tv_sec ;
times [ 0 ] . tv_nsec = smb_fname - > st . st_ex_atime . tv_nsec ;
} else {
times [ 0 ] . tv_sec = ft - > atime . tv_sec ;
times [ 0 ] . tv_nsec = ft - > atime . tv_nsec ;
}
if ( null_timespec ( ft - > mtime ) ) {
times [ 1 ] . tv_sec = smb_fname - > st . st_ex_mtime . tv_sec ;
times [ 1 ] . tv_nsec = smb_fname - > st . st_ex_mtime . tv_nsec ;
} else {
times [ 1 ] . tv_sec = ft - > mtime . tv_sec ;
times [ 1 ] . tv_nsec = ft - > mtime . tv_nsec ;
}
if ( ( timespec_compare ( & times [ 0 ] ,
& smb_fname - > st . st_ex_atime ) = = 0 ) & &
( timespec_compare ( & times [ 1 ] ,
& smb_fname - > st . st_ex_mtime ) = = 0 ) ) {
return 0 ;
}
2013-05-29 15:21:46 +04:00
return glfs_utimens ( handle - > data , smb_fname - > base_name , times ) ;
}
/* Truncate the gluster fd behind fsp to the given length. */
static int vfs_gluster_ftruncate(struct vfs_handle_struct *handle,
				 files_struct *fsp, off_t offset)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	/* gfapi >= 7.6 takes two extra out-parameters; unused here. */
#ifdef HAVE_GFAPI_VER_7_6
	return glfs_ftruncate(fd, offset, NULL, NULL);
#else
	return glfs_ftruncate(fd, offset);
#endif
}
/*
 * fallocate: KEEP_SIZE maps to glfs_fallocate's keep_size flag and
 * PUNCH_HOLE maps to glfs_discard(); any other mode flag is rejected.
 * Only available with gfapi >= 6.
 */
static int vfs_gluster_fallocate(struct vfs_handle_struct *handle,
				 struct files_struct *fsp,
				 uint32_t mode,
				 off_t offset, off_t len)
{
#ifdef HAVE_GFAPI_VER_6
	int keep_size;
	int punch_hole;
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return -1;
	}

	keep_size = mode & VFS_FALLOCATE_FL_KEEP_SIZE;
	punch_hole = mode & VFS_FALLOCATE_FL_PUNCH_HOLE;

	/* Reject any flags beyond the two we can translate. */
	mode &= ~(VFS_FALLOCATE_FL_KEEP_SIZE | VFS_FALLOCATE_FL_PUNCH_HOLE);
	if (mode != 0) {
		errno = ENOTSUP;
		return -1;
	}

	if (punch_hole) {
		return glfs_discard(fd, offset, len);
	}

	return glfs_fallocate(fd, keep_size, offset, len);
#else
	errno = ENOTSUP;
	return -1;
#endif
}
2017-06-30 21:32:59 +03:00
/*
 * Resolve a path to its canonical form inside the volume and return
 * it as an smb_filename on ctx, or NULL on failure.
 */
static struct smb_filename *vfs_gluster_realpath(struct vfs_handle_struct *handle,
						 TALLOC_CTX *ctx,
						 const struct smb_filename *smb_fname)
{
	struct smb_filename *result_fname = NULL;
	char *buf = SMB_MALLOC_ARRAY(char, PATH_MAX + 1);

	if (buf == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	/* With a caller-supplied buffer, glfs_realpath fills buf. */
	if (glfs_realpath(handle->data, smb_fname->base_name, buf) != NULL) {
		result_fname = synthetic_smb_fname(ctx, buf, NULL, NULL, 0);
	}

	SAFE_FREE(buf);
	return result_fname;
}
/*
 * POSIX byte-range lock via glfs_posix_lock(). For F_GETLK the return
 * value answers "does someone ELSE hold a conflicting lock?".
 */
static bool vfs_gluster_lock(struct vfs_handle_struct *handle,
			     files_struct *fsp, int op, off_t offset,
			     off_t count, int type)
{
	int ret;
	struct flock fl = {
		.l_type = type,
		.l_whence = SEEK_SET,
		.l_start = offset,
		.l_len = count,
		.l_pid = 0,
	};
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return false;
	}

	ret = glfs_posix_lock(fd, op, &fl);

	if (op == F_GETLK) {
		/*
		 * Lock query: true only if a different process holds a
		 * conflicting lock; our own locks don't count.
		 */
		return (ret != -1) &&
		       (fl.l_type != F_UNLCK) &&
		       (fl.l_pid != 0) &&
		       (fl.l_pid != getpid());
	}

	return ret != -1;
}
static int vfs_gluster_kernel_flock ( struct vfs_handle_struct * handle ,
2015-05-03 06:11:02 +03:00
files_struct * fsp , uint32_t share_mode ,
2013-05-29 15:21:46 +04:00
uint32_t access_mask )
{
errno = ENOSYS ;
return - 1 ;
}
/* Linux leases have no gfapi counterpart. */
static int vfs_gluster_linux_setlease(struct vfs_handle_struct *handle,
				      files_struct *fsp, int leasetype)
{
	errno = ENOSYS;
	return -1;
}
/*
 * F_GETLK query: on success the in/out parameters describe any
 * conflicting lock found (l_type is F_UNLCK when there is none).
 */
static bool vfs_gluster_getlock(struct vfs_handle_struct *handle,
				files_struct *fsp, off_t *poffset,
				off_t *pcount, int *ptype, pid_t *ppid)
{
	int ret;
	struct flock fl = {
		.l_type = *ptype,
		.l_whence = SEEK_SET,
		.l_start = *poffset,
		.l_len = *pcount,
		.l_pid = 0,
	};
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd == NULL) {
		DBG_ERR("Failed to fetch gluster fd\n");
		return false;
	}

	ret = glfs_posix_lock(fd, F_GETLK, &fl);
	if (ret == -1) {
		return false;
	}

	*ptype = fl.l_type;
	*poffset = fl.l_start;
	*pcount = fl.l_len;
	*ppid = fl.l_pid;

	return true;
}
static int vfs_gluster_symlink ( struct vfs_handle_struct * handle ,
2017-06-09 02:25:58 +03:00
const char * link_target ,
const struct smb_filename * new_smb_fname )
2013-05-29 15:21:46 +04:00
{
2017-06-09 02:25:58 +03:00
return glfs_symlink ( handle - > data ,
link_target ,
new_smb_fname - > base_name ) ;
2013-05-29 15:21:46 +04:00
}
/* Read a symlink target; readlink(2) semantics (no NUL appended). */
static int vfs_gluster_readlink(struct vfs_handle_struct *handle,
				const struct smb_filename *smb_fname,
				char *buf,
				size_t bufsiz)
{
	glfs_t *fs = handle->data;

	return glfs_readlink(fs, smb_fname->base_name, buf, bufsiz);
}
static int vfs_gluster_link ( struct vfs_handle_struct * handle ,
2017-06-03 00:21:54 +03:00
const struct smb_filename * old_smb_fname ,
const struct smb_filename * new_smb_fname )
2013-05-29 15:21:46 +04:00
{
2017-06-03 00:21:54 +03:00
return glfs_link ( handle - > data ,
old_smb_fname - > base_name ,
new_smb_fname - > base_name ) ;
2013-05-29 15:21:46 +04:00
}
2017-05-20 01:01:52 +03:00
/* Create a filesystem node, mapped straight onto glfs_mknod(). */
static int vfs_gluster_mknod(struct vfs_handle_struct *handle,
			     const struct smb_filename *smb_fname,
			     mode_t mode,
			     SMB_DEV_T dev)
{
	glfs_t *fs = handle->data;

	return glfs_mknod(fs, smb_fname->base_name, mode, dev);
}
/* BSD-style file flags are not supported by gfapi. */
static int vfs_gluster_chflags(struct vfs_handle_struct *handle,
			       const struct smb_filename *smb_fname,
			       unsigned int flags)
{
	errno = ENOSYS;
	return -1;
}
static int vfs_gluster_get_real_filename ( struct vfs_handle_struct * handle ,
const char * path , const char * name ,
2019-06-03 15:27:18 +03:00
TALLOC_CTX * mem_ctx , char * * found_name )
2013-05-29 15:21:46 +04:00
{
int ret ;
2019-06-03 17:25:46 +03:00
char key_buf [ GLUSTER_NAME_MAX + 64 ] ;
char val_buf [ GLUSTER_NAME_MAX + 1 ] ;
2013-05-29 15:21:46 +04:00
2019-06-03 17:25:46 +03:00
if ( strlen ( name ) > = GLUSTER_NAME_MAX ) {
2013-05-29 15:21:46 +04:00
errno = ENAMETOOLONG ;
return - 1 ;
}
2019-06-03 17:25:46 +03:00
snprintf ( key_buf , GLUSTER_NAME_MAX + 64 ,
2019-06-03 15:27:18 +03:00
" glusterfs.get_real_filename:%s " , name ) ;
2013-05-29 15:21:46 +04:00
2019-06-03 17:25:46 +03:00
ret = glfs_getxattr ( handle - > data , path , key_buf , val_buf ,
GLUSTER_NAME_MAX + 1 ) ;
2013-06-21 20:56:22 +04:00
if ( ret = = - 1 ) {
2019-01-23 13:10:43 +03:00
if ( errno = = ENOATTR ) {
vfs:glusterfs: treat ENOATTR as ENOENT
The original implementation of the virtual xattr get_real_filename
in gluster was misusing the ENOENT errno as the authoritative anwer
that the file/dir that we were asking the real filename for does not
exist. But since the getxattr call is done on the parent directory,
this is a violation of the getxattr API which uses ENOENT for the
case that the file/dir that the getxattr call is done against does
not exist.
Now after a recent regression for fuse-mount re-exports due to
gluster mapping ENOENT to ESTALE in the fuse-bridge, the gluster
implementation is changed to more correctly return ENOATTR if the
requested file does not exist.
This patch changes the glusterfs vfs module to treat ENOATTR as ENOENT
to be fully functional again with latest gluster.
- Without this patch, samba against a new gluster will work correctly,
but the get_real_filename optimization for a non-existing entry
is lost.
- With this patch, Samba will not work correctly any more against
very old gluster servers: Those (correctly) returned ENOATTR
always, which Samba originally interpreted as EOPNOTSUPP, triggering
the expensive directory scan. With this patch, ENOATTR is
interpreted as ENOENT, the authoritative answer that the requested
entry does not exist, which is wrong unless it really does not exist.
Signed-off-by: Michael Adam <obnox@samba.org>
Reviewed-by: Guenther Deschner <gd@samba.org>
2019-06-20 16:14:57 +03:00
errno = ENOENT ;
2013-06-21 20:56:22 +04:00
}
2013-05-29 15:21:46 +04:00
return - 1 ;
}
2019-06-03 15:27:18 +03:00
* found_name = talloc_strdup ( mem_ctx , val_buf ) ;
if ( found_name [ 0 ] = = NULL ) {
2013-05-29 15:21:46 +04:00
errno = ENOMEM ;
return - 1 ;
}
return 0 ;
}
static const char * vfs_gluster_connectpath ( struct vfs_handle_struct * handle ,
2017-06-30 23:37:03 +03:00
const struct smb_filename * smb_fname )
2013-05-29 15:21:46 +04:00
{
return handle - > conn - > connectpath ;
}
/* EA Operations */
/* Path-based xattr read, mapped straight onto glfs_getxattr(). */
static ssize_t vfs_gluster_getxattr(struct vfs_handle_struct *handle,
				    const struct smb_filename *smb_fname,
				    const char *name,
				    void *value,
				    size_t size)
{
	glfs_t *fs = handle->data;

	return glfs_getxattr(fs, smb_fname->base_name, name, value, size);
}
/* Handle-based xattr read on the gluster fd behind fsp. */
static ssize_t vfs_gluster_fgetxattr(struct vfs_handle_struct *handle,
				     files_struct *fsp, const char *name,
				     void *value, size_t size)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_fgetxattr(fd, name, value, size);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
/* Path-based xattr listing, mapped straight onto glfs_listxattr(). */
static ssize_t vfs_gluster_listxattr(struct vfs_handle_struct *handle,
				     const struct smb_filename *smb_fname,
				     char *list,
				     size_t size)
{
	glfs_t *fs = handle->data;

	return glfs_listxattr(fs, smb_fname->base_name, list, size);
}
/* Handle-based xattr listing on the gluster fd behind fsp. */
static ssize_t vfs_gluster_flistxattr(struct vfs_handle_struct *handle,
				      files_struct *fsp, char *list,
				      size_t size)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_flistxattr(fd, list, size);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
static int vfs_gluster_removexattr ( struct vfs_handle_struct * handle ,
2017-05-24 21:35:50 +03:00
const struct smb_filename * smb_fname ,
const char * name )
2013-05-29 15:21:46 +04:00
{
2017-05-24 21:35:50 +03:00
return glfs_removexattr ( handle - > data , smb_fname - > base_name , name ) ;
2013-05-29 15:21:46 +04:00
}
/* Handle-based xattr removal on the gluster fd behind fsp. */
static int vfs_gluster_fremovexattr(struct vfs_handle_struct *handle,
				    files_struct *fsp, const char *name)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_fremovexattr(fd, name);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
/* Path-based xattr write, mapped straight onto glfs_setxattr(). */
static int vfs_gluster_setxattr(struct vfs_handle_struct *handle,
				const struct smb_filename *smb_fname,
				const char *name,
				const void *value, size_t size, int flags)
{
	glfs_t *fs = handle->data;

	return glfs_setxattr(fs, smb_fname->base_name, name,
			     value, size, flags);
}
/* Handle-based xattr write on the gluster fd behind fsp. */
static int vfs_gluster_fsetxattr(struct vfs_handle_struct *handle,
				 files_struct *fsp, const char *name,
				 const void *value, size_t size, int flags)
{
	glfs_fd_t *fd = vfs_gluster_fetch_glfd(handle, fsp);

	if (fd != NULL) {
		return glfs_fsetxattr(fd, name, value, size, flags);
	}

	DBG_ERR("Failed to fetch gluster fd\n");
	return -1;
}
/* AIO Operations */
/* Never force Samba-level AIO; gfapi's own async calls are used. */
static bool vfs_gluster_aio_force(struct vfs_handle_struct *handle,
				  files_struct *fsp)
{
	return false;
}
/*
 * VFS operation table: every supported SMB_VFS_* entry point is
 * mapped onto its vfs_gluster_* wrapper; unimplemented operations are
 * explicitly NULL (or a vfs_not_implemented_* stub).
 */
static struct vfs_fn_pointers glusterfs_fns = {

	/* Disk Operations */

	.connect_fn = vfs_gluster_connect,
	.disconnect_fn = vfs_gluster_disconnect,
	.disk_free_fn = vfs_gluster_disk_free,
	.get_quota_fn = vfs_gluster_get_quota,
	.set_quota_fn = vfs_gluster_set_quota,
	.statvfs_fn = vfs_gluster_statvfs,
	.fs_capabilities_fn = vfs_gluster_fs_capabilities,
	.get_dfs_referrals_fn = NULL,

	/* Directory Operations */

	.opendir_fn = vfs_gluster_opendir,
	.fdopendir_fn = vfs_gluster_fdopendir,
	.readdir_fn = vfs_gluster_readdir,
	.seekdir_fn = vfs_gluster_seekdir,
	.telldir_fn = vfs_gluster_telldir,
	.rewind_dir_fn = vfs_gluster_rewinddir,
	.mkdir_fn = vfs_gluster_mkdir,
	.rmdir_fn = vfs_gluster_rmdir,
	.closedir_fn = vfs_gluster_closedir,

	/* File Operations */

	.open_fn = vfs_gluster_open,
	.create_file_fn = NULL,
	.close_fn = vfs_gluster_close,
	.pread_fn = vfs_gluster_pread,
	.pread_send_fn = vfs_gluster_pread_send,
	.pread_recv_fn = vfs_gluster_recv,
	.pwrite_fn = vfs_gluster_pwrite,
	.pwrite_send_fn = vfs_gluster_pwrite_send,
	.pwrite_recv_fn = vfs_gluster_recv,
	.lseek_fn = vfs_gluster_lseek,
	.sendfile_fn = vfs_gluster_sendfile,
	.recvfile_fn = vfs_gluster_recvfile,
	.rename_fn = vfs_gluster_rename,
	.fsync_send_fn = vfs_gluster_fsync_send,
	.fsync_recv_fn = vfs_gluster_fsync_recv,
	.stat_fn = vfs_gluster_stat,
	.fstat_fn = vfs_gluster_fstat,
	.lstat_fn = vfs_gluster_lstat,
	.get_alloc_size_fn = vfs_gluster_get_alloc_size,
	.unlink_fn = vfs_gluster_unlink,
	.chmod_fn = vfs_gluster_chmod,
	.fchmod_fn = vfs_gluster_fchmod,
	.chown_fn = vfs_gluster_chown,
	.fchown_fn = vfs_gluster_fchown,
	.lchown_fn = vfs_gluster_lchown,
	.chdir_fn = vfs_gluster_chdir,
	.getwd_fn = vfs_gluster_getwd,
	.ntimes_fn = vfs_gluster_ntimes,
	.ftruncate_fn = vfs_gluster_ftruncate,
	.fallocate_fn = vfs_gluster_fallocate,
	.lock_fn = vfs_gluster_lock,
	.kernel_flock_fn = vfs_gluster_kernel_flock,
	.linux_setlease_fn = vfs_gluster_linux_setlease,
	.getlock_fn = vfs_gluster_getlock,
	.symlink_fn = vfs_gluster_symlink,
	.readlink_fn = vfs_gluster_readlink,
	.link_fn = vfs_gluster_link,
	.mknod_fn = vfs_gluster_mknod,
	.realpath_fn = vfs_gluster_realpath,
	.chflags_fn = vfs_gluster_chflags,
	.file_id_create_fn = NULL,
	.streaminfo_fn = NULL,
	.get_real_filename_fn = vfs_gluster_get_real_filename,
	.connectpath_fn = vfs_gluster_connectpath,

	.brl_lock_windows_fn = NULL,
	.brl_unlock_windows_fn = NULL,
	.strict_lock_check_fn = NULL,
	.translate_name_fn = NULL,
	.fsctl_fn = NULL,

	/* NT ACL Operations */

	.fget_nt_acl_fn = NULL,
	.get_nt_acl_fn = NULL,
	.fset_nt_acl_fn = NULL,
	.audit_file_fn = NULL,

	/* Posix ACL Operations */

	.sys_acl_get_file_fn = posixacl_xattr_acl_get_file,
	.sys_acl_get_fd_fn = posixacl_xattr_acl_get_fd,
	.sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file,
	.sys_acl_blob_get_fd_fn = posix_sys_acl_blob_get_fd,
	.sys_acl_set_file_fn = posixacl_xattr_acl_set_file,
	.sys_acl_set_fd_fn = posixacl_xattr_acl_set_fd,
	.sys_acl_delete_def_file_fn = posixacl_xattr_acl_delete_def_file,

	/* EA Operations */

	.getxattr_fn = vfs_gluster_getxattr,
	.getxattrat_send_fn = vfs_not_implemented_getxattrat_send,
	.getxattrat_recv_fn = vfs_not_implemented_getxattrat_recv,
	.fgetxattr_fn = vfs_gluster_fgetxattr,
	.listxattr_fn = vfs_gluster_listxattr,
	.flistxattr_fn = vfs_gluster_flistxattr,
	.removexattr_fn = vfs_gluster_removexattr,
	.fremovexattr_fn = vfs_gluster_fremovexattr,
	.setxattr_fn = vfs_gluster_setxattr,
	.fsetxattr_fn = vfs_gluster_fsetxattr,

	/* AIO Operations */

	.aio_force_fn = vfs_gluster_aio_force,

	/* Durable handle Operations */

	.durable_cookie_fn = NULL,
	.durable_disconnect_fn = NULL,
	.durable_reconnect_fn = NULL,
};
2017-12-16 01:32:12 +03:00
static_decl_vfs ;
2017-04-20 22:24:43 +03:00
NTSTATUS vfs_glusterfs_init ( TALLOC_CTX * ctx )
2013-05-29 15:21:46 +04:00
{
return smb_register_vfs ( SMB_VFS_INTERFACE_VERSION ,
" glusterfs " , & glusterfs_fns ) ;
}