features/shard: Fix permission issues
This patch does the following:
* reverts commit b467af0e99
* changes ownership on shards under /.shard to be root:root
* makes readv, writev, [f]truncate, rename, and unlink fops
to perform operations on files under /.shard with
frame->root->{uid,gid} as 0.
This would ensure that a [f]setattr on a sharded file
does not need to be called on all the shards associated with it.
Change-Id: Idcfb8c0dd354b0baab6b2356d2ab83ce51caa20e
BUG: 1251824
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/11992
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
This commit is contained in:
parent
b0e125e937
commit
beb7abe876
@ -1,6 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
. $(dirname $0)/../../include.rc
|
||||
. $(dirname $0)/../../common-utils.rc
|
||||
|
||||
cleanup;
|
||||
|
||||
@ -8,52 +9,97 @@ TEST glusterd
|
||||
TEST pidof glusterd
|
||||
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
|
||||
TEST $CLI volume set $V0 features.shard on
|
||||
TEST $CLI volume set $V0 performance.strict-write-ordering on
|
||||
TEST $CLI volume start $V0
|
||||
|
||||
TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
|
||||
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
|
||||
TEST useradd -M test_user 2>/dev/null
|
||||
|
||||
# Create 3 files as root.
|
||||
TEST touch $M0/foo
|
||||
TEST touch $M0/bar
|
||||
TEST touch $M0/baz
|
||||
TEST touch $M0/qux
|
||||
TEST mkdir $M0/dir
|
||||
|
||||
# Change ownership to non-root on foo and bar.
|
||||
TEST chown test_user:test_user $M0/foo
|
||||
TEST chown test_user:test_user $M0/bar
|
||||
|
||||
# Write 6M of data on foo as non-root, 2M overflowing into block-1.
|
||||
su -m test_user -c "dd if=/dev/zero of=$M0/foo bs=1M count=6"
|
||||
TEST run_cmd_as_user test_user "dd if=/dev/zero of=$M0/foo bs=1M count=6"
|
||||
|
||||
# Ensure owner and group are same on the shard as the main file.
|
||||
# Ensure owner and group are root on the block-1 shard.
|
||||
gfid_foo=`getfattr -n glusterfs.gfid.string $M0/foo 2>/dev/null \
|
||||
| grep glusterfs.gfid.string | cut -d '"' -f 2`
|
||||
|
||||
EXPECT "test_user" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %U`
|
||||
EXPECT "test_user" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %G`
|
||||
EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %U`
|
||||
EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %G`
|
||||
|
||||
# Ensure /.shard is owned by root.
|
||||
EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %U`
|
||||
EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %G`
|
||||
EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %U`
|
||||
EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %G`
|
||||
|
||||
# Write 6M of data on bar as root.
|
||||
TEST dd if=/dev/zero of=$M0/bar bs=1M count=6
|
||||
|
||||
# Ensure owner and group are same on the shard as the main file.
|
||||
# Ensure owner and group are root on the block-1 shard.
|
||||
gfid_bar=`getfattr -n glusterfs.gfid.string $M0/bar 2>/dev/null \
|
||||
| grep glusterfs.gfid.string | cut -d '"' -f 2`
|
||||
|
||||
EXPECT "test_user" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %U`
|
||||
EXPECT "test_user" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %G`
|
||||
EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %U`
|
||||
EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %G`
|
||||
|
||||
# Write 6M of data on baz as root.
|
||||
TEST dd if=/dev/zero of=$M0/baz bs=1M count=6
|
||||
|
||||
# Ensure owner and group are same on the shard as the main file.
|
||||
gfid_baz=`getfattr -n glusterfs.gfid.string $M0/baz 2>/dev/null \
|
||||
| grep glusterfs.gfid.string | cut -d '"' -f 2`
|
||||
|
||||
# Ensure owner and group are root on the block-1 shard.
|
||||
EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %U`
|
||||
EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %G`
|
||||
userdel test_user
|
||||
|
||||
TEST umount $M0
|
||||
# Test to ensure unlink from an unauthorized user does not lead to only
|
||||
# the shards under /.shard getting unlinked while that on the base file fails
|
||||
# with EPERM/ACCES.
|
||||
|
||||
TEST ! run_cmd_as_user test_user "unlink $M0/baz"
|
||||
TEST find $B0/*/.shard/$gfid_baz.1
|
||||
|
||||
# Test to ensure rename of a file where the dest file exists and is sharded,
|
||||
# from an unauthorized user does not lead to only the shards under /.shard
|
||||
# getting unlinked while that on the base file fails with EPERM/ACCES.
|
||||
|
||||
TEST ! run_cmd_as_user test_user "mv -f $M0/qux $M0/baz"
|
||||
TEST find $B0/*/.shard/$gfid_baz.1
|
||||
TEST stat $M0/qux
|
||||
|
||||
# Shard translator executes steps in the following order while doing a truncate
|
||||
# to a lower size:
|
||||
# 1) unlinking shards under /.shard first with frame->root->{uid,gid} being 0,
|
||||
# 2) truncate the original file by the right amount.
|
||||
# The following two tests are towards ensuring that truncate attempt from an
|
||||
# unauthorised user doesn't result in only the shards under /.shard getting
|
||||
# removed (since they're being performed as root) while step 2) above fails,
|
||||
# leaving the file in an inconsistent state.
|
||||
|
||||
TEST ! run_cmd_as_user test_user "truncate -s 1M $M0/baz"
|
||||
TEST find $B0/*/.shard/$gfid_baz.1
|
||||
|
||||
# Perform a cp as non-root user. This should trigger readv() which will trigger
|
||||
# reads on first shard of "foo" under /.shard, and this must not fail if shard
|
||||
# translator correctly sets frame->root->uid,gid to 0 before reading off the
|
||||
# first shard, since it's owned by root.
|
||||
TEST chown test_user:test_user $M0/dir
|
||||
TEST run_cmd_as_user test_user "cp $M0/foo $M0/dir/quux"
|
||||
|
||||
md5sum_foo=$(md5sum $M0/foo | awk '{print $1}')
|
||||
EXPECT "$md5sum_foo" echo `md5sum $M0/dir/quux | awk '{print $1}'`
|
||||
|
||||
userdel test_user
|
||||
|
||||
TEST $CLI volume stop $V0
|
||||
TEST $CLI volume delete $V0
|
||||
|
7
tests/common-utils.rc
Normal file
7
tests/common-utils.rc
Normal file
@ -0,0 +1,7 @@
|
||||
|
||||
# Run an arbitrary command string as the given user, preserving the
# caller's environment (su -m).
# $1   - user name to run as
# $2.. - command and its arguments; joined by "$*" into the single
#        command string that `su -c` expects.
# Returns 0 on success, 1 if su (or the command) fails.
function run_cmd_as_user {
        local user="$1"
        shift
        # Quote "$user" to avoid word splitting if the name ever
        # contains IFS characters.
        su -m "$user" -c "$*" || return 1
        return 0
}
|
@ -913,6 +913,8 @@ shard_truncate_last_shard_cbk (call_frame_t *frame, void *cookie,
|
||||
|
||||
local = frame->local;
|
||||
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
|
||||
if (op_ret < 0) {
|
||||
local->op_ret = op_ret;
|
||||
local->op_errno = op_errno;
|
||||
@ -948,6 +950,19 @@ shard_truncate_last_shard (call_frame_t *frame, xlator_t *this, inode_t *inode)
|
||||
|
||||
local = frame->local;
|
||||
|
||||
/* A NULL inode could be due to the fact that the last shard which
|
||||
* needs to be truncated does not exist due to it lying in a hole
|
||||
* region. So the only thing left to do in that case would be an
|
||||
* update to file size xattr.
|
||||
*/
|
||||
if (!inode) {
|
||||
shard_update_file_size (frame, this, NULL, &local->loc,
|
||||
shard_post_update_size_truncate_handler);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
loc.inode = inode_ref (inode);
|
||||
gf_uuid_copy (loc.gfid, inode->gfid);
|
||||
|
||||
@ -1012,6 +1027,8 @@ shard_truncate_htol (call_frame_t *frame, xlator_t *this, inode_t *inode)
|
||||
|
||||
local->call_count = call_count;
|
||||
i = 1;
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
while (cur_block <= last_block) {
|
||||
if (!local->inode_list[i]) {
|
||||
cur_block++;
|
||||
@ -1145,7 +1162,9 @@ shard_common_lookup_shards_cbk (call_frame_t *frame, void *cookie,
|
||||
if (op_ret < 0) {
|
||||
/* Ignore absence of shards in the backend in truncate fop. */
|
||||
if (((local->fop == GF_FOP_TRUNCATE) ||
|
||||
(local->fop == GF_FOP_FTRUNCATE)) && (op_errno == ENOENT))
|
||||
(local->fop == GF_FOP_FTRUNCATE) ||
|
||||
(local->fop == GF_FOP_RENAME) ||
|
||||
(local->fop == GF_FOP_UNLINK)) && (op_errno == ENOENT))
|
||||
goto done;
|
||||
local->op_ret = op_ret;
|
||||
local->op_errno = op_errno;
|
||||
@ -1711,6 +1730,8 @@ shard_unlink_shards_do_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
done:
|
||||
call_count = shard_call_count_return (frame);
|
||||
if (call_count == 0) {
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
|
||||
if (local->fop == GF_FOP_UNLINK)
|
||||
shard_unlink_base_file (frame, this);
|
||||
else if (local->fop == GF_FOP_RENAME)
|
||||
@ -1768,6 +1789,8 @@ shard_unlink_shards_do (call_frame_t *frame, xlator_t *this, inode_t *inode)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
while (cur_block <= last_block) {
|
||||
/* The base file is unlinked in the end to mark the
|
||||
* successful completion of the fop.
|
||||
@ -2377,6 +2400,7 @@ out:
|
||||
fd_unref (anon_fd);
|
||||
call_count = shard_call_count_return (frame);
|
||||
if (call_count == 0) {
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
if (local->op_ret < 0) {
|
||||
SHARD_STACK_UNWIND (readv, frame, local->op_ret,
|
||||
local->op_errno, NULL, 0, NULL,
|
||||
@ -2423,6 +2447,8 @@ shard_readv_do (call_frame_t *frame, xlator_t *this)
|
||||
remaining_size = local->total_size;
|
||||
local->call_count = call_count = local->num_blocks;
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
while (cur_block <= last_block) {
|
||||
if (wind_failed) {
|
||||
shard_readv_do_cbk (frame, (void *) (long) 0, this, -1,
|
||||
@ -2552,8 +2578,7 @@ shard_common_mknod_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
done:
|
||||
call_count = shard_call_count_return (frame);
|
||||
if (call_count == 0) {
|
||||
frame->root->uid = local->uid;
|
||||
frame->root->gid = local->gid;
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
local->post_mknod_handler (frame, this);
|
||||
}
|
||||
|
||||
@ -2587,10 +2612,7 @@ shard_common_resume_mknod (call_frame_t *frame, xlator_t *this,
|
||||
call_count = local->call_count = local->create_count;
|
||||
local->post_mknod_handler = post_mknod_handler;
|
||||
|
||||
local->uid = frame->root->uid;
|
||||
local->gid = frame->root->gid;
|
||||
frame->root->uid = local->prebuf.ia_uid;
|
||||
frame->root->gid = local->prebuf.ia_gid;
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
ret = shard_inode_ctx_get_all (fd->inode, this, &ctx_tmp);
|
||||
if (ret) {
|
||||
@ -2676,6 +2698,7 @@ err:
|
||||
* This block is for handling failure in shard_inode_ctx_get_all().
|
||||
* Failures in the while-loop are handled within the loop.
|
||||
*/
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
post_mknod_handler (frame, this);
|
||||
return 0;
|
||||
}
|
||||
@ -2925,6 +2948,7 @@ shard_writev_do_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
|
||||
call_count = shard_call_count_return (frame);
|
||||
if (call_count == 0) {
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
if (local->op_ret < 0) {
|
||||
SHARD_STACK_UNWIND (writev, frame, local->written_size,
|
||||
local->op_errno, NULL, NULL, NULL);
|
||||
@ -2974,10 +2998,13 @@ shard_writev_do (call_frame_t *frame, xlator_t *this)
|
||||
local->call_count = call_count = local->num_blocks;
|
||||
last_block = local->last_block;
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
if (dict_set_uint32 (local->xattr_req,
|
||||
GLUSTERFS_WRITE_UPDATE_ATOMIC, 4)) {
|
||||
local->op_ret = -1;
|
||||
local->op_errno = ENOMEM;
|
||||
local->call_count = 1;
|
||||
shard_writev_do_cbk (frame, (void *)(long)0, this, -1, ENOMEM,
|
||||
NULL, NULL, NULL);
|
||||
return 0;
|
||||
@ -3152,6 +3179,8 @@ shard_writev_mkdir_dot_shard_cbk (call_frame_t *frame, void *cookie,
|
||||
|
||||
local = frame->local;
|
||||
|
||||
SHARD_UNSET_ROOT_FS_ID (frame, local);
|
||||
|
||||
if (op_ret == -1) {
|
||||
if (op_errno != EEXIST) {
|
||||
goto unwind;
|
||||
@ -3201,9 +3230,11 @@ shard_writev_mkdir_dot_shard (call_frame_t *frame, xlator_t *this)
|
||||
goto err;
|
||||
}
|
||||
|
||||
SHARD_SET_ROOT_FS_ID (frame, local);
|
||||
|
||||
STACK_WIND (frame, shard_writev_mkdir_dot_shard_cbk, FIRST_CHILD(this),
|
||||
FIRST_CHILD(this)->fops->mkdir, &local->dot_shard_loc,
|
||||
0777, 0, xattr_req);
|
||||
0755, 0, xattr_req);
|
||||
dict_unref (xattr_req);
|
||||
return 0;
|
||||
|
||||
|
@ -111,6 +111,23 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/* Temporarily switch the frame's credentials to root (uid/gid 0) so that
 * fops on the internal shard files under /.shard - which are owned by
 * root:root - succeed irrespective of the caller's identity.  The
 * caller's original uid/gid are stashed in @local so they can be
 * restored later; the is_set_fsid flag guards against applying the
 * switch twice on the same frame.
 */
#define SHARD_SET_ROOT_FS_ID(frame, local) do {               \
        if (!local->is_set_fsid) {                            \
                local->uid = frame->root->uid;                \
                local->gid = frame->root->gid;                \
                frame->root->uid = 0;                         \
                frame->root->gid = 0;                         \
                local->is_set_fsid = _gf_true;                \
        }                                                     \
} while (0)

/* Restore the caller's original uid/gid previously saved by
 * SHARD_SET_ROOT_FS_ID.  A no-op when the credentials were never
 * switched (is_set_fsid is false), so it is safe to call on every
 * unwind path.
 */
#define SHARD_UNSET_ROOT_FS_ID(frame, local) do {             \
        if (local->is_set_fsid) {                             \
                frame->root->uid = local->uid;                \
                frame->root->gid = local->gid;                \
                local->is_set_fsid = _gf_false;               \
        }                                                     \
} while (0)
|
||||
|
||||
typedef struct shard_priv {
|
||||
uint64_t block_size;
|
||||
@ -179,6 +196,7 @@ typedef struct shard_local {
|
||||
struct iobref *iobref;
|
||||
struct iobuf *iobuf;
|
||||
gf_dirent_t entries_head;
|
||||
gf_boolean_t is_set_fsid;
|
||||
gf_boolean_t list_inited;
|
||||
gf_boolean_t is_write_extending;
|
||||
shard_post_fop_handler_t handler;
|
||||
|
Loading…
Reference in New Issue
Block a user