linux/fs/xfs/libxfs/xfs_attr.c

1601 lines
39 KiB
C
Raw Normal View History

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
xfs: event tracing support Convert the old xfs tracing support that could only be used with the out of tree kdb and xfsidbg patches to use the generic event tracer. To use it make sure CONFIG_EVENT_TRACING is enabled and then enable all xfs trace channels by: echo 1 > /sys/kernel/debug/tracing/events/xfs/enable or alternatively enable single events by just doing the same in one event subdirectory, e.g. echo 1 > /sys/kernel/debug/tracing/events/xfs/xfs_ihold/enable or set more complex filters, etc. In Documentation/trace/events.txt all this is desctribed in more detail. To reads the events do a cat /sys/kernel/debug/tracing/trace Compared to the last posting this patch converts the tracing mostly to the one tracepoint per callsite model that other users of the new tracing facility also employ. This allows a very fine-grained control of the tracing, a cleaner output of the traces and also enables the perf tool to use each tracepoint as a virtual performance counter, allowing us to e.g. count how often certain workloads git various spots in XFS. Take a look at http://lwn.net/Articles/346470/ for some examples. Also the btree tracing isn't included at all yet, as it will require additional core tracing features not in mainline yet, I plan to deliver it later. And the really nice thing about this patch is that it actually removes many lines of code while adding this nice functionality: fs/xfs/Makefile | 8 fs/xfs/linux-2.6/xfs_acl.c | 1 fs/xfs/linux-2.6/xfs_aops.c | 52 - fs/xfs/linux-2.6/xfs_aops.h | 2 fs/xfs/linux-2.6/xfs_buf.c | 117 +-- fs/xfs/linux-2.6/xfs_buf.h | 33 fs/xfs/linux-2.6/xfs_fs_subr.c | 3 fs/xfs/linux-2.6/xfs_ioctl.c | 1 fs/xfs/linux-2.6/xfs_ioctl32.c | 1 fs/xfs/linux-2.6/xfs_iops.c | 1 fs/xfs/linux-2.6/xfs_linux.h | 1 fs/xfs/linux-2.6/xfs_lrw.c | 87 -- fs/xfs/linux-2.6/xfs_lrw.h | 45 - fs/xfs/linux-2.6/xfs_super.c | 104 --- fs/xfs/linux-2.6/xfs_super.h | 7 fs/xfs/linux-2.6/xfs_sync.c | 1 fs/xfs/linux-2.6/xfs_trace.c | 75 ++ fs/xfs/linux-2.6/xfs_trace.h | 1369 +++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_vnode.h | 4 fs/xfs/quota/xfs_dquot.c | 110 --- fs/xfs/quota/xfs_dquot.h | 21 fs/xfs/quota/xfs_qm.c | 40 - fs/xfs/quota/xfs_qm_syscalls.c | 4 fs/xfs/support/ktrace.c | 323 --------- fs/xfs/support/ktrace.h | 85 -- fs/xfs/xfs.h | 16 fs/xfs/xfs_ag.h | 14 fs/xfs/xfs_alloc.c | 230 +----- fs/xfs/xfs_alloc.h | 27 fs/xfs/xfs_alloc_btree.c | 1 fs/xfs/xfs_attr.c | 107 --- fs/xfs/xfs_attr.h | 10 fs/xfs/xfs_attr_leaf.c | 14 fs/xfs/xfs_attr_sf.h | 40 - fs/xfs/xfs_bmap.c | 507 +++------------ fs/xfs/xfs_bmap.h | 49 - fs/xfs/xfs_bmap_btree.c | 6 fs/xfs/xfs_btree.c | 5 fs/xfs/xfs_btree_trace.h | 17 fs/xfs/xfs_buf_item.c | 87 -- fs/xfs/xfs_buf_item.h | 20 fs/xfs/xfs_da_btree.c | 3 fs/xfs/xfs_da_btree.h | 7 fs/xfs/xfs_dfrag.c | 2 fs/xfs/xfs_dir2.c | 8 fs/xfs/xfs_dir2_block.c | 20 fs/xfs/xfs_dir2_leaf.c | 21 fs/xfs/xfs_dir2_node.c | 27 fs/xfs/xfs_dir2_sf.c | 26 fs/xfs/xfs_dir2_trace.c | 216 ------ fs/xfs/xfs_dir2_trace.h | 72 -- fs/xfs/xfs_filestream.c | 8 fs/xfs/xfs_fsops.c | 2 fs/xfs/xfs_iget.c | 111 --- fs/xfs/xfs_inode.c | 67 -- fs/xfs/xfs_inode.h | 76 -- fs/xfs/xfs_inode_item.c | 5 fs/xfs/xfs_iomap.c | 85 -- fs/xfs/xfs_iomap.h | 8 fs/xfs/xfs_log.c | 181 +---- fs/xfs/xfs_log_priv.h | 20 fs/xfs/xfs_log_recover.c | 1 fs/xfs/xfs_mount.c | 2 fs/xfs/xfs_quota.h | 8 fs/xfs/xfs_rename.c | 1 fs/xfs/xfs_rtalloc.c | 1 fs/xfs/xfs_rw.c | 3 fs/xfs/xfs_trans.h | 47 + fs/xfs/xfs_trans_buf.c | 62 - fs/xfs/xfs_vnodeops.c | 8 70 files changed, 2151 insertions(+), 2592 deletions(-) Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Alex Elder <aelder@sgi.com>
2009-12-15 02:14:59 +03:00
#include "xfs_trace.h"
xfs: Set up infrastructure for log attribute replay Currently attributes are modified directly across one or more transactions. But they are not logged or replayed in the event of an error. The goal of log attr replay is to enable logging and replaying of attribute operations using the existing delayed operations infrastructure. This will later enable the attributes to become part of larger multi part operations that also must first be recorded to the log. This is mostly of interest in the scheme of parent pointers which would need to maintain an attribute containing parent inode information any time an inode is moved, created, or removed. Parent pointers would then be of interest to any feature that would need to quickly derive an inode path from the mount point. Online scrub, nfs lookups and fs grow or shrink operations are all features that could take advantage of this. This patch adds two new log item types for setting or removing attributes as deferred operations. The xfs_attri_log_item will log an intent to set or remove an attribute. The corresponding xfs_attrd_log_item holds a reference to the xfs_attri_log_item and is freed once the transaction is done. Both log items use a generic xfs_attr_log_format structure that contains the attribute name, value, flags, inode, and an op_flag that indicates if the operations is a set or remove. [dchinner: added extra little bits needed for intent whiteouts] Signed-off-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-04 05:41:02 +03:00
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
xfs: Set up infrastructure for log attribute replay Currently attributes are modified directly across one or more transactions. But they are not logged or replayed in the event of an error. The goal of log attr replay is to enable logging and replaying of attribute operations using the existing delayed operations infrastructure. This will later enable the attributes to become part of larger multi part operations that also must first be recorded to the log. This is mostly of interest in the scheme of parent pointers which would need to maintain an attribute containing parent inode information any time an inode is moved, created, or removed. Parent pointers would then be of interest to any feature that would need to quickly derive an inode path from the mount point. Online scrub, nfs lookups and fs grow or shrink operations are all features that could take advantage of this. This patch adds two new log item types for setting or removing attributes as deferred operations. The xfs_attri_log_item will log an intent to set or remove an attribute. The corresponding xfs_attrd_log_item holds a reference to the xfs_attri_log_item and is freed once the transaction is done. Both log items use a generic xfs_attr_log_format structure that contains the attribute name, value, flags, inode, and an op_flag that indicates if the operations is a set or remove. [dchinner: added extra little bits needed for intent whiteouts] Signed-off-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-04 05:41:02 +03:00
struct kmem_cache *xfs_attr_intent_cache;
/*
* xfs_attr.c
*
* Provide the external interfaces to manage attribute lists.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Internal routines when attribute list fits inside the inode.
*/
STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
/*
* Internal routines when attribute list is one block.
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp);
STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args);
/*
* Internal routines when attribute list is more than one block.
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC void xfs_attr_restore_rmt_blk(struct xfs_da_args *args);
static int xfs_attr_node_try_addname(struct xfs_attr_intent *attr);
STATIC int xfs_attr_node_addname_find_attr(struct xfs_attr_intent *attr);
STATIC int xfs_attr_node_remove_attr(struct xfs_attr_intent *attr);
STATIC int xfs_attr_node_lookup(struct xfs_da_args *args,
struct xfs_da_state *state);
int
xfs_inode_hasattr(
struct xfs_inode *ip)
{
if (!xfs_inode_has_attr_fork(ip))
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
return 0;
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
if (ip->i_af.if_format == XFS_DINODE_FMT_EXTENTS &&
ip->i_af.if_nextents == 0)
return 0;
return 1;
}
/*
* Returns true if the there is exactly only block in the attr fork, in which
* case the attribute fork consists of a single leaf block entry.
*/
bool
xfs_attr_is_leaf(
struct xfs_inode *ip)
{
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
struct xfs_ifork *ifp = &ip->i_af;
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec imap;
if (ifp->if_nextents != 1 || ifp->if_format != XFS_DINODE_FMT_EXTENTS)
return false;
xfs_iext_first(ifp, &icur);
xfs_iext_get_extent(ifp, &icur, &imap);
return imap.br_startoff == 0 && imap.br_blockcount == 1;
}
/*
* XXX (dchinner): name path state saving and refilling is an optimisation to
* avoid needing to look up name entries after rolling transactions removing
* remote xattr blocks between the name entry lookup and name entry removal.
* This optimisation got sidelined when combining the set and remove state
* machines, but the code has been left in place because it is worthwhile to
* restore the optimisation once the combined state machine paths have settled.
*
* This comment is a public service announcement to remind Future Dave that he
* still needs to restore this code to working order.
*/
#if 0
/*
* Fill in the disk block numbers in the state structure for the buffers
* that are attached to the state structure.
* This is done so that we can quickly reattach ourselves to those buffers
* after some set of transaction commits have released these buffers.
*/
static int
xfs_attr_fillstate(xfs_da_state_t *state)
{
xfs_da_state_path_t *path;
xfs_da_state_blk_t *blk;
int level;
trace_xfs_attr_fillstate(state->args);
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
*/
path = &state->path;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_buf_daddr(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
}
}
/*
* Roll down the "altpath" in the state structure, storing the on-disk
* block number for those buffers in the "altpath".
*/
path = &state->altpath;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_buf_daddr(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
}
}
return 0;
}
/*
* Reattach the buffers to the state structure based on the disk block
* numbers stored in the state structure.
* This is done after some set of transaction commits have released those
* buffers from our grip.
*/
static int
xfs_attr_refillstate(xfs_da_state_t *state)
{
xfs_da_state_path_t *path;
xfs_da_state_blk_t *blk;
int level, error;
trace_xfs_attr_refillstate(state->args);
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
*/
path = &state->path;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
error = xfs_da3_node_read_mapped(state->args->trans,
state->args->dp, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
blk->bp = NULL;
}
}
/*
* Roll down the "altpath" in the state structure, storing the on-disk
* block number for those buffers in the "altpath".
*/
path = &state->altpath;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
error = xfs_da3_node_read_mapped(state->args->trans,
state->args->dp, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
blk->bp = NULL;
}
}
return 0;
}
#else
static int xfs_attr_fillstate(xfs_da_state_t *state) { return 0; }
#endif
/*========================================================================
* Overall external interface routines.
*========================================================================*/
/*
* Retrieve an extended attribute and its value. Must have ilock.
* Returns 0 on successful retrieval, otherwise an error.
*/
int
xfs_attr_get_ilocked(
struct xfs_da_args *args)
{
ASSERT(xfs_isilocked(args->dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
if (!xfs_inode_hasattr(args->dp))
return -ENOATTR;
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
if (args->dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_getvalue(args);
if (xfs_attr_is_leaf(args->dp))
return xfs_attr_leaf_get(args);
return xfs_attr_node_get(args);
}
xfs: allocate xattr buffer on demand When doing file lookups and checking for permissions, we end up in xfs_get_acl() to see if there are any ACLs on the inode. This requires and xattr lookup, and to do that we have to supply a buffer large enough to hold an maximum sized xattr. On workloads were we are accessing a wide range of cache cold files under memory pressure (e.g. NFS fileservers) we end up spending a lot of time allocating the buffer. The buffer is 64k in length, so is a contiguous multi-page allocation, and if that then fails we fall back to vmalloc(). Hence the allocation here is /expensive/ when we are looking up hundreds of thousands of files a second. Initial numbers from a bpf trace show average time in xfs_get_acl() is ~32us, with ~19us of that in the memory allocation. Note these are average times, so there are going to be affected by the worst case allocations more than the common fast case... To avoid this, we could just do a "null" lookup to see if the ACL xattr exists and then only do the allocation if it exists. This, however, optimises the path for the "no ACL present" case at the expense of the "acl present" case. i.e. we can halve the time in xfs_get_acl() for the no acl case (i.e down to ~10-15us), but that then increases the ACL case by 30% (i.e. up to 40-45us). To solve this and speed up both cases, drive the xattr buffer allocation into the attribute code once we know what the actual xattr length is. For the no-xattr case, we avoid the allocation completely, speeding up that case. For the common ACL case, we'll end up with a fast heap allocation (because it'll be smaller than a page), and only for the rarer "we have a remote xattr" will we have a multi-page allocation occur. Hence the common ACL case will be much faster, too. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-08-29 19:04:10 +03:00
/*
* Retrieve an extended attribute by name, and its value if requested.
*
* If args->valuelen is zero, then the caller does not want the value, just an
* indication whether the attribute exists and the size of the value if it
* exists. The size is returned in args.valuelen.
xfs: allocate xattr buffer on demand When doing file lookups and checking for permissions, we end up in xfs_get_acl() to see if there are any ACLs on the inode. This requires and xattr lookup, and to do that we have to supply a buffer large enough to hold an maximum sized xattr. On workloads were we are accessing a wide range of cache cold files under memory pressure (e.g. NFS fileservers) we end up spending a lot of time allocating the buffer. The buffer is 64k in length, so is a contiguous multi-page allocation, and if that then fails we fall back to vmalloc(). Hence the allocation here is /expensive/ when we are looking up hundreds of thousands of files a second. Initial numbers from a bpf trace show average time in xfs_get_acl() is ~32us, with ~19us of that in the memory allocation. Note these are average times, so there are going to be affected by the worst case allocations more than the common fast case... To avoid this, we could just do a "null" lookup to see if the ACL xattr exists and then only do the allocation if it exists. This, however, optimises the path for the "no ACL present" case at the expense of the "acl present" case. i.e. we can halve the time in xfs_get_acl() for the no acl case (i.e down to ~10-15us), but that then increases the ACL case by 30% (i.e. up to 40-45us). To solve this and speed up both cases, drive the xattr buffer allocation into the attribute code once we know what the actual xattr length is. For the no-xattr case, we avoid the allocation completely, speeding up that case. For the common ACL case, we'll end up with a fast heap allocation (because it'll be smaller than a page), and only for the rarer "we have a remote xattr" will we have a multi-page allocation occur. Hence the common ACL case will be much faster, too. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-08-29 19:04:10 +03:00
*
* If args->value is NULL but args->valuelen is non-zero, allocate the buffer
* for the value after existence of the attribute has been determined. The
* caller always has to free args->value if it is set, no matter if this
* function was successful or not.
*
xfs: allocate xattr buffer on demand When doing file lookups and checking for permissions, we end up in xfs_get_acl() to see if there are any ACLs on the inode. This requires and xattr lookup, and to do that we have to supply a buffer large enough to hold an maximum sized xattr. On workloads were we are accessing a wide range of cache cold files under memory pressure (e.g. NFS fileservers) we end up spending a lot of time allocating the buffer. The buffer is 64k in length, so is a contiguous multi-page allocation, and if that then fails we fall back to vmalloc(). Hence the allocation here is /expensive/ when we are looking up hundreds of thousands of files a second. Initial numbers from a bpf trace show average time in xfs_get_acl() is ~32us, with ~19us of that in the memory allocation. Note these are average times, so there are going to be affected by the worst case allocations more than the common fast case... To avoid this, we could just do a "null" lookup to see if the ACL xattr exists and then only do the allocation if it exists. This, however, optimises the path for the "no ACL present" case at the expense of the "acl present" case. i.e. we can halve the time in xfs_get_acl() for the no acl case (i.e down to ~10-15us), but that then increases the ACL case by 30% (i.e. up to 40-45us). To solve this and speed up both cases, drive the xattr buffer allocation into the attribute code once we know what the actual xattr length is. For the no-xattr case, we avoid the allocation completely, speeding up that case. For the common ACL case, we'll end up with a fast heap allocation (because it'll be smaller than a page), and only for the rarer "we have a remote xattr" will we have a multi-page allocation occur. Hence the common ACL case will be much faster, too. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-08-29 19:04:10 +03:00
* If the attribute is found, but exceeds the size limit set by the caller in
* args->valuelen, return -ERANGE with the size of the attribute that was found
* in args->valuelen.
xfs: allocate xattr buffer on demand When doing file lookups and checking for permissions, we end up in xfs_get_acl() to see if there are any ACLs on the inode. This requires and xattr lookup, and to do that we have to supply a buffer large enough to hold an maximum sized xattr. On workloads were we are accessing a wide range of cache cold files under memory pressure (e.g. NFS fileservers) we end up spending a lot of time allocating the buffer. The buffer is 64k in length, so is a contiguous multi-page allocation, and if that then fails we fall back to vmalloc(). Hence the allocation here is /expensive/ when we are looking up hundreds of thousands of files a second. Initial numbers from a bpf trace show average time in xfs_get_acl() is ~32us, with ~19us of that in the memory allocation. Note these are average times, so there are going to be affected by the worst case allocations more than the common fast case... To avoid this, we could just do a "null" lookup to see if the ACL xattr exists and then only do the allocation if it exists. This, however, optimises the path for the "no ACL present" case at the expense of the "acl present" case. i.e. we can halve the time in xfs_get_acl() for the no acl case (i.e down to ~10-15us), but that then increases the ACL case by 30% (i.e. up to 40-45us). To solve this and speed up both cases, drive the xattr buffer allocation into the attribute code once we know what the actual xattr length is. For the no-xattr case, we avoid the allocation completely, speeding up that case. For the common ACL case, we'll end up with a fast heap allocation (because it'll be smaller than a page), and only for the rarer "we have a remote xattr" will we have a multi-page allocation occur. Hence the common ACL case will be much faster, too. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-08-29 19:04:10 +03:00
*/
int
xfs_attr_get(
struct xfs_da_args *args)
{
uint lock_mode;
int error;
XFS_STATS_INC(args->dp->i_mount, xs_attr_get);
if (xfs_is_shutdown(args->dp->i_mount))
return -EIO;
args->geo = args->dp->i_mount->m_attr_geo;
args->whichfork = XFS_ATTR_FORK;
args->hashval = xfs_da_hashname(args->name, args->namelen);
/* Entirely possible to look up a name which doesn't exist */
args->op_flags = XFS_DA_OP_OKNOENT;
lock_mode = xfs_ilock_attr_map_shared(args->dp);
error = xfs_attr_get_ilocked(args);
xfs_iunlock(args->dp, lock_mode);
xfs: allocate xattr buffer on demand When doing file lookups and checking for permissions, we end up in xfs_get_acl() to see if there are any ACLs on the inode. This requires and xattr lookup, and to do that we have to supply a buffer large enough to hold an maximum sized xattr. On workloads were we are accessing a wide range of cache cold files under memory pressure (e.g. NFS fileservers) we end up spending a lot of time allocating the buffer. The buffer is 64k in length, so is a contiguous multi-page allocation, and if that then fails we fall back to vmalloc(). Hence the allocation here is /expensive/ when we are looking up hundreds of thousands of files a second. Initial numbers from a bpf trace show average time in xfs_get_acl() is ~32us, with ~19us of that in the memory allocation. Note these are average times, so there are going to be affected by the worst case allocations more than the common fast case... To avoid this, we could just do a "null" lookup to see if the ACL xattr exists and then only do the allocation if it exists. This, however, optimises the path for the "no ACL present" case at the expense of the "acl present" case. i.e. we can halve the time in xfs_get_acl() for the no acl case (i.e down to ~10-15us), but that then increases the ACL case by 30% (i.e. up to 40-45us). To solve this and speed up both cases, drive the xattr buffer allocation into the attribute code once we know what the actual xattr length is. For the no-xattr case, we avoid the allocation completely, speeding up that case. For the common ACL case, we'll end up with a fast heap allocation (because it'll be smaller than a page), and only for the rarer "we have a remote xattr" will we have a multi-page allocation occur. Hence the common ACL case will be much faster, too. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
2019-08-29 19:04:10 +03:00
return error;
}
/*
* Calculate how many blocks we need for the new attribute,
*/
xfs: Set up infrastructure for log attribute replay Currently attributes are modified directly across one or more transactions. But they are not logged or replayed in the event of an error. The goal of log attr replay is to enable logging and replaying of attribute operations using the existing delayed operations infrastructure. This will later enable the attributes to become part of larger multi part operations that also must first be recorded to the log. This is mostly of interest in the scheme of parent pointers which would need to maintain an attribute containing parent inode information any time an inode is moved, created, or removed. Parent pointers would then be of interest to any feature that would need to quickly derive an inode path from the mount point. Online scrub, nfs lookups and fs grow or shrink operations are all features that could take advantage of this. This patch adds two new log item types for setting or removing attributes as deferred operations. The xfs_attri_log_item will log an intent to set or remove an attribute. The corresponding xfs_attrd_log_item holds a reference to the xfs_attri_log_item and is freed once the transaction is done. Both log items use a generic xfs_attr_log_format structure that contains the attribute name, value, flags, inode, and an op_flag that indicates if the operations is a set or remove. [dchinner: added extra little bits needed for intent whiteouts] Signed-off-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-04 05:41:02 +03:00
int
xfs_attr_calc_size(
struct xfs_da_args *args,
int *local)
{
struct xfs_mount *mp = args->dp->i_mount;
int size;
int nblks;
/*
* Determine space new attribute will use, and if it would be
* "local" or "remote" (note: local != inline).
*/
size = xfs_attr_leaf_newentsize(args, local);
nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
if (*local) {
if (size > (args->geo->blksize / 2)) {
/* Double split possible */
nblks *= 2;
}
} else {
/*
* Out of line attribute, cannot double split, but
* make room for the attribute value itself.
*/
uint dblocks = xfs_attr3_rmt_blocks(mp, args->valuelen);
nblks += dblocks;
nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
}
return nblks;
}
/* Initialize transaction reservation for attr operations */
void
xfs_init_attr_trans(
struct xfs_da_args *args,
struct xfs_trans_res *tres,
unsigned int *total)
{
struct xfs_mount *mp = args->dp->i_mount;
if (args->value) {
tres->tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
M_RES(mp)->tr_attrsetrt.tr_logres *
args->total;
tres->tr_logcount = XFS_ATTRSET_LOG_COUNT;
tres->tr_logflags = XFS_TRANS_PERM_LOG_RES;
*total = args->total;
} else {
*tres = M_RES(mp)->tr_attrrm;
*total = XFS_ATTRRM_SPACE_RES(mp);
}
}
/*
* Add an attr to a shortform fork. If there is no space,
* xfs_attr_shortform_addname() will convert to leaf format and return -ENOSPC.
* to use.
*/
STATIC int
xfs_attr_try_sf_addname(
struct xfs_inode *dp,
struct xfs_da_args *args)
{
int error;
/*
* Build initial attribute list (if required).
*/
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
if (dp->i_af.if_format == XFS_DINODE_FMT_EXTENTS)
xfs_attr_shortform_create(args);
error = xfs_attr_shortform_addname(args);
if (error == -ENOSPC)
return error;
/*
* Commit the shortform mods, and we're done.
* NOTE: this is also the error path (EEXIST, etc).
*/
if (!error && !(args->op_flags & XFS_DA_OP_NOTIME))
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
if (xfs_has_wsync(dp->i_mount))
xfs_trans_set_sync(args->trans);
return error;
}
static int
xfs_attr_sf_addname(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_inode *dp = args->dp;
int error = 0;
error = xfs_attr_try_sf_addname(dp, args);
if (error != -ENOSPC) {
ASSERT(!error || error == -EEXIST);
attr->xattri_dela_state = XFS_DAS_DONE;
goto out;
}
/*
* It won't fit in the shortform, transform to a leaf block. GROT:
* another possible req'mt for a double-split btree op.
*/
error = xfs_attr_shortform_to_leaf(args);
if (error)
return error;
attr->xattri_dela_state = XFS_DAS_LEAF_ADD;
out:
trace_xfs_attr_sf_addname_return(attr->xattri_dela_state, args->dp);
return error;
}
/*
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
* Handle the state change on completion of a multi-state attr operation.
*
* If the XFS_DA_OP_REPLACE flag is set, this means the operation was the first
* modification in a attr replace operation and we still have to do the second
* state, indicated by @replace_state.
*
* We consume the XFS_DA_OP_REPLACE flag so that when we are called again on
* completion of the second half of the attr replace operation we correctly
* signal that it is done.
*/
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
static enum xfs_delattr_state
xfs_attr_complete_op(
struct xfs_attr_intent *attr,
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
enum xfs_delattr_state replace_state)
{
struct xfs_da_args *args = attr->xattri_da_args;
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
bool do_replace = args->op_flags & XFS_DA_OP_REPLACE;
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
args->op_flags &= ~XFS_DA_OP_REPLACE;
if (do_replace) {
args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
return replace_state;
}
return XFS_DAS_DONE;
}
static int
xfs_attr_leaf_addname(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error;
ASSERT(xfs_attr_is_leaf(args->dp));
/*
* Use the leaf buffer we may already hold locked as a result of
* a sf-to-leaf conversion.
*/
error = xfs_attr_leaf_try_add(args);
if (error == -ENOSPC) {
error = xfs_attr3_leaf_to_node(args);
if (error)
return error;
/*
* We're not in leaf format anymore, so roll the transaction and
* retry the add to the newly allocated node block.
*/
attr->xattri_dela_state = XFS_DAS_NODE_ADD;
goto out;
xfs: avoid empty xattr transaction when attrs are inline generic/642 triggered a reproducable assert failure in xlog_cil_commit() that resulted from a xfs_attr_set() committing an empty but dirty transaction. When the CIL is empty and this occurs, xlog_cil_commit() tries a background push and this triggers a "pushing an empty CIL" assert. XFS: Assertion failed: !list_empty(&cil->xc_cil), file: fs/xfs/xfs_log_cil.c, line: 1274 Call Trace: <TASK> xlog_cil_commit+0xa5a/0xad0 __xfs_trans_commit+0xb8/0x330 xfs_trans_commit+0x10/0x20 xfs_attr_set+0x3e2/0x4c0 xfs_xattr_set+0x8d/0xe0 __vfs_setxattr+0x6b/0x90 __vfs_setxattr_noperm+0x76/0x220 __vfs_setxattr_locked+0xdf/0x100 vfs_setxattr+0x94/0x170 setxattr+0x110/0x200 path_setxattr+0xbf/0xe0 __x64_sys_setxattr+0x2b/0x30 do_syscall_64+0x35/0x80 The problem is related to the breakdown of attribute addition in xfs_attr_set_iter() and how it is called from deferred operations. When we have a pure leaf xattr insert, we add the xattr to the leaf and set the next state to XFS_DAS_FOUND_LBLK and return -EAGAIN. This requeues the xattr defered work, rolls the transaction and runs xfs_attr_set_iter() again. This then checks the xattr for being remote (it's not) and whether a replace op is being done (this is a create op) and if neither are true it returns without having done anything. xfs_xattri_finish_update() then unconditionally sets the transaction dirty, and the deferops finishes and returns to __xfs_trans_commit() which sees the transaction dirty and tries to commit it by calling xlog_cil_commit(). The transaction is empty, and then the assert fires if this happens when the CIL is empty. This patch addresses the structure of xfs_attr_set_iter() that requires re-entry on leaf add even when nothing will be done. This gets rid of the trailing empty transaction and so doesn't trigger the XFS_TRANS_DIRTY assignment in xfs_xattri_finish_update() incorrectly. Addressing that is for a different patch. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson<allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-11 10:02:23 +03:00
}
if (error)
return error;
xfs: avoid empty xattr transaction when attrs are inline generic/642 triggered a reproducable assert failure in xlog_cil_commit() that resulted from a xfs_attr_set() committing an empty but dirty transaction. When the CIL is empty and this occurs, xlog_cil_commit() tries a background push and this triggers a "pushing an empty CIL" assert. XFS: Assertion failed: !list_empty(&cil->xc_cil), file: fs/xfs/xfs_log_cil.c, line: 1274 Call Trace: <TASK> xlog_cil_commit+0xa5a/0xad0 __xfs_trans_commit+0xb8/0x330 xfs_trans_commit+0x10/0x20 xfs_attr_set+0x3e2/0x4c0 xfs_xattr_set+0x8d/0xe0 __vfs_setxattr+0x6b/0x90 __vfs_setxattr_noperm+0x76/0x220 __vfs_setxattr_locked+0xdf/0x100 vfs_setxattr+0x94/0x170 setxattr+0x110/0x200 path_setxattr+0xbf/0xe0 __x64_sys_setxattr+0x2b/0x30 do_syscall_64+0x35/0x80 The problem is related to the breakdown of attribute addition in xfs_attr_set_iter() and how it is called from deferred operations. When we have a pure leaf xattr insert, we add the xattr to the leaf and set the next state to XFS_DAS_FOUND_LBLK and return -EAGAIN. This requeues the xattr defered work, rolls the transaction and runs xfs_attr_set_iter() again. This then checks the xattr for being remote (it's not) and whether a replace op is being done (this is a create op) and if neither are true it returns without having done anything. xfs_xattri_finish_update() then unconditionally sets the transaction dirty, and the deferops finishes and returns to __xfs_trans_commit() which sees the transaction dirty and tries to commit it by calling xlog_cil_commit(). The transaction is empty, and then the assert fires if this happens when the CIL is empty. This patch addresses the structure of xfs_attr_set_iter() that requires re-entry on leaf add even when nothing will be done. This gets rid of the trailing empty transaction and so doesn't trigger the XFS_TRANS_DIRTY assignment in xfs_xattri_finish_update() incorrectly. Addressing that is for a different patch. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson<allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-11 10:02:23 +03:00
/*
* We need to commit and roll if we need to allocate remote xattr blocks
* or perform more xattr manipulations. Otherwise there is nothing more
* to do and we can return success.
*/
if (args->rmtblkno)
attr->xattri_dela_state = XFS_DAS_LEAF_SET_RMT;
else
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
attr->xattri_dela_state = xfs_attr_complete_op(attr,
XFS_DAS_LEAF_REPLACE);
xfs: avoid empty xattr transaction when attrs are inline generic/642 triggered a reproducable assert failure in xlog_cil_commit() that resulted from a xfs_attr_set() committing an empty but dirty transaction. When the CIL is empty and this occurs, xlog_cil_commit() tries a background push and this triggers a "pushing an empty CIL" assert. XFS: Assertion failed: !list_empty(&cil->xc_cil), file: fs/xfs/xfs_log_cil.c, line: 1274 Call Trace: <TASK> xlog_cil_commit+0xa5a/0xad0 __xfs_trans_commit+0xb8/0x330 xfs_trans_commit+0x10/0x20 xfs_attr_set+0x3e2/0x4c0 xfs_xattr_set+0x8d/0xe0 __vfs_setxattr+0x6b/0x90 __vfs_setxattr_noperm+0x76/0x220 __vfs_setxattr_locked+0xdf/0x100 vfs_setxattr+0x94/0x170 setxattr+0x110/0x200 path_setxattr+0xbf/0xe0 __x64_sys_setxattr+0x2b/0x30 do_syscall_64+0x35/0x80 The problem is related to the breakdown of attribute addition in xfs_attr_set_iter() and how it is called from deferred operations. When we have a pure leaf xattr insert, we add the xattr to the leaf and set the next state to XFS_DAS_FOUND_LBLK and return -EAGAIN. This requeues the xattr defered work, rolls the transaction and runs xfs_attr_set_iter() again. This then checks the xattr for being remote (it's not) and whether a replace op is being done (this is a create op) and if neither are true it returns without having done anything. xfs_xattri_finish_update() then unconditionally sets the transaction dirty, and the deferops finishes and returns to __xfs_trans_commit() which sees the transaction dirty and tries to commit it by calling xlog_cil_commit(). The transaction is empty, and then the assert fires if this happens when the CIL is empty. This patch addresses the structure of xfs_attr_set_iter() that requires re-entry on leaf add even when nothing will be done. This gets rid of the trailing empty transaction and so doesn't trigger the XFS_TRANS_DIRTY assignment in xfs_xattri_finish_update() incorrectly. Addressing that is for a different patch. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson<allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-11 10:02:23 +03:00
out:
trace_xfs_attr_leaf_addname_return(attr->xattri_dela_state, args->dp);
xfs: avoid empty xattr transaction when attrs are inline generic/642 triggered a reproducable assert failure in xlog_cil_commit() that resulted from a xfs_attr_set() committing an empty but dirty transaction. When the CIL is empty and this occurs, xlog_cil_commit() tries a background push and this triggers a "pushing an empty CIL" assert. XFS: Assertion failed: !list_empty(&cil->xc_cil), file: fs/xfs/xfs_log_cil.c, line: 1274 Call Trace: <TASK> xlog_cil_commit+0xa5a/0xad0 __xfs_trans_commit+0xb8/0x330 xfs_trans_commit+0x10/0x20 xfs_attr_set+0x3e2/0x4c0 xfs_xattr_set+0x8d/0xe0 __vfs_setxattr+0x6b/0x90 __vfs_setxattr_noperm+0x76/0x220 __vfs_setxattr_locked+0xdf/0x100 vfs_setxattr+0x94/0x170 setxattr+0x110/0x200 path_setxattr+0xbf/0xe0 __x64_sys_setxattr+0x2b/0x30 do_syscall_64+0x35/0x80 The problem is related to the breakdown of attribute addition in xfs_attr_set_iter() and how it is called from deferred operations. When we have a pure leaf xattr insert, we add the xattr to the leaf and set the next state to XFS_DAS_FOUND_LBLK and return -EAGAIN. This requeues the xattr defered work, rolls the transaction and runs xfs_attr_set_iter() again. This then checks the xattr for being remote (it's not) and whether a replace op is being done (this is a create op) and if neither are true it returns without having done anything. xfs_xattri_finish_update() then unconditionally sets the transaction dirty, and the deferops finishes and returns to __xfs_trans_commit() which sees the transaction dirty and tries to commit it by calling xlog_cil_commit(). The transaction is empty, and then the assert fires if this happens when the CIL is empty. This patch addresses the structure of xfs_attr_set_iter() that requires re-entry on leaf add even when nothing will be done. This gets rid of the trailing empty transaction and so doesn't trigger the XFS_TRANS_DIRTY assignment in xfs_xattri_finish_update() incorrectly. Addressing that is for a different patch. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson<allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-11 10:02:23 +03:00
return error;
}
/*
* Add an entry to a node format attr tree.
*
* Note that we might still have a leaf here - xfs_attr_is_leaf() cannot tell
* the difference between leaf + remote attr blocks and a node format tree,
* so we may still end up having to convert from leaf to node format here.
*/
static int
xfs_attr_node_addname(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error;
error = xfs_attr_node_addname_find_attr(attr);
if (error)
return error;
error = xfs_attr_node_try_addname(attr);
if (error == -ENOSPC) {
error = xfs_attr3_leaf_to_node(args);
if (error)
return error;
/*
* No state change, we really are in node form now
* but we need the transaction rolled to continue.
*/
goto out;
}
if (error)
return error;
if (args->rmtblkno)
attr->xattri_dela_state = XFS_DAS_NODE_SET_RMT;
else
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
attr->xattri_dela_state = xfs_attr_complete_op(attr,
XFS_DAS_NODE_REPLACE);
out:
trace_xfs_attr_node_addname_return(attr->xattri_dela_state, args->dp);
return error;
}
static int
xfs_attr_rmtval_alloc(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error = 0;
/*
* If there was an out-of-line value, allocate the blocks we
* identified for its storage and copy the value. This is done
* after we create the attribute so that we don't overflow the
* maximum size of a transaction and/or hit a deadlock.
*/
if (attr->xattri_blkcnt > 0) {
error = xfs_attr_rmtval_set_blk(attr);
if (error)
return error;
/* Roll the transaction only if there is more to allocate. */
if (attr->xattri_blkcnt > 0)
goto out;
}
error = xfs_attr_rmtval_set_value(args);
if (error)
return error;
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
attr->xattri_dela_state = xfs_attr_complete_op(attr,
++attr->xattri_dela_state);
/*
* If we are not doing a rename, we've finished the operation but still
* have to clear the incomplete flag protecting the new attr from
* exposing partially initialised state if we crash during creation.
*/
if (attr->xattri_dela_state == XFS_DAS_DONE)
error = xfs_attr3_leaf_clearflag(args);
out:
trace_xfs_attr_rmtval_alloc(attr->xattri_dela_state, args->dp);
return error;
}
/*
* Mark an attribute entry INCOMPLETE and save pointers to the relevant buffers
* for later deletion of the entry.
*/
static int
xfs_attr_leaf_mark_incomplete(
struct xfs_da_args *args,
struct xfs_da_state *state)
{
int error;
/*
* Fill in disk block numbers in the state structure
* so that we can get the buffers back after we commit
* several transactions in the following calls.
*/
error = xfs_attr_fillstate(state);
if (error)
return error;
/*
* Mark the attribute as INCOMPLETE
*/
return xfs_attr3_leaf_setflag(args);
}
/* Ensure the da state of an xattr deferred work item is ready to go. */
static inline void
xfs_attr_item_init_da_state(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
if (!attr->xattri_da_state)
attr->xattri_da_state = xfs_da_state_alloc(args);
else
xfs_da_state_reset(attr->xattri_da_state, args);
}
/*
* Initial setup for xfs_attr_node_removename. Make sure the attr is there and
* the blocks are valid. Attr keys with remote blocks will be marked
* incomplete.
*/
static
int xfs_attr_node_removename_setup(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
struct xfs_da_state *state;
int error;
xfs_attr_item_init_da_state(attr);
error = xfs_attr_node_lookup(args, attr->xattri_da_state);
if (error != -EEXIST)
goto out;
error = 0;
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
state = attr->xattri_da_state;
ASSERT(state->path.blk[state->path.active - 1].bp != NULL);
ASSERT(state->path.blk[state->path.active - 1].magic ==
XFS_ATTR_LEAF_MAGIC);
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
error = xfs_attr_leaf_mark_incomplete(args, state);
if (error)
goto out;
if (args->rmtblkno > 0)
error = xfs_attr_rmtval_invalidate(args);
out:
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
if (error) {
xfs_da_state_free(attr->xattri_da_state);
attr->xattri_da_state = NULL;
}
return error;
}
/*
* Remove the original attr we have just replaced. This is dependent on the
* original lookup and insert placing the old attr in args->blkno/args->index
* and the new attr in args->blkno2/args->index2.
*/
static int
xfs_attr_leaf_remove_attr(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_inode *dp = args->dp;
struct xfs_buf *bp = NULL;
int forkoff;
int error;
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
&bp);
if (error)
return error;
xfs_attr3_leaf_remove(bp, args);
forkoff = xfs_attr_shortform_allfit(bp, dp);
if (forkoff)
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
return error;
}
/*
* Shrink an attribute from leaf to shortform. Used by the node format remove
* path when the node format collapses to a single block and so we have to check
* if it can be collapsed further.
*/
static int
xfs_attr_leaf_shrink(
struct xfs_da_args *args)
{
struct xfs_inode *dp = args->dp;
struct xfs_buf *bp;
int forkoff;
int error;
if (!xfs_attr_is_leaf(dp))
return 0;
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
if (error)
return error;
forkoff = xfs_attr_shortform_allfit(bp, dp);
if (forkoff) {
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
} else {
xfs_trans_brelse(args->trans, bp);
}
return error;
}
/*
* Run the attribute operation specified in @attr.
*
* This routine is meant to function as a delayed operation and will set the
* state to XFS_DAS_DONE when the operation is complete. Calling functions will
* need to handle this, and recall the function until either an error or
* XFS_DAS_DONE is detected.
*/
int
xfs_attr_set_iter(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
int error = 0;
/* State machine switch */
next_state:
switch (attr->xattri_dela_state) {
case XFS_DAS_UNINIT:
ASSERT(0);
return -EFSCORRUPTED;
case XFS_DAS_SF_ADD:
return xfs_attr_sf_addname(attr);
case XFS_DAS_LEAF_ADD:
return xfs_attr_leaf_addname(attr);
case XFS_DAS_NODE_ADD:
return xfs_attr_node_addname(attr);
case XFS_DAS_SF_REMOVE:
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
error = xfs_attr_sf_removename(args);
attr->xattri_dela_state = xfs_attr_complete_op(attr,
xfs_attr_init_add_state(args));
break;
case XFS_DAS_LEAF_REMOVE:
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
error = xfs_attr_leaf_removename(args);
attr->xattri_dela_state = xfs_attr_complete_op(attr,
xfs_attr_init_add_state(args));
break;
case XFS_DAS_NODE_REMOVE:
error = xfs_attr_node_removename_setup(attr);
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
if (error == -ENOATTR &&
(args->op_flags & XFS_DA_OP_RECOVERY)) {
attr->xattri_dela_state = xfs_attr_complete_op(attr,
xfs_attr_init_add_state(args));
error = 0;
break;
}
if (error)
return error;
attr->xattri_dela_state = XFS_DAS_NODE_REMOVE_RMT;
if (args->rmtblkno == 0)
attr->xattri_dela_state++;
break;
case XFS_DAS_LEAF_SET_RMT:
case XFS_DAS_NODE_SET_RMT:
error = xfs_attr_rmtval_find_space(attr);
if (error)
return error;
attr->xattri_dela_state++;
fallthrough;
case XFS_DAS_LEAF_ALLOC_RMT:
case XFS_DAS_NODE_ALLOC_RMT:
error = xfs_attr_rmtval_alloc(attr);
if (error)
return error;
if (attr->xattri_dela_state == XFS_DAS_DONE)
break;
goto next_state;
case XFS_DAS_LEAF_REPLACE:
case XFS_DAS_NODE_REPLACE:
/*
* We must "flip" the incomplete flags on the "new" and "old"
* attribute/value pairs so that one disappears and one appears
* atomically.
*/
error = xfs_attr3_leaf_flipflags(args);
if (error)
return error;
/*
* We must commit the flag value change now to make it atomic
* and then we can start the next trans in series at REMOVE_OLD.
*/
attr->xattri_dela_state++;
break;
case XFS_DAS_LEAF_REMOVE_OLD:
case XFS_DAS_NODE_REMOVE_OLD:
/*
* If we have a remote attr, start the process of removing it
* by invalidating any cached buffers.
*
* If we don't have a remote attr, we skip the remote block
* removal state altogether with a second state increment.
*/
xfs_attr_restore_rmt_blk(args);
if (args->rmtblkno) {
error = xfs_attr_rmtval_invalidate(args);
if (error)
return error;
} else {
attr->xattri_dela_state++;
}
attr->xattri_dela_state++;
goto next_state;
case XFS_DAS_LEAF_REMOVE_RMT:
case XFS_DAS_NODE_REMOVE_RMT:
error = xfs_attr_rmtval_remove(attr);
if (error == -EAGAIN) {
error = 0;
break;
}
if (error)
return error;
/*
* We've finished removing the remote attr blocks, so commit the
* transaction and move on to removing the attr name from the
* leaf/node block. Removing the attr might require a full
* transaction reservation for btree block freeing, so we
* can't do that in the same transaction where we removed the
* remote attr blocks.
*/
attr->xattri_dela_state++;
break;
case XFS_DAS_LEAF_REMOVE_ATTR:
error = xfs_attr_leaf_remove_attr(attr);
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
attr->xattri_dela_state = xfs_attr_complete_op(attr,
xfs_attr_init_add_state(args));
break;
case XFS_DAS_NODE_REMOVE_ATTR:
error = xfs_attr_node_remove_attr(attr);
if (!error)
error = xfs_attr_leaf_shrink(args);
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
attr->xattri_dela_state = xfs_attr_complete_op(attr,
xfs_attr_init_add_state(args));
break;
default:
ASSERT(0);
break;
}
trace_xfs_attr_set_iter_return(attr->xattri_dela_state, args->dp);
return error;
}
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
static int
xfs_attr_lookup(
struct xfs_da_args *args)
{
struct xfs_inode *dp = args->dp;
struct xfs_buf *bp = NULL;
struct xfs_da_state *state;
int error;
if (!xfs_inode_hasattr(dp))
return -ENOATTR;
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_sf_findname(args, NULL, NULL);
if (xfs_attr_is_leaf(dp)) {
error = xfs_attr_leaf_hasname(args, &bp);
if (bp)
xfs_trans_brelse(args->trans, bp);
return error;
}
state = xfs_da_state_alloc(args);
error = xfs_attr_node_lookup(args, state);
xfs_da_state_free(state);
return error;
}
static int
xfs_attr_intent_init(
struct xfs_da_args *args,
unsigned int op_flags, /* op flag (set or remove) */
struct xfs_attr_intent **attr) /* new xfs_attr_intent */
{
struct xfs_attr_intent *new;
new = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);
new->xattri_op_flags = op_flags;
new->xattri_da_args = args;
*attr = new;
return 0;
}
/* Sets an attribute for an inode as a deferred operation */
static int
xfs_attr_defer_add(
struct xfs_da_args *args)
{
struct xfs_attr_intent *new;
int error = 0;
error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_SET, &new);
if (error)
return error;
new->xattri_dela_state = xfs_attr_init_add_state(args);
xfs_defer_add(args->trans, XFS_DEFER_OPS_TYPE_ATTR, &new->xattri_list);
trace_xfs_attr_defer_add(new->xattri_dela_state, args->dp);
return 0;
}
/* Sets an attribute for an inode as a deferred operation */
static int
xfs_attr_defer_replace(
struct xfs_da_args *args)
{
struct xfs_attr_intent *new;
int error = 0;
error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REPLACE, &new);
if (error)
return error;
new->xattri_dela_state = xfs_attr_init_replace_state(args);
xfs_defer_add(args->trans, XFS_DEFER_OPS_TYPE_ATTR, &new->xattri_list);
trace_xfs_attr_defer_replace(new->xattri_dela_state, args->dp);
return 0;
}
/* Removes an attribute for an inode as a deferred operation */
static int
xfs_attr_defer_remove(
struct xfs_da_args *args)
{
struct xfs_attr_intent *new;
int error;
error = xfs_attr_intent_init(args, XFS_ATTRI_OP_FLAGS_REMOVE, &new);
if (error)
return error;
new->xattri_dela_state = xfs_attr_init_remove_state(args);
xfs_defer_add(args->trans, XFS_DEFER_OPS_TYPE_ATTR, &new->xattri_list);
trace_xfs_attr_defer_remove(new->xattri_dela_state, args->dp);
return 0;
}
/*
* Note: If args->value is NULL the attribute will be removed, just like the
* Linux ->setattr API.
*/
int
xfs_attr_set(
struct xfs_da_args *args)
{
struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
struct xfs_trans_res tres;
bool rsvd = (args->attr_filter & XFS_ATTR_ROOT);
int error, local;
int rmt_blks = 0;
unsigned int total;
if (xfs_is_shutdown(dp->i_mount))
return -EIO;
error = xfs_qm_dqattach(dp);
if (error)
return error;
args->geo = mp->m_attr_geo;
args->whichfork = XFS_ATTR_FORK;
args->hashval = xfs_da_hashname(args->name, args->namelen);
/*
* We have no control over the attribute names that userspace passes us
* to remove, so we have to allow the name lookup prior to attribute
xfs: fix TOCTOU race involving the new logged xattrs control knob I found a race involving the larp control knob, aka the debugging knob that lets developers enable logging of extended attribute updates: Thread 1 Thread 2 echo 0 > /sys/fs/xfs/debug/larp setxattr(REPLACE) xfs_has_larp (returns false) xfs_attr_set echo 1 > /sys/fs/xfs/debug/larp xfs_attr_defer_replace xfs_attr_init_replace_state xfs_has_larp (returns true) xfs_attr_init_remove_state <oops, wrong DAS state!> This isn't a particularly severe problem right now because xattr logging is only enabled when CONFIG_XFS_DEBUG=y, and developers *should* know what they're doing. However, the eventual intent is that callers should be able to ask for the assistance of the log in persisting xattr updates. This capability might not be required for /all/ callers, which means that dynamic control must work correctly. Once an xattr update has decided whether or not to use logged xattrs, it needs to stay in that mode until the end of the operation regardless of what subsequent parallel operations might do. Therefore, it is an error to continue sampling xfs_globals.larp once xfs_attr_change has made a decision about larp, and it was not correct for me to have told Allison that ->create_intent functions can sample the global log incompat feature bitfield to decide to elide a log item. Instead, create a new op flag for the xfs_da_args structure, and convert all other callers of xfs_has_larp and xfs_sb_version_haslogxattrs within the attr update state machine to look for the operations flag. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
2022-06-06 04:51:22 +03:00
* removal to fail as well. Preserve the logged flag, since we need
* to pass that through to the logging code.
*/
xfs: fix TOCTOU race involving the new logged xattrs control knob I found a race involving the larp control knob, aka the debugging knob that lets developers enable logging of extended attribute updates: Thread 1 Thread 2 echo 0 > /sys/fs/xfs/debug/larp setxattr(REPLACE) xfs_has_larp (returns false) xfs_attr_set echo 1 > /sys/fs/xfs/debug/larp xfs_attr_defer_replace xfs_attr_init_replace_state xfs_has_larp (returns true) xfs_attr_init_remove_state <oops, wrong DAS state!> This isn't a particularly severe problem right now because xattr logging is only enabled when CONFIG_XFS_DEBUG=y, and developers *should* know what they're doing. However, the eventual intent is that callers should be able to ask for the assistance of the log in persisting xattr updates. This capability might not be required for /all/ callers, which means that dynamic control must work correctly. Once an xattr update has decided whether or not to use logged xattrs, it needs to stay in that mode until the end of the operation regardless of what subsequent parallel operations might do. Therefore, it is an error to continue sampling xfs_globals.larp once xfs_attr_change has made a decision about larp, and it was not correct for me to have told Allison that ->create_intent functions can sample the global log incompat feature bitfield to decide to elide a log item. Instead, create a new op flag for the xfs_da_args structure, and convert all other callers of xfs_has_larp and xfs_sb_version_haslogxattrs within the attr update state machine to look for the operations flag. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
2022-06-06 04:51:22 +03:00
args->op_flags = XFS_DA_OP_OKNOENT |
(args->op_flags & XFS_DA_OP_LOGGED);
if (args->value) {
XFS_STATS_INC(mp, xs_attr_set);
args->total = xfs_attr_calc_size(args, &local);
/*
* If the inode doesn't have an attribute fork, add one.
* (inode must not be locked when we call this routine)
*/
if (xfs_inode_has_attr_fork(dp) == 0) {
int sf_size = sizeof(struct xfs_attr_sf_hdr) +
xfs_attr_sf_entsize_byname(args->namelen,
args->valuelen);
error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
if (error)
return error;
}
if (!local)
rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
} else {
XFS_STATS_INC(mp, xs_attr_remove);
rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
}
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
xfs_init_attr_trans(args, &tres, &total);
error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
if (error)
return error;
if (args->value || xfs_inode_hasattr(dp)) {
error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
if (error == -EFBIG)
error = xfs_iext_count_upgrade(args->trans, dp,
XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
if (error)
goto out_trans_cancel;
}
error = xfs_attr_lookup(args);
switch (error) {
case -EEXIST:
/* if no value, we are performing a remove operation */
if (!args->value) {
error = xfs_attr_defer_remove(args);
break;
}
/* Pure create fails if the attr already exists */
if (args->attr_flags & XATTR_CREATE)
goto out_trans_cancel;
error = xfs_attr_defer_replace(args);
break;
case -ENOATTR:
/* Can't remove what isn't there. */
if (!args->value)
goto out_trans_cancel;
/* Pure replace fails if no existing attr to replace. */
if (args->attr_flags & XATTR_REPLACE)
goto out_trans_cancel;
error = xfs_attr_defer_add(args);
break;
default:
goto out_trans_cancel;
}
if (error)
goto out_trans_cancel;
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (xfs_has_wsync(mp))
xfs_trans_set_sync(args->trans);
if (!(args->op_flags & XFS_DA_OP_NOTIME))
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
*/
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args->trans);
out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
if (args->trans)
xfs_trans_cancel(args->trans);
goto out_unlock;
}
/*========================================================================
* External routines when attribute list is inside the inode
*========================================================================*/
static inline int xfs_attr_sf_totsize(struct xfs_inode *dp)
{
struct xfs_attr_shortform *sf;
xfs: make inode attribute forks a permanent part of struct xfs_inode Syzkaller reported a UAF bug a while back: ================================================================== BUG: KASAN: use-after-free in xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 Read of size 4 at addr ffff88802cec919c by task syz-executor262/2958 CPU: 2 PID: 2958 Comm: syz-executor262 Not tainted 5.15.0-0.30.3-20220406_1406 #3 Hardware name: Red Hat KVM, BIOS 1.13.0-2.module+el8.3.0+7860+a7792d29 04/01/2014 Call Trace: <TASK> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x82/0xa9 lib/dump_stack.c:106 print_address_description.constprop.9+0x21/0x2d5 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold.14+0x7f/0x11b mm/kasan/report.c:459 xfs_ilock_attr_map_shared+0xe3/0xf6 fs/xfs/xfs_inode.c:127 xfs_attr_get+0x378/0x4c2 fs/xfs/libxfs/xfs_attr.c:159 xfs_xattr_get+0xe3/0x150 fs/xfs/xfs_xattr.c:36 __vfs_getxattr+0xdf/0x13d fs/xattr.c:399 cap_inode_need_killpriv+0x41/0x5d security/commoncap.c:300 security_inode_need_killpriv+0x4c/0x97 security/security.c:1408 dentry_needs_remove_privs.part.28+0x21/0x63 fs/inode.c:1912 dentry_needs_remove_privs+0x80/0x9e fs/inode.c:1908 do_truncate+0xc3/0x1e0 fs/open.c:56 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 RIP: 0033:0x7f7ef4bb753d Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1b 79 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007f7ef52c2ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000055 RAX: ffffffffffffffda RBX: 0000000000404148 RCX: 00007f7ef4bb753d RDX: 00007f7ef4bb753d RSI: 0000000000000000 RDI: 0000000020004fc0 RBP: 0000000000404140 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0030656c69662f2e R13: 00007ffd794db37f R14: 00007ffd794db470 R15: 00007f7ef52c2fc0 </TASK> Allocated by task 2953: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] __kasan_slab_alloc+0x68/0x7c mm/kasan/common.c:467 kasan_slab_alloc include/linux/kasan.h:254 [inline] slab_post_alloc_hook mm/slab.h:519 [inline] slab_alloc_node mm/slub.c:3213 [inline] slab_alloc mm/slub.c:3221 [inline] kmem_cache_alloc+0x11b/0x3eb mm/slub.c:3226 kmem_cache_zalloc include/linux/slab.h:711 [inline] xfs_ifork_alloc+0x25/0xa2 fs/xfs/libxfs/xfs_inode_fork.c:287 xfs_bmap_add_attrfork+0x3f2/0x9b1 fs/xfs/libxfs/xfs_bmap.c:1098 xfs_attr_set+0xe38/0x12a7 fs/xfs/libxfs/xfs_attr.c:746 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_setxattr+0x11b/0x177 fs/xattr.c:180 __vfs_setxattr_noperm+0x128/0x5e0 fs/xattr.c:214 __vfs_setxattr_locked+0x1d4/0x258 fs/xattr.c:275 vfs_setxattr+0x154/0x33d fs/xattr.c:301 setxattr+0x216/0x29f fs/xattr.c:575 __do_sys_fsetxattr fs/xattr.c:632 [inline] __se_sys_fsetxattr fs/xattr.c:621 [inline] __x64_sys_fsetxattr+0x243/0x2fe fs/xattr.c:621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 Freed by task 2949: kasan_save_stack+0x19/0x38 mm/kasan/common.c:38 kasan_set_track+0x1c/0x21 mm/kasan/common.c:46 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:360 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free mm/kasan/common.c:328 [inline] __kasan_slab_free+0xe2/0x10e mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1700 [inline] slab_free_freelist_hook mm/slub.c:1726 [inline] slab_free mm/slub.c:3492 [inline] kmem_cache_free+0xdc/0x3ce mm/slub.c:3508 xfs_attr_fork_remove+0x8d/0x132 fs/xfs/libxfs/xfs_attr_leaf.c:773 xfs_attr_sf_removename+0x5dd/0x6cb fs/xfs/libxfs/xfs_attr_leaf.c:822 xfs_attr_remove_iter+0x68c/0x805 fs/xfs/libxfs/xfs_attr.c:1413 xfs_attr_remove_args+0xb1/0x10d fs/xfs/libxfs/xfs_attr.c:684 xfs_attr_set+0xf1e/0x12a7 fs/xfs/libxfs/xfs_attr.c:802 xfs_xattr_set+0xeb/0x1a9 fs/xfs/xfs_xattr.c:59 __vfs_removexattr+0x106/0x16a fs/xattr.c:468 cap_inode_killpriv+0x24/0x47 security/commoncap.c:324 security_inode_killpriv+0x54/0xa1 security/security.c:1414 setattr_prepare+0x1a6/0x897 fs/attr.c:146 xfs_vn_change_ok+0x111/0x15e fs/xfs/xfs_iops.c:682 xfs_vn_setattr_size+0x5f/0x15a fs/xfs/xfs_iops.c:1065 xfs_vn_setattr+0x125/0x2ad fs/xfs/xfs_iops.c:1093 notify_change+0xae5/0x10a1 fs/attr.c:410 do_truncate+0x134/0x1e0 fs/open.c:64 handle_truncate fs/namei.c:3084 [inline] do_open fs/namei.c:3432 [inline] path_openat+0x30ab/0x396d fs/namei.c:3561 do_filp_open+0x1c4/0x290 fs/namei.c:3588 do_sys_openat2+0x60d/0x98c fs/open.c:1212 do_sys_open+0xcf/0x13c fs/open.c:1228 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x7e arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0x0 The buggy address belongs to the object at ffff88802cec9188 which belongs to the cache xfs_ifork of size 40 The buggy address is located 20 bytes inside of 40-byte region [ffff88802cec9188, ffff88802cec91b0) The buggy address belongs to the page: page:00000000c3af36a1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x2cec9 flags: 0xfffffc0000200(slab|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0000200 ffffea00009d2580 0000000600000006 ffff88801a9ffc80 raw: 0000000000000000 0000000080490049 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88802cec9080: fb fb fb fc fc fa fb fb fb fb fc fc fb fb fb fb ffff88802cec9100: fb fc fc fb fb fb fb fb fc fc fb fb fb fb fb fc >ffff88802cec9180: fc fa fb fb fb fb fc fc fa fb fb fb fb fc fc fb ^ ffff88802cec9200: fb fb fb fb fc fc fb fb fb fb fb fc fc fb fb fb ffff88802cec9280: fb fb fc fc fa fb fb fb fb fc fc fa fb fb fb fb ================================================================== The root cause of this bug is the unlocked access to xfs_inode.i_afp from the getxattr code paths while trying to determine which ILOCK mode to use to stabilize the xattr data. Unfortunately, the VFS does not acquire i_rwsem when vfs_getxattr (or listxattr) call into the filesystem, which means that getxattr can race with a removexattr that's tearing down the attr fork and crash: xfs_attr_set: xfs_attr_get: xfs_attr_fork_remove: xfs_ilock_attr_map_shared: xfs_idestroy_fork(ip->i_afp); kmem_cache_free(xfs_ifork_cache, ip->i_afp); if (ip->i_afp && ip->i_afp = NULL; xfs_need_iread_extents(ip->i_afp)) <KABOOM> ip->i_forkoff = 0; Regrettably, the VFS is much more lax about i_rwsem and getxattr than is immediately obvious -- not only does it not guarantee that we hold i_rwsem, it actually doesn't guarantee that we *don't* hold it either. The getxattr system call won't acquire the lock before calling XFS, but the file capabilities code calls getxattr with and without i_rwsem held to determine if the "security.capabilities" xattr is set on the file. Fixing the VFS locking requires a treewide investigation into every code path that could touch an xattr and what i_rwsem state it expects or sets up. That could take years or even prove impossible; fortunately, we can fix this UAF problem inside XFS. An earlier version of this patch used smp_wmb in xfs_attr_fork_remove to ensure that i_forkoff is always zeroed before i_afp is set to null and changed the read paths to use smp_rmb before accessing i_forkoff and i_afp, which avoided these UAF problems. However, the patch author was too busy dealing with other problems in the meantime, and by the time he came back to this issue, the situation had changed a bit. On a modern system with selinux, each inode will always have at least one xattr for the selinux label, so it doesn't make much sense to keep incurring the extra pointer dereference. Furthermore, Allison's upcoming parent pointer patchset will also cause nearly every inode in the filesystem to have extended attributes. Therefore, make the inode attribute fork structure part of struct xfs_inode, at a cost of 40 more bytes. This patch adds a clunky if_present field where necessary to maintain the existing logic of xattr fork null pointer testing in the existing codebase. The next patch switches the logic over to XFS_IFORK_Q and it all goes away. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
2022-07-09 20:56:06 +03:00
sf = (struct xfs_attr_shortform *)dp->i_af.if_u1.if_data;
return be16_to_cpu(sf->hdr.totsize);
}
/*
* Add a name to the shortform attribute list structure
* This is the external routine.
*/
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
static int
xfs_attr_shortform_addname(
struct xfs_da_args *args)
{
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
int newsize, forkoff;
int error;
trace_xfs_attr_sf_addname(args);
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
error = xfs_attr_shortform_lookup(args);
switch (error) {
case -ENOATTR:
if (args->op_flags & XFS_DA_OP_REPLACE)
return error;
break;
case -EEXIST:
if (!(args->op_flags & XFS_DA_OP_REPLACE))
return error;
error = xfs_attr_sf_removename(args);
if (error)
return error;
/*
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
* Since we have removed the old attr, clear XFS_DA_OP_REPLACE
* so that the new attr doesn't fit in shortform format, the
* leaf format add routine won't trip over the attr not being
* around.
*/
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
args->op_flags &= ~XFS_DA_OP_REPLACE;
break;
case 0:
break;
default:
return error;
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX)
return -ENOSPC;
newsize = xfs_attr_sf_totsize(args->dp);
newsize += xfs_attr_sf_entsize_byname(args->namelen, args->valuelen);
forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize);
if (!forkoff)
return -ENOSPC;
xfs_attr_shortform_add(args, forkoff);
return 0;
}
/*========================================================================
* External routines when attribute list is one block
*========================================================================*/
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
/* Save the current remote block info and clear the current pointers. */
static void
xfs_attr_save_rmt_blk(
struct xfs_da_args *args)
{
args->blkno2 = args->blkno;
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
args->rmtvaluelen2 = args->rmtvaluelen;
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
args->rmtblkno = 0;
args->rmtblkcnt = 0;
args->rmtvaluelen = 0;
}
/* Set stored info about a remote block */
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
static void
xfs_attr_restore_rmt_blk(
struct xfs_da_args *args)
{
args->blkno = args->blkno2;
args->index = args->index2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
args->rmtvaluelen = args->rmtvaluelen2;
}
/*
* Tries to add an attribute to an inode in leaf form
*
* This function is meant to execute as part of a delayed operation and leaves
* the transaction handling to the caller. On success the attribute is added
* and the inode and transaction are left dirty. If there is not enough space,
* the attr data is converted to node format and -ENOSPC is returned. Caller is
* responsible for handling the dirty inode and transaction or adding the attr
* in node format.
*/
STATIC int
xfs_attr_leaf_try_add(
struct xfs_da_args *args)
{
struct xfs_buf *bp;
int error;
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
if (error)
return error;
/*
* Look up the xattr name to set the insertion point for the new xattr.
*/
error = xfs_attr3_leaf_lookup_int(bp, args);
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
switch (error) {
case -ENOATTR:
if (args->op_flags & XFS_DA_OP_REPLACE)
goto out_brelse;
break;
case -EEXIST:
if (!(args->op_flags & XFS_DA_OP_REPLACE))
goto out_brelse;
trace_xfs_attr_leaf_replace(args);
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2014-05-06 01:37:31 +04:00
/*
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
* Save the existing remote attr state so that the current
* values reflect the state of the new attribute we are about to
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2014-05-06 01:37:31 +04:00
* add, not the attribute we just found and will remove later.
*/
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
xfs_attr_save_rmt_blk(args);
break;
case 0:
break;
default:
goto out_brelse;
}
return xfs_attr3_leaf_add(bp, args);
out_brelse:
xfs_trans_brelse(args->trans, bp);
return error;
}
/*
* Return EEXIST if attr is found, or ENOATTR if not
*/
STATIC int
xfs_attr_leaf_hasname(
struct xfs_da_args *args,
struct xfs_buf **bp)
{
int error = 0;
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, bp);
if (error)
return error;
error = xfs_attr3_leaf_lookup_int(*bp, args);
if (error != -ENOATTR && error != -EEXIST)
xfs_trans_brelse(args->trans, *bp);
return error;
}
/*
* Remove a name from the leaf attribute list structure
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
xfs_attr_leaf_removename(
struct xfs_da_args *args)
{
struct xfs_inode *dp;
struct xfs_buf *bp;
int error, forkoff;
trace_xfs_attr_leaf_removename(args);
/*
* Remove the attribute.
*/
dp = args->dp;
error = xfs_attr_leaf_hasname(args, &bp);
if (error == -ENOATTR) {
xfs_trans_brelse(args->trans, bp);
xfs: ATTR_REPLACE algorithm with LARP enabled needs rework We can't use the same algorithm for replacing an existing attribute when logging attributes. The existing algorithm is essentially: 1. create new attr w/ INCOMPLETE 2. atomically flip INCOMPLETE flags between old + new attribute 3. remove old attr which is marked w/ INCOMPLETE This algorithm guarantees that we see either the old or new attribute, and if we fail after the atomic flag flip, we don't have to recover the removal of the old attr because we never see INCOMPLETE attributes in lookups. For logged attributes, however, this does not work. The logged attribute intents do not track the work that has been done as the transaction rolls, and hence the only recovery mechanism we have is "run the replace operation from scratch". This is further exacerbated by the attempt to avoid needing the INCOMPLETE flag to create an atomic swap. This means we can create a second active attribute of the same name before we remove the original. If we fail at any point after the create but before the removal has completed, we end up with duplicate attributes in the attr btree and recovery only tries to replace one of them. There are several other failure modes where we can leave partially allocated remote attributes that expose stale data, partially free remote attributes that enable UAF based stale data exposure, etc. TO fix this, we need a different algorithm for replace operations when LARP is enabled. Luckily, it's not that complex if we take the right first step. That is, the first thing we log is the attri intent with the new name/value pair and mark the old attr as INCOMPLETE in the same transaction. From there, we then remove the old attr and keep relogging the new name/value in the intent, such that we always know that we have to create the new attr in recovery. Once the old attr is removed, we then run a normal ATTR_CREATE operation relogging the intent as we go. If the new attr is local, then it gets created in a single atomic transaction that also logs the final intent done. If the new attr is remote, the we set INCOMPLETE on the new attr while we allocate and set the remote value, and then we clear the INCOMPLETE flag at in the last transaction taht logs the final intent done. If we fail at any point in this algorithm, log recovery will always see the same state on disk: the new name/value in the intent, and either an INCOMPLETE attr or no attr in the attr btree. If we find an INCOMPLETE attr, we run the full replace starting with removing the INCOMPLETE attr. If we don't find it, then we simply create the new attr. Notably, recovery of a failed create that has an INCOMPLETE flag set is now the same - we start with the lookup of the INCOMPLETE attr, and if that exists then we do the full replace recovery process, otherwise we just create the new attr. Hence changing the way we do the replace operation when LARP is enabled allows us to use the same log recovery algorithm for both the ATTR_CREATE and ATTR_REPLACE operations. This is also the same algorithm we use for runtime ATTR_REPLACE operations (except for the step setting up the initial conditions). The result is that: - ATTR_CREATE uses the same algorithm regardless of whether LARP is enabled or not - ATTR_REPLACE with larp=0 is identical to the old algorithm - ATTR_REPLACE with larp=1 runs an unmodified attr removal algorithm from the larp=0 code and then runs the unmodified ATTR_CREATE code. - log recovery when larp=1 runs the same ATTR_REPLACE algorithm as it uses at runtime. Because the state machine is now quite clean, changing the algorithm is really just a case of changing the initial state and how the states link together for the ATTR_REPLACE case. Hence it's not a huge amount of code for what is a fairly substantial rework of the attr logging and recovery algorithm.... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
if (args->op_flags & XFS_DA_OP_RECOVERY)
return 0;
return error;
} else if (error != -EEXIST)
return error;
xfs_attr3_leaf_remove(bp, args);
/*
* If the result is small enough, shrink it all into the inode.
*/
forkoff = xfs_attr_shortform_allfit(bp, dp);
if (forkoff)
return xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
return 0;
}
/*
* Look up a name in a leaf attribute list structure.
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*
* Returns 0 on successful retrieval, otherwise an error.
*/
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
struct xfs_buf *bp;
int error;
trace_xfs_attr_leaf_get(args);
error = xfs_attr_leaf_hasname(args, &bp);
if (error == -ENOATTR) {
xfs_trans_brelse(args->trans, bp);
return error;
} else if (error != -EEXIST)
return error;
error = xfs_attr3_leaf_getvalue(bp, args);
xfs_trans_brelse(args->trans, bp);
return error;
}
/* Return EEXIST if attr is found, or ENOATTR if not. */
STATIC int
xfs_attr_node_lookup(
struct xfs_da_args *args,
struct xfs_da_state *state)
{
int retval, error;
/*
* Search to see if name exists, and get back a pointer to it.
*/
error = xfs_da3_node_lookup_int(state, &retval);
xfs: Fix the free logic of state in xfs_attr_node_hasname When testing xfstests xfs/126 on lastest upstream kernel, it will hang on some machine. Adding a getxattr operation after xattr corrupted, I can reproduce it 100%. The deadlock as below: [983.923403] task:setfattr state:D stack: 0 pid:17639 ppid: 14687 flags:0x00000080 [ 983.923405] Call Trace: [ 983.923410] __schedule+0x2c4/0x700 [ 983.923412] schedule+0x37/0xa0 [ 983.923414] schedule_timeout+0x274/0x300 [ 983.923416] __down+0x9b/0xf0 [ 983.923451] ? xfs_buf_find.isra.29+0x3c8/0x5f0 [xfs] [ 983.923453] down+0x3b/0x50 [ 983.923471] xfs_buf_lock+0x33/0xf0 [xfs] [ 983.923490] xfs_buf_find.isra.29+0x3c8/0x5f0 [xfs] [ 983.923508] xfs_buf_get_map+0x4c/0x320 [xfs] [ 983.923525] xfs_buf_read_map+0x53/0x310 [xfs] [ 983.923541] ? xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923560] xfs_trans_read_buf_map+0x1cf/0x360 [xfs] [ 983.923575] ? xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923590] xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923606] xfs_da3_node_read+0x1f/0x40 [xfs] [ 983.923621] xfs_da3_node_lookup_int+0x69/0x4a0 [xfs] [ 983.923624] ? kmem_cache_alloc+0x12e/0x270 [ 983.923637] xfs_attr_node_hasname+0x6e/0xa0 [xfs] [ 983.923651] xfs_has_attr+0x6e/0xd0 [xfs] [ 983.923664] xfs_attr_set+0x273/0x320 [xfs] [ 983.923683] xfs_xattr_set+0x87/0xd0 [xfs] [ 983.923686] __vfs_removexattr+0x4d/0x60 [ 983.923688] __vfs_removexattr_locked+0xac/0x130 [ 983.923689] vfs_removexattr+0x4e/0xf0 [ 983.923690] removexattr+0x4d/0x80 [ 983.923693] ? __check_object_size+0xa8/0x16b [ 983.923695] ? strncpy_from_user+0x47/0x1a0 [ 983.923696] ? getname_flags+0x6a/0x1e0 [ 983.923697] ? _cond_resched+0x15/0x30 [ 983.923699] ? __sb_start_write+0x1e/0x70 [ 983.923700] ? mnt_want_write+0x28/0x50 [ 983.923701] path_removexattr+0x9b/0xb0 [ 983.923702] __x64_sys_removexattr+0x17/0x20 [ 983.923704] do_syscall_64+0x5b/0x1a0 [ 983.923705] entry_SYSCALL_64_after_hwframe+0x65/0xca [ 983.923707] RIP: 0033:0x7f080f10ee1b When getxattr calls xfs_attr_node_get function, xfs_da3_node_lookup_int fails with EFSCORRUPTED in xfs_attr_node_hasname because we have use blocktrash to random it in xfs/126. So it free state in internal and xfs_attr_node_get doesn't do xfs_buf_trans release job. Then subsequent removexattr will hang because of it. This bug was introduced by kernel commit 07120f1abdff ("xfs: Add xfs_has_attr and subroutines"). It adds xfs_attr_node_hasname helper and said caller will be responsible for freeing the state in this case. But xfs_attr_node_hasname will free state itself instead of caller if xfs_da3_node_lookup_int fails. Fix this bug by moving the step of free state into caller. Also, use "goto error/out" instead of returning error directly in xfs_attr_node_addname_find_attr and xfs_attr_node_removename_setup function because we should free state ourselves. Fixes: 07120f1abdff ("xfs: Add xfs_has_attr and subroutines") Signed-off-by: Yang Xu <xuyang2018.jy@fujitsu.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Darrick J. Wong <djwong@kernel.org>
2021-11-24 21:06:02 +03:00
if (error)
return error;
xfs: Fix the free logic of state in xfs_attr_node_hasname When testing xfstests xfs/126 on lastest upstream kernel, it will hang on some machine. Adding a getxattr operation after xattr corrupted, I can reproduce it 100%. The deadlock as below: [983.923403] task:setfattr state:D stack: 0 pid:17639 ppid: 14687 flags:0x00000080 [ 983.923405] Call Trace: [ 983.923410] __schedule+0x2c4/0x700 [ 983.923412] schedule+0x37/0xa0 [ 983.923414] schedule_timeout+0x274/0x300 [ 983.923416] __down+0x9b/0xf0 [ 983.923451] ? xfs_buf_find.isra.29+0x3c8/0x5f0 [xfs] [ 983.923453] down+0x3b/0x50 [ 983.923471] xfs_buf_lock+0x33/0xf0 [xfs] [ 983.923490] xfs_buf_find.isra.29+0x3c8/0x5f0 [xfs] [ 983.923508] xfs_buf_get_map+0x4c/0x320 [xfs] [ 983.923525] xfs_buf_read_map+0x53/0x310 [xfs] [ 983.923541] ? xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923560] xfs_trans_read_buf_map+0x1cf/0x360 [xfs] [ 983.923575] ? xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923590] xfs_da_read_buf+0xcf/0x120 [xfs] [ 983.923606] xfs_da3_node_read+0x1f/0x40 [xfs] [ 983.923621] xfs_da3_node_lookup_int+0x69/0x4a0 [xfs] [ 983.923624] ? kmem_cache_alloc+0x12e/0x270 [ 983.923637] xfs_attr_node_hasname+0x6e/0xa0 [xfs] [ 983.923651] xfs_has_attr+0x6e/0xd0 [xfs] [ 983.923664] xfs_attr_set+0x273/0x320 [xfs] [ 983.923683] xfs_xattr_set+0x87/0xd0 [xfs] [ 983.923686] __vfs_removexattr+0x4d/0x60 [ 983.923688] __vfs_removexattr_locked+0xac/0x130 [ 983.923689] vfs_removexattr+0x4e/0xf0 [ 983.923690] removexattr+0x4d/0x80 [ 983.923693] ? __check_object_size+0xa8/0x16b [ 983.923695] ? strncpy_from_user+0x47/0x1a0 [ 983.923696] ? getname_flags+0x6a/0x1e0 [ 983.923697] ? _cond_resched+0x15/0x30 [ 983.923699] ? __sb_start_write+0x1e/0x70 [ 983.923700] ? mnt_want_write+0x28/0x50 [ 983.923701] path_removexattr+0x9b/0xb0 [ 983.923702] __x64_sys_removexattr+0x17/0x20 [ 983.923704] do_syscall_64+0x5b/0x1a0 [ 983.923705] entry_SYSCALL_64_after_hwframe+0x65/0xca [ 983.923707] RIP: 0033:0x7f080f10ee1b When getxattr calls xfs_attr_node_get function, xfs_da3_node_lookup_int fails with EFSCORRUPTED in xfs_attr_node_hasname because we have use blocktrash to random it in xfs/126. So it free state in internal and xfs_attr_node_get doesn't do xfs_buf_trans release job. Then subsequent removexattr will hang because of it. This bug was introduced by kernel commit 07120f1abdff ("xfs: Add xfs_has_attr and subroutines"). It adds xfs_attr_node_hasname helper and said caller will be responsible for freeing the state in this case. But xfs_attr_node_hasname will free state itself instead of caller if xfs_da3_node_lookup_int fails. Fix this bug by moving the step of free state into caller. Also, use "goto error/out" instead of returning error directly in xfs_attr_node_addname_find_attr and xfs_attr_node_removename_setup function because we should free state ourselves. Fixes: 07120f1abdff ("xfs: Add xfs_has_attr and subroutines") Signed-off-by: Yang Xu <xuyang2018.jy@fujitsu.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Darrick J. Wong <djwong@kernel.org>
2021-11-24 21:06:02 +03:00
return retval;
}
/*========================================================================
* External routines when attribute list size > geo->blksize
*========================================================================*/
STATIC int
xfs_attr_node_addname_find_attr(
struct xfs_attr_intent *attr)
{
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
struct xfs_da_args *args = attr->xattri_da_args;
int error;
/*
* Search to see if name already exists, and get back a pointer
* to where it should go.
*/
xfs_attr_item_init_da_state(attr);
error = xfs_attr_node_lookup(args, attr->xattri_da_state);
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
switch (error) {
case -ENOATTR:
if (args->op_flags & XFS_DA_OP_REPLACE)
goto error;
break;
case -EEXIST:
if (!(args->op_flags & XFS_DA_OP_REPLACE))
goto error;
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2014-05-06 01:37:31 +04:00
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
trace_xfs_attr_node_replace(args);
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2014-05-06 01:37:31 +04:00
/*
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
* Save the existing remote attr state so that the current
* values reflect the state of the new attribute we are about to
xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2014-05-06 01:37:31 +04:00
* add, not the attribute we just found and will remove later.
*/
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
xfs_attr_save_rmt_blk(args);
break;
case 0:
break;
default:
goto error;
}
return 0;
error:
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
if (attr->xattri_da_state) {
xfs_da_state_free(attr->xattri_da_state);
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
attr->xattri_da_state = NULL;
}
xfs: use XFS_DA_OP flags in deferred attr ops We currently store the high level attr operation in args->attr_flags. This field contains what the VFS is telling us to do, but don't necessarily match what we are doing in the low level modification state machine. e.g. XATTR_REPLACE implies both XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME because it is doing both a remove and adding a new attr. However, deep in the individual state machine operations, we check errors against this high level VFS op flags, not the low level XFS_DA_OP flags. Indeed, we don't even have a low level flag for a REMOVE operation, so the only way we know we are doing a remove is the complete absence of XATTR_REPLACE, XATTR_CREATE, XFS_DA_OP_ADDNAME and XFS_DA_OP_RENAME. And because there are other flags in these fields, this is a pain to check if we need to. As the XFS_DA_OP flags are only needed once the deferred operations are set up, set these flags appropriately when we set the initial operation state. We also introduce a XFS_DA_OP_REMOVE flag to make it easy to know that we are doing a remove operation. With these, we can remove the use of XATTR_REPLACE and XATTR_CREATE in low level lookup operations, and manipulate the low level flags according to the low level context that is operating. e.g. log recovery does not have a VFS xattr operation state to copy into args->attr_flags, and the low level state machine ops we do for recovery do not match the high level VFS operations that were in progress when the system failed... Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-12 08:12:56 +03:00
return error;
}
/*
* Add a name to a Btree-format attribute list.
*
* This will involve walking down the Btree, and may involve splitting
* leaf nodes and even splitting intermediate nodes up to and including
* the root node (a special case of an intermediate node).
*/
static int
xfs_attr_node_try_addname(
struct xfs_attr_intent *attr)
{
struct xfs_da_state *state = attr->xattri_da_state;
struct xfs_da_state_blk *blk;
int error;
trace_xfs_attr_node_addname(state->args);
blk = &state->path.blk[state->path.active-1];
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
error = xfs_attr3_leaf_add(blk->bp, state->args);
if (error == -ENOSPC) {
if (state->path.active == 1) {
/*
* Its really a single leaf node, but it had
* out-of-line values so it looked like it *might*
* have been a b-tree. Let the caller deal with this.
*/
goto out;
}
/*
* Split as many Btree elements as required.
* This code tracks the new and old attr's location
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
error = xfs_da3_split(state);
if (error)
goto out;
} else {
/*
* Addition succeeded, update Btree hashvals.
*/
xfs_da3_fixhashpath(state, &state->path);
}
out:
xfs_da_state_free(state);
xfs: don't leak da state when freeing the attr intent item kmemleak reported that we lost an xfs_da_state while removing xattrs in generic/020: unreferenced object 0xffff88801c0e4b40 (size 480): comm "attr", pid 30515, jiffies 4294931061 (age 5.960s) hex dump (first 32 bytes): 78 bc 65 07 00 c9 ff ff 00 30 60 1c 80 88 ff ff x.e......0`..... 02 00 00 00 00 00 00 00 80 18 83 4e 80 88 ff ff ...........N.... backtrace: [<ffffffffa023ef4a>] xfs_da_state_alloc+0x1a/0x30 [xfs] [<ffffffffa021b6f3>] xfs_attr_node_hasname+0x23/0x90 [xfs] [<ffffffffa021c6f1>] xfs_attr_set_iter+0x441/0xa30 [xfs] [<ffffffffa02b5104>] xfs_xattri_finish_update+0x44/0x80 [xfs] [<ffffffffa02b515e>] xfs_attr_finish_item+0x1e/0x40 [xfs] [<ffffffffa0244744>] xfs_defer_finish_noroll+0x184/0x740 [xfs] [<ffffffffa02a6473>] __xfs_trans_commit+0x153/0x3e0 [xfs] [<ffffffffa021d149>] xfs_attr_set+0x469/0x7e0 [xfs] [<ffffffffa02a78d9>] xfs_xattr_set+0x89/0xd0 [xfs] [<ffffffff812e6512>] __vfs_removexattr+0x52/0x70 [<ffffffff812e6a08>] __vfs_removexattr_locked+0xb8/0x150 [<ffffffff812e6af6>] vfs_removexattr+0x56/0x100 [<ffffffff812e6bf8>] removexattr+0x58/0x90 [<ffffffff812e6cce>] path_removexattr+0x9e/0xc0 [<ffffffff812e6d44>] __x64_sys_lremovexattr+0x14/0x20 [<ffffffff81786b35>] do_syscall_64+0x35/0x80 I think this is a consequence of xfs_attr_node_removename_setup attaching a new da(btree) state to xfs_attr_item and never freeing it. I /think/ it's the case that the remove paths could detach the da state earlier in the remove state machine since nothing else accesses the state. However, let's future-proof the new xattr code by adding a catch-all when we free the xfs_attr_item to make sure we never leak the da state. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Allison Henderson <allison.henderson@oracle.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
2022-05-20 07:41:34 +03:00
attr->xattri_da_state = NULL;
return error;
}
static int
xfs_attr_node_removename(
struct xfs_da_args *args,
struct xfs_da_state *state)
{
struct xfs_da_state_blk *blk;
int retval;
/*
* Remove the name and update the hashvals in the tree.
*/
blk = &state->path.blk[state->path.active-1];
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
retval = xfs_attr3_leaf_remove(blk->bp, args);
xfs_da3_fixhashpath(state, &state->path);
return retval;
}
static int
xfs_attr_node_remove_attr(
struct xfs_attr_intent *attr)
{
struct xfs_da_args *args = attr->xattri_da_args;
struct xfs_da_state *state = xfs_da_state_alloc(args);
int retval = 0;
int error = 0;
/*
* The attr we are removing has already been marked incomplete, so
* we need to set the filter appropriately to re-find the "old"
* attribute entry after any split ops.
*/
args->attr_filter |= XFS_ATTR_INCOMPLETE;
error = xfs_da3_node_lookup_int(state, &retval);
if (error)
goto out;
error = xfs_attr_node_removename(args, state);
/*
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
error = xfs_da3_join(state);
if (error)
goto out;
}
retval = error = 0;
out:
xfs_da_state_free(state);
if (error)
return error;
return retval;
}
/*
* Retrieve the attribute data from a node attribute list.
*
* This routine gets called for any attribute fork that has more than one
* block, ie: both true Btree attr lists and for single-leaf-blocks with
* "remote" values taking up more blocks.
*
* Returns 0 on successful retrieval, otherwise an error.
*/
STATIC int
xfs_attr_node_get(
struct xfs_da_args *args)
{
struct xfs_da_state *state;
struct xfs_da_state_blk *blk;
int i;
int error;
trace_xfs_attr_node_get(args);
/*
* Search to see if name exists, and get back a pointer to it.
*/
state = xfs_da_state_alloc(args);
error = xfs_attr_node_lookup(args, state);
if (error != -EEXIST)
goto out_release;
/*
* Get the value, local or "remote"
*/
blk = &state->path.blk[state->path.active - 1];
error = xfs_attr3_leaf_getvalue(blk->bp, args);
/*
* If not in a transaction, we have to release all the buffers.
*/
out_release:
for (i = 0; i < state->path.active; i++) {
xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
xfs_da_state_free(state);
return error;
}
/* Returns true if the attribute entry name is valid. */
bool
xfs_attr_namecheck(
const void *name,
size_t length)
{
/*
* MAXNAMELEN includes the trailing null, but (name/length) leave it
* out, so use >= for the length check.
*/
if (length >= MAXNAMELEN)
return false;
/* There shouldn't be any nulls here */
return !memchr(name, 0, length);
}
int __init
xfs_attr_intent_init_cache(void)
{
xfs_attr_intent_cache = kmem_cache_create("xfs_attr_intent",
sizeof(struct xfs_attr_intent),
0, 0, NULL);
return xfs_attr_intent_cache != NULL ? 0 : -ENOMEM;
}
void
xfs_attr_intent_destroy_cache(void)
{
kmem_cache_destroy(xfs_attr_intent_cache);
xfs_attr_intent_cache = NULL;
}