0020a190cf
The AIL pushing is stalling on log forces when it comes across pinned items. This is happening on removal workloads where the AIL is dominated by stale items that are removed from the AIL when the checkpoint that marks the items stale is committed to the journal. This results in relatively few items in the AIL, but those that remain are often pinned, because the directories that items are being removed from are still being logged. As a result, many push cycles through the CIL will first issue a blocking log force to unpin the items. This can take some time to complete, with tracing regularly showing push delays of half a second and sometimes up into the range of several seconds. Sequences like this aren't uncommon:

....
  399.829437:  xfsaild: last lsn 0x11002dd000 count 101 stuck 101 flushing 0 tout 20
                <wanted 20ms, got 270ms delay>
  400.099622:  xfsaild: target 0x11002f3600, prev 0x11002f3600, last lsn 0x0
  400.099623:  xfsaild: first lsn 0x11002f3600
  400.099679:  xfsaild: last lsn 0x1100305000 count 16 stuck 11 flushing 0 tout 50
                <wanted 50ms, got 500ms delay>
  400.589348:  xfsaild: target 0x110032e600, prev 0x11002f3600, last lsn 0x0
  400.589349:  xfsaild: first lsn 0x1100305000
  400.589595:  xfsaild: last lsn 0x110032e600 count 156 stuck 101 flushing 30 tout 50
                <wanted 50ms, got 460ms delay>
  400.950341:  xfsaild: target 0x1100353000, prev 0x110032e600, last lsn 0x0
  400.950343:  xfsaild: first lsn 0x1100317c00
  400.950436:  xfsaild: last lsn 0x110033d200 count 105 stuck 101 flushing 0 tout 20
                <wanted 20ms, got 200ms delay>
  401.142333:  xfsaild: target 0x1100361600, prev 0x1100353000, last lsn 0x0
  401.142334:  xfsaild: first lsn 0x110032e600
  401.142535:  xfsaild: last lsn 0x1100353000 count 122 stuck 101 flushing 8 tout 10
                <wanted 10ms, got 10ms delay>
  401.154323:  xfsaild: target 0x1100361600, prev 0x1100361600, last lsn 0x1100353000
  401.154328:  xfsaild: first lsn 0x1100353000
  401.154389:  xfsaild: last lsn 0x1100353000 count 101 stuck 101 flushing 0 tout 20
                <wanted 20ms, got 300ms delay>
  401.451525:  xfsaild: target 0x1100361600, prev 0x1100361600, last lsn 0x0
  401.451526:  xfsaild: first lsn 0x1100353000
  401.451804:  xfsaild: last lsn 0x1100377200 count 170 stuck 22 flushing 122 tout 50
                <wanted 50ms, got 500ms delay>
  401.933581:  xfsaild: target 0x1100361600, prev 0x1100361600, last lsn 0x0
....

In each of these cases, every AIL pass saw 101 log items stuck on the AIL (pinned), with very few other items being found. Each pass a log force was issued, and the delay between last/first is the sleep time plus the sync log force time.

Some of these 101 items pinned the tail of the log. The tail of the log does slowly creep forward (first lsn), but the problem is that the log is actually out of reservation space because it has been running so many transactions whose stale items never reach the AIL but still consume log space. Hence we have a largely empty AIL, with long-term pins on items that pin the tail of the log and that don't get pushed frequently enough to keep log space available.

The problem is the hundreds of milliseconds that we block in the log force pushing the CIL out to disk. The AIL should not be stalled like this - it needs to run and flush items that are at the tail of the log with minimal latency. What we really need to do is trigger a log flush, but then not wait for it at all - we've already done our waiting for stuff to complete when we backed off prior to the log force being issued. Even if we remove XFS_LOG_SYNC from the xfs_log_force() call, we still do a blocking flush of the CIL and that is what is causing the issue.
Hence we need a new interface for the CIL to trigger an immediate background push of the CIL to get it moving faster, but not to wait on that to occur. While the CIL is pushing, the AIL can also be pushing.

We already have an internal interface to do this - xlog_cil_push_now() - but we need a wrapper for it to be used externally. xlog_cil_force_seq() can easily be extended to do what we need, as it already implements the synchronous CIL push via xlog_cil_push_now(). Add the necessary flags and "push current sequence" semantics to xlog_cil_force_seq() and convert the AIL pushing to use it.

One of the complexities here is that the CIL push does not guarantee that the commit record for the CIL checkpoint is written to disk. The current log force ensures this by submitting the current ACTIVE iclog that the commit record was written to. We need the CIL to actually write this commit record to disk for an async push to ensure that the checkpoint actually makes it to disk and unpins the pinned items in the checkpoint on completion. Hence we need to pass down to the CIL push that we are doing an async flush so that it can switch out the commit_iclog if necessary to get written to disk when the commit iclog is finally released.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
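To make the change concrete, here is a minimal C sketch of the AIL push path before and after. It assumes the external wrapper around xlog_cil_push_now() is named xlog_cil_flush() and takes the log as its only argument; that name, its signature, and the simplified call site are assumptions made for this illustration (it relies on the XFS internal types struct xlog and struct xfs_mount) and are not a verbatim quote of the patch.

/*
 * Illustrative sketch only - not verbatim kernel code.  The wrapper name
 * xlog_cil_flush() and its signature are assumptions for this example.
 */

/* Assumed external wrapper around the internal xlog_cil_push_now(). */
void xlog_cil_flush(struct xlog *log);

/* Called from the AIL push when it finds stuck, pinned items. */
static void example_ail_unpin_kick(struct xfs_mount *mp)
{
	/*
	 * Before: a synchronous log force.  This blocks the AIL push
	 * thread for the whole CIL flush and iclog write - the
	 * hundreds-of-milliseconds stalls seen in the traces above.
	 *
	 *	xfs_log_force(mp, XFS_LOG_SYNC);
	 *
	 * After: kick an immediate background push of the current CIL
	 * sequence and return without waiting.  The async flag passed
	 * down to the CIL push makes it switch out the commit_iclog so
	 * the checkpoint's commit record still reaches disk and the
	 * pinned items are unpinned when the checkpoint completes.
	 */
	xlog_cil_flush(mp->m_log);
}

The design point is that the AIL has already done its waiting during backoff, so the force only needs to start the CIL push; completion is observed later through items being unpinned rather than by blocking the xfsaild thread.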
149 lines · 4.2 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

int	xfs_log_force(struct xfs_mount *mp, uint flags);
int	xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
int	xfs_log_mount(struct xfs_mount *mp,
		struct xfs_buftarg *log_target,
		xfs_daddr_t start_block,
		int num_bblocks);
int	xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	xfs_log_space_wake(struct xfs_mount *mp);
int	xfs_log_reserve(struct xfs_mount *mp,
		int length,
		int count,
		struct xlog_ticket **ticket,
		uint8_t clientid,
		bool permanent);
int	xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	xfs_log_unmount(struct xfs_mount *mp);
bool	xfs_log_writable(struct xfs_mount *mp);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool	xlog_force_shutdown(struct xlog *log, int shutdown_flags);

void	xlog_use_incompat_feat(struct xlog *log);
void	xlog_drop_incompat_feat(struct xlog *log);

#endif	/* __XFS_LOG_H__ */
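As a usage note for the iovec helpers declared in this header, the sketch below shows how a log item formatting routine typically copies its format structure into the CIL buffer. Only xlog_prepare_iovec(), xlog_copy_iovec() and xlog_finish_iovec() come from the header above, and XLOG_REG_TYPE_ICORE comes from xfs_log_format.h; the example_* struct and function names are placeholders invented for this illustration.

/*
 * Illustrative sketch only - the example_* names are placeholders, not
 * part of xfs_log.h.
 */
struct example_log_format {
	uint32_t	elf_magic;
	uint32_t	elf_size;
	uint64_t	elf_ino;
};

static void
example_item_format(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		struct example_log_format *fmt)
{
	/*
	 * xlog_copy_iovec() is xlog_prepare_iovec() + memcpy() +
	 * xlog_finish_iovec(): lv_buf_len is rounded up to an 8-byte
	 * boundary so the next region stays naturally aligned, while
	 * lv_bytes accounts only the unpadded length, as described in
	 * the comment above xlog_finish_iovec().
	 */
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ICORE, fmt, sizeof(*fmt));
}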