xfs: sync work is now only periodic log work
The only thing the periodic sync work does now is flush the AIL and idle the log. These are really functions of the log code, so move the work to xfs_log.c and rename it appropriately. The only wart that this leaves behind is the xfssyncd_centisecs sysctl; otherwise xfssyncd is dead. Clean up the comments that referred to xfssyncd to reflect its passing.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
commit f661f1e0bf
parent 7f7bebefba
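Background for the mechanism this patch moves: the new xfs_log_work_queue()/xfs_log_worker() pair in the diff below follow the standard Linux self-rearming delayed_work pattern, where the work function re-queues itself at the end of each run and the owner cancels it with cancel_delayed_work_sync() at teardown. The following is a minimal, hypothetical module sketch of that pattern, not XFS code: the names periodic_work, periodic_worker and period_centisecs are illustrative only, and system_wq stands in for the dedicated workqueue (xfs_syncd_wq) used in the patch.

/*
 * Minimal sketch of a self-rearming delayed_work, analogous to the
 * xfs_log_worker()/xfs_log_work_queue() pair added by this commit.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work periodic_work;
static unsigned int period_centisecs = 3000;	/* like xfssyncd_centisecs */

static void periodic_queue(void)
{
	/* system_wq stands in for the dedicated workqueue XFS uses */
	queue_delayed_work(system_wq, &periodic_work,
			   msecs_to_jiffies(period_centisecs * 10));
}

static void periodic_worker(struct work_struct *work)
{
	/* ... do the periodic job here (flush/cover the log in the patch) ... */

	/* queue us up again, as xfs_log_worker() does */
	periodic_queue();
}

static int __init periodic_init(void)
{
	INIT_DELAYED_WORK(&periodic_work, periodic_worker);
	periodic_queue();
	return 0;
}

static void __exit periodic_exit(void)
{
	/* matches the cancel_delayed_work_sync() calls added in the patch */
	cancel_delayed_work_sync(&periodic_work);
}

module_init(periodic_init);
module_exit(periodic_exit);
MODULE_LICENSE("GPL");

In the patch itself the delay comes from the existing xfssyncd_centisecs sysctl, converted with msecs_to_jiffies(xfs_syncd_centisecs * 10).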
fs/xfs/xfs_log.c

@@ -34,6 +34,7 @@
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
+#include "xfs_fsops.h"
 
 kmem_zone_t	*xfs_log_ticket_zone;
 
@@ -679,25 +680,29 @@ out:
 }
 
 /*
- * Finish the recovery of the file system.  This is separate from
- * the xfs_log_mount() call, because it depends on the code in
- * xfs_mountfs() to read in the root and real-time bitmap inodes
- * between calling xfs_log_mount() and here.
+ * Finish the recovery of the file system.  This is separate from the
+ * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
+ * in the root and real-time bitmap inodes between calling xfs_log_mount() and
+ * here.
  *
- * mp	- ubiquitous xfs mount point structure
+ * If we finish recovery successfully, start the background log work. If we are
+ * not doing recovery, then we have a RO filesystem and we don't need to start
+ * it.
  */
 int
 xfs_log_mount_finish(xfs_mount_t *mp)
 {
-	int	error;
+	int	error = 0;
 
-	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
+	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
 		error = xlog_recover_finish(mp->m_log);
-	else {
-		error = 0;
+		if (!error)
+			xfs_log_work_queue(mp);
+	} else {
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 	}
 
+
 	return error;
 }
 
@@ -858,7 +863,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
-	cancel_delayed_work_sync(&mp->m_sync_work);
+	cancel_delayed_work_sync(&mp->m_log->l_work);
 	xfs_trans_ail_destroy(mp);
 	xlog_dealloc_log(mp->m_log);
 }
@@ -1161,6 +1166,40 @@ done:
 }	/* xlog_get_iclog_buffer_size */
 
 
+void
+xfs_log_work_queue(
+	struct xfs_mount	*mp)
+{
+	queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
+				msecs_to_jiffies(xfs_syncd_centisecs * 10));
+}
+
+/*
+ * Every sync period we need to unpin all items in the AIL and push them to
+ * disk. If there is nothing dirty, then we might need to cover the log to
+ * indicate that the filesystem is idle.
+ */
+void
+xfs_log_worker(
+	struct work_struct	*work)
+{
+	struct xlog		*log = container_of(to_delayed_work(work),
+						struct xlog, l_work);
+	struct xfs_mount	*mp = log->l_mp;
+
+	/* dgc: errors ignored - not fatal and nowhere to report them */
+	if (xfs_log_need_covered(mp))
+		xfs_fs_log_dummy(mp);
+	else
+		xfs_log_force(mp, 0);
+
+	/* start pushing all the metadata that is currently dirty */
+	xfs_ail_push_all(mp->m_ail);
+
+	/* queue us up again */
+	xfs_log_work_queue(mp);
+}
+
 /*
  * This routine initializes some of the log structure for a given mount point.
  * Its primary purpose is to fill in enough, so recovery can occur.  However,
@@ -1195,6 +1234,7 @@ xlog_alloc_log(
 	log->l_logBBsize   = num_bblks;
 	log->l_covered_state = XLOG_STATE_COVER_IDLE;
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
+	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
 
 	log->l_prev_block  = -1;
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
@@ -3700,3 +3740,4 @@ xlog_iclogs_empty(
 	} while (iclog != log->l_iclog);
 	return 1;
 }
+
fs/xfs/xfs_log.h

@@ -181,5 +181,8 @@ int	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
 				xfs_lsn_t *commit_lsn, int flags);
 bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
 
+void	xfs_log_work_queue(struct xfs_mount *mp);
+void	xfs_log_worker(struct work_struct *work);
+
 #endif
 #endif	/* __XFS_LOG_H__ */
fs/xfs/xfs_log_priv.h

@@ -495,6 +495,7 @@ struct xlog {
 	struct xfs_buf		*l_xbuf;	/* extra buffer for log
 						 * wrapping */
 	struct xfs_buftarg	*l_targ;	/* buftarg of log */
+	struct delayed_work	l_work;		/* background flush work */
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
 	struct list_head	*l_buf_cancel_table;
fs/xfs/xfs_mount.h

@@ -197,7 +197,6 @@ typedef struct xfs_mount {
 	struct mutex		m_icsb_mutex;	/* balancer sync lock */
 #endif
 	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
-	struct delayed_work	m_sync_work;	/* background sync work */
 	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
 	struct work_struct	m_flush_work;	/* background inode flush */
 	__int64_t		m_update_flags;	/* sb flags we need to update
fs/xfs/xfs_super.c

@@ -1005,7 +1005,6 @@ xfs_fs_put_super(
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
-	cancel_delayed_work_sync(&mp->m_sync_work);
 	cancel_work_sync(&mp->m_flush_work);
 
 	xfs_filestream_unmount(mp);
@@ -1040,10 +1039,10 @@ xfs_fs_sync_fs(
 	if (laptop_mode) {
 		/*
 		 * The disk must be active because we're syncing.
-		 * We schedule xfssyncd now (now that the disk is
+		 * We schedule log work now (now that the disk is
 		 * active) instead of later (when it might not be).
 		 */
-		flush_delayed_work(&mp->m_sync_work);
+		flush_delayed_work(&mp->m_log->l_work);
 	}
 
 	return 0;
@@ -1200,7 +1199,7 @@ xfs_fs_remount(
 		 * value if it is non-zero, otherwise go with the default.
 		 */
 		xfs_restore_resvblks(mp);
-		xfs_syncd_queue_sync(mp);
+		xfs_log_work_queue(mp);
 	}
 
 	/* rw -> ro */
@@ -1246,7 +1245,7 @@ xfs_fs_unfreeze(
 	struct xfs_mount	*mp = XFS_M(sb);
 
 	xfs_restore_resvblks(mp);
-	xfs_syncd_queue_sync(mp);
+	xfs_log_work_queue(mp);
 	return 0;
 }
 
@@ -1326,7 +1325,6 @@ xfs_fs_fill_super(
 	mutex_init(&mp->m_growlock);
 	atomic_set(&mp->m_active_trans, 0);
 	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
-	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
 
 	mp->m_super = sb;
@@ -1410,12 +1408,6 @@ xfs_fs_fill_super(
 		goto out_unmount;
 	}
 
-	/*
-	 * The filesystem is successfully mounted, so we can start background
-	 * sync work now.
-	 */
-	xfs_syncd_queue_sync(mp);
-
 	return 0;
 
  out_filestream_unmount:
fs/xfs/xfs_sync.c

@@ -19,6 +19,7 @@
 #include "xfs_fs.h"
 #include "xfs_types.h"
 #include "xfs_log.h"
+#include "xfs_log_priv.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
@@ -344,8 +345,8 @@ xfs_quiesce_attr(
 	/* flush all pending changes from the AIL */
 	xfs_ail_push_all_sync(mp->m_ail);
 
-	/* stop background sync work */
-	cancel_delayed_work_sync(&mp->m_sync_work);
+	/* stop background log work */
+	cancel_delayed_work_sync(&mp->m_log->l_work);
 
 	/*
 	 * Just warn here till VFS can correctly support
@@ -376,40 +377,6 @@ xfs_quiesce_attr(
 	xfs_buf_unlock(mp->m_sb_bp);
 }
 
-void
-xfs_syncd_queue_sync(
-	struct xfs_mount	*mp)
-{
-	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
-				msecs_to_jiffies(xfs_syncd_centisecs * 10));
-}
-
-/*
- * Every sync period we need to push dirty metadata and try to cover the log
- * to indicate the filesystem is idle and not frozen.
- */
-void
-xfs_sync_worker(
-	struct work_struct *work)
-{
-	struct xfs_mount *mp = container_of(to_delayed_work(work),
-					struct xfs_mount, m_sync_work);
-	int		error;
-
-	/* dgc: errors ignored here */
-	if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
-	    xfs_log_need_covered(mp))
-		error = xfs_fs_log_dummy(mp);
-	else
-		xfs_log_force(mp, 0);
-
-	/* start pushing all the metadata that is currently dirty */
-	xfs_ail_push_all(mp->m_ail);
-
-	/* queue us up again */
-	xfs_syncd_queue_sync(mp);
-}
-
 /*
  * Queue a new inode reclaim pass if there are reclaimable inodes and there
  * isn't a reclaim pass already in progress. By default it runs every 5s based
fs/xfs/xfs_sync.h

@@ -26,8 +26,6 @@ struct xfs_perag;
 
 extern struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
 
-void xfs_syncd_queue_sync(struct xfs_mount *mp);
-void xfs_sync_worker(struct work_struct *work);
 void xfs_flush_worker(struct work_struct *work);
 void xfs_reclaim_worker(struct work_struct *work);
 