Merge pull request #10525 from poettering/journal-vaccum-all
journald: add ability to vacuum active files too
commit 84ac98faa8
@@ -749,32 +749,28 @@
<term><option>--vacuum-time=</option></term>
<term><option>--vacuum-files=</option></term>

<listitem><para>Removes the oldest archived journal files until the disk
space they use falls below the specified size (specified with
the usual <literal>K</literal>, <literal>M</literal>,
<literal>G</literal> and <literal>T</literal> suffixes), or all
archived journal files contain no data older than the specified
timespan (specified with the usual <literal>s</literal>,
<literal>m</literal>, <literal>h</literal>,
<literal>days</literal>, <literal>months</literal>,
<literal>weeks</literal> and <literal>years</literal> suffixes),
or no more than the specified number of separate journal files
remain. Note that running <option>--vacuum-size=</option> has
only an indirect effect on the output shown by
<option>--disk-usage</option>, as the latter includes active
journal files, while the vacuuming operation only operates
on archived journal files. Similarly,
<option>--vacuum-files=</option> might not actually reduce the
number of journal files to below the specified number, as it
will not remove active journal
files. <option>--vacuum-size=</option>,
<option>--vacuum-time=</option> and
<option>--vacuum-files=</option> may be combined in a single
invocation to enforce any combination of a size, a time and a
number of files limit on the archived journal
files. Specifying any of these three parameters as zero is
equivalent to not enforcing the specific limit, and is thus
redundant.</para></listitem>
<listitem><para>Removes the oldest archived journal files until the disk space they use falls below the
specified size (specified with the usual <literal>K</literal>, <literal>M</literal>, <literal>G</literal> and
<literal>T</literal> suffixes), or all archived journal files contain no data older than the specified timespan
(specified with the usual <literal>s</literal>, <literal>m</literal>, <literal>h</literal>,
<literal>days</literal>, <literal>months</literal>, <literal>weeks</literal> and <literal>years</literal>
suffixes), or no more than the specified number of separate journal files remain. Note that running
<option>--vacuum-size=</option> has only an indirect effect on the output shown by
<option>--disk-usage</option>, as the latter includes active journal files, while the vacuuming operation only
operates on archived journal files. Similarly, <option>--vacuum-files=</option> might not actually reduce the
number of journal files to below the specified number, as it will not remove active journal
files.</para>

<para><option>--vacuum-size=</option>, <option>--vacuum-time=</option> and <option>--vacuum-files=</option>
may be combined in a single invocation to enforce any combination of a size, a time and a number of files limit
on the archived journal files. Specifying any of these three parameters as zero is equivalent to not enforcing
the specific limit, and is thus redundant.</para>

<para>These three switches may also be combined with <option>--rotate</option> into one command. If so, all
active files are rotated first, and the requested vacuuming operation is executed right after. The rotation has
the effect that all currently active files are archived (and potentially new, empty journal files opened as
replacement), and hence the vacuuming operation has the greatest effect as it can take all log data written so
far into account.</para></listitem>
</varlistentry>

<varlistentry>
@@ -896,9 +892,12 @@
<varlistentry>
<term><option>--rotate</option></term>

<listitem><para>Asks the journal daemon to rotate journal
files. This call does not return until the rotation operation
is complete.</para></listitem>
<listitem><para>Asks the journal daemon to rotate journal files. This call does not return until the rotation
operation is complete. Journal file rotation has the effect that all currently active journal files are marked
as archived and renamed, so that they are never written to in the future. New (empty) journal files are then
created in their place. This operation may be combined with <option>--vacuum-size=</option>,
<option>--vacuum-time=</option> and <option>--vacuum-files=</option> into a single command, see
above.</para></listitem>
</varlistentry>

<xi:include href="standard-options.xml" xpointer="help" />
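The two documentation hunks above describe the behaviour this pull request adds: vacuuming only ever touches archived journal files, so combining the vacuum switches with --rotate first archives everything that is currently active and therefore lets the vacuuming consider all log data written so far. As a purely illustrative example (not taken from the patch), an invocation along the lines of journalctl --rotate --vacuum-time=2weeks would archive the active files and then delete archives containing only data older than two weeks.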
@@ -3224,30 +3224,30 @@ int journal_file_open(
if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
return -EINVAL;

f = new0(JournalFile, 1);
f = new(JournalFile, 1);
if (!f)
return -ENOMEM;

f->fd = fd;
f->mode = mode;
*f = (JournalFile) {
.fd = fd,
.mode = mode,

.flags = flags,
.prot = prot_from_flags(flags),
.writable = (flags & O_ACCMODE) != O_RDONLY,

f->flags = flags;
f->prot = prot_from_flags(flags);
f->writable = (flags & O_ACCMODE) != O_RDONLY;
#if HAVE_LZ4
f->compress_lz4 = compress;
.compress_lz4 = compress,
#elif HAVE_XZ
f->compress_xz = compress;
.compress_xz = compress,
#endif

if (compress_threshold_bytes == (uint64_t) -1)
f->compress_threshold_bytes = DEFAULT_COMPRESS_THRESHOLD;
else
f->compress_threshold_bytes = MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes);

.compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
DEFAULT_COMPRESS_THRESHOLD :
MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
#if HAVE_GCRYPT
f->seal = seal;
.seal = seal,
#endif
};

log_debug("Journal effective settings seal=%s compress=%s compress_threshold_bytes=%s",
yes_no(f->seal), yes_no(JOURNAL_FILE_COMPRESS(f)),
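The hunk above replaces zero-initialising allocation plus field-by-field assignment with new() followed by a single compound-literal assignment. A minimal standalone sketch of that C99 pattern, using plain malloc() instead of systemd's new()/new0() helpers (struct and names here are illustrative only):

#include <stdlib.h>

struct config {
        int fd;
        unsigned flags;
        int compress;            /* not named below, so implicitly zeroed */
};

/* Allocate uninitialized memory, then assign a designated-initializer
 * compound literal: every member not named is set to zero, so a separate
 * memset()/calloc() (the new0() equivalent) is no longer needed. */
static struct config *config_new(int fd, unsigned flags) {
        struct config *c = malloc(sizeof(*c));
        if (!c)
                return NULL;

        *c = (struct config) {
                .fd = fd,
                .flags = flags,
        };

        return c;
}

int main(void) {
        struct config *c = config_new(3, 0x1u);
        free(c);
        return 0;
}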
@@ -3437,72 +3437,143 @@ fail:
return r;
}

int journal_file_rotate(JournalFile **f, bool compress, uint64_t compress_threshold_bytes, bool seal, Set *deferred_closes) {
int journal_file_archive(JournalFile *f) {
_cleanup_free_ char *p = NULL;
size_t l;
JournalFile *old_file, *new_file = NULL;

assert(f);

if (!f->writable)
return -EINVAL;

/* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
* rotation, since we don't know the actual path, and hence couldn't rename the file. */
if (path_startswith(f->path, "/proc/self/fd"))
return -EINVAL;

if (!endswith(f->path, ".journal"))
return -EINVAL;

if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
(int) strlen(f->path) - 8, f->path,
SD_ID128_FORMAT_VAL(f->header->seqnum_id),
le64toh(f->header->head_entry_seqnum),
le64toh(f->header->head_entry_realtime)) < 0)
return -ENOMEM;

/* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
* ignore that case. */
if (rename(f->path, p) < 0 && errno != ENOENT)
return -errno;

/* Sync the rename to disk */
(void) fsync_directory_of_file(f->fd);

/* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
* to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
* which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
* the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
* occurs. */
f->archive = true;

/* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
* files when we archive them */
f->defrag_on_close = true;

return 0;
}

JournalFile* journal_initiate_close(
JournalFile *f,
Set *deferred_closes) {

int r;

assert(f);

if (deferred_closes) {

r = set_put(deferred_closes, f);
if (r < 0)
log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
else {
(void) journal_file_set_offline(f, false);
return NULL;
}
}

return journal_file_close(f);
}

int journal_file_rotate(
JournalFile **f,
bool compress,
uint64_t compress_threshold_bytes,
bool seal,
Set *deferred_closes) {

JournalFile *new_file = NULL;
int r;

assert(f);
assert(*f);

old_file = *f;

if (!old_file->writable)
return -EINVAL;

/* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
* rotation, since we don't know the actual path, and hence couldn't rename the file. */
if (path_startswith(old_file->path, "/proc/self/fd"))
return -EINVAL;

if (!endswith(old_file->path, ".journal"))
return -EINVAL;

l = strlen(old_file->path);
r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
(int) l - 8, old_file->path,
SD_ID128_FORMAT_VAL(old_file->header->seqnum_id),
le64toh((*f)->header->head_entry_seqnum),
le64toh((*f)->header->head_entry_realtime));
r = journal_file_archive(*f);
if (r < 0)
return r;

r = journal_file_open(
-1,
(*f)->path,
(*f)->flags,
(*f)->mode,
compress,
compress_threshold_bytes,
seal,
NULL, /* metrics */
(*f)->mmap,
deferred_closes,
*f, /* template */
&new_file);

journal_initiate_close(*f, deferred_closes);
*f = new_file;

return r;
}

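journal_file_archive() above only renames the file and queues the archive bit; the companion journal_initiate_close() then tries to park the file in a deferred-close set so the potentially slow offlining happens asynchronously, and only falls back to a blocking close if that fails. A rough sketch of that decision logic, with a plain array standing in for the Set and stubbed file operations (everything here is hypothetical except the control flow):

#include <stddef.h>
#include <stdio.h>

typedef struct File { const char *name; } File;

/* Stubs standing in for the real asynchronous offline and blocking close. */
static void file_begin_offline(File *f) { printf("offlining %s\n", f->name); }
static File *file_close(File *f)        { printf("closing %s\n", f->name); return NULL; }

#define DEFERRED_MAX 16
static File *deferred[DEFERRED_MAX];
static size_t n_deferred;

/* Mirror of the idea behind journal_initiate_close(): prefer parking the file
 * in a deferred-close list and merely kick off the cheap, asynchronous
 * offlining; fall back to a blocking close when the list cannot take it.
 * The caller gives up ownership either way. */
static File *initiate_close(File *f) {
        if (n_deferred < DEFERRED_MAX) {
                deferred[n_deferred++] = f;
                file_begin_offline(f);
                return NULL;
        }

        return file_close(f);
}

int main(void) {
        File f = { .name = "user-1000.journal" };
        initiate_close(&f);
        return 0;
}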
int journal_file_dispose(int dir_fd, const char *fname) {
_cleanup_free_ char *p = NULL;
_cleanup_close_ int fd = -1;

assert(fname);

/* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
* this is done without looking into the file or changing any of its contents. The idea is that this is called
* whenever something is suspicious and we want to move the file away and make clear that it is not accessed
* for writing anymore. */

if (!endswith(fname, ".journal"))
return -EINVAL;

if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
(int) strlen(fname) - 8, fname,
now(CLOCK_REALTIME),
random_u64()) < 0)
return -ENOMEM;

/* Try to rename the file to the archived version. If the file
* already was deleted, we'll get ENOENT, let's ignore that
* case. */
r = rename(old_file->path, p);
if (r < 0 && errno != ENOENT)
if (renameat(dir_fd, fname, dir_fd, p) < 0)
return -errno;

/* Sync the rename to disk */
(void) fsync_directory_of_file(old_file->fd);
/* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
if (fd < 0)
log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
else {
(void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
(void) btrfs_defrag_fd(fd);
}

/* Set as archive so offlining commits w/state=STATE_ARCHIVED.
* Previously we would set old_file->header->state to STATE_ARCHIVED directly here,
* but journal_file_set_offline() short-circuits when state != STATE_ONLINE, which
* would result in the rotated journal never getting fsync() called before closing.
* Now we simply queue the archive state by setting an archive bit, leaving the state
* as STATE_ONLINE so proper offlining occurs. */
old_file->archive = true;

/* Currently, btrfs is not very good with our write patterns
* and fragments heavily. Let's defrag our journal files when
* we archive them */
old_file->defrag_on_close = true;

r = journal_file_open(-1, old_file->path, old_file->flags, old_file->mode, compress,
compress_threshold_bytes, seal, NULL, old_file->mmap, deferred_closes,
old_file, &new_file);

if (deferred_closes &&
set_put(deferred_closes, old_file) >= 0)
(void) journal_file_set_offline(old_file, false);
else
(void) journal_file_close(old_file);

*f = new_file;
return r;
return 0;
}

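journal_file_dispose() builds the *.journal~ name with the same "%.*s" precision trick used by journal_file_archive(): printing only strlen(fname) - 8 characters drops the trailing ".journal" before the new suffix is appended. A self-contained sketch of that naming step, with plain uint64_t parameters standing in for now(CLOCK_REALTIME) and random_u64() (the helper name is made up):

#define _GNU_SOURCE
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Strip the ".journal" suffix via the "%.*s" field width, then append
 * "@<timestamp>-<random>.journal~", as the dispose path above does. */
static char *disposed_name(const char *path, uint64_t ts, uint64_t rnd) {
        size_t l = strlen(path);
        char *p;

        if (l < 9 || strcmp(path + l - 8, ".journal") != 0)
                return NULL;

        if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
                     (int) (l - 8), path, ts, rnd) < 0)
                return NULL;

        return p;
}

int main(void) {
        char *p = disposed_name("system.journal", UINT64_C(0x5bd1e000000000), UINT64_C(0xdeadbeef));
        if (p)
                printf("%s\n", p);
        free(p);
        return 0;
}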
int journal_file_open_reliably(

@@ -3518,9 +3589,8 @@ int journal_file_open_reliably(
JournalFile *template,
JournalFile **ret) {

int r;
size_t l;
_cleanup_free_ char *p = NULL;
int r;

r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
deferred_closes, template, ret);
@@ -3546,25 +3616,12 @@ int journal_file_open_reliably(
return r;

/* The file is corrupted. Rotate it away and try it again (but only once) */

l = strlen(fname);
if (asprintf(&p, "%.*s@%016"PRIx64 "-%016"PRIx64 ".journal~",
(int) l - 8, fname,
now(CLOCK_REALTIME),
random_u64()) < 0)
return -ENOMEM;

if (rename(fname, p) < 0)
return -errno;

/* btrfs doesn't cope well with our write pattern and
* fragments heavily. Let's defrag all files we rotate */

(void) chattr_path(p, 0, FS_NOCOW_FL, NULL);
(void) btrfs_defrag(p);

log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);

r = journal_file_dispose(AT_FDCWD, fname);
if (r < 0)
return r;

return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
deferred_closes, template, ret);
}

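journal_file_open_reliably() now delegates the "move the corrupt file out of the way" step to journal_file_dispose() and then retries the open exactly once. A schematic version of that retry-once pattern; the function-pointer types and the choice of corruption errors are assumptions, not the real prototypes:

#include <errno.h>
#include <stdio.h>

typedef int (*open_fn)(const char *path, void **ret);
typedef int (*dispose_fn)(const char *path);

/* If the first open fails with a corruption-style error, move the file out
 * of the way and try again a single time; other errors propagate unchanged. */
static int open_reliably(const char *path, open_fn do_open, dispose_fn do_dispose, void **ret) {
        int r = do_open(path, ret);
        if (r >= 0 || (r != -EBADMSG && r != -ENODATA))
                return r;

        r = do_dispose(path);
        if (r < 0)
                return r;

        return do_open(path, ret);
}

/* Stubs: the first open "finds corruption", dispose succeeds, the retry works. */
static int fake_open(const char *path, void **ret) {
        static int calls;
        (void) path; *ret = NULL;
        return calls++ == 0 ? -EBADMSG : 0;
}
static int fake_dispose(const char *path) { (void) path; return 0; }

int main(void) {
        void *handle;
        printf("%d\n", open_reliably("system.journal", fake_open, fake_dispose, &handle)); /* prints 0 */
        return 0;
}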
@@ -235,8 +235,12 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6
void journal_file_dump(JournalFile *f);
void journal_file_print_header(JournalFile *f);

int journal_file_archive(JournalFile *f);
JournalFile* journal_initiate_close(JournalFile *f, Set *deferred_closes);
int journal_file_rotate(JournalFile **f, bool compress, uint64_t compress_threshold_bytes, bool seal, Set *deferred_closes);

int journal_file_dispose(int dir_fd, const char *fname);

void journal_file_post_change(JournalFile *f);
int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t);

@@ -15,6 +15,7 @@
#include "journal-vacuum.h"
#include "parse-util.h"
#include "string-util.h"
#include "time-util.h"
#include "util.h"
#include "xattr-util.h"

@@ -125,11 +126,10 @@ int journal_directory_vacuum(
usec_t *oldest_usec,
bool verbose) {

uint64_t sum = 0, freed = 0, n_active_files = 0;
size_t n_list = 0, n_allocated = 0, i;
_cleanup_closedir_ DIR *d = NULL;
struct vacuum_info *list = NULL;
unsigned n_list = 0, i, n_active_files = 0;
size_t n_allocated = 0;
uint64_t sum = 0, freed = 0;
usec_t retention_limit = 0;
char sbytes[FORMAT_BYTES_MAX];
struct dirent *de;
@@ -140,13 +140,8 @@ int journal_directory_vacuum(
if (max_use <= 0 && max_retention_usec <= 0 && n_max_files <= 0)
return 0;

if (max_retention_usec > 0) {
retention_limit = now(CLOCK_REALTIME);
if (retention_limit > max_retention_usec)
retention_limit -= max_retention_usec;
else
max_retention_usec = retention_limit = 0;
}
if (max_retention_usec > 0)
retention_limit = usec_sub_unsigned(now(CLOCK_REALTIME), max_retention_usec);

d = opendir(directory);
if (!d)
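The replacement in the hunk above relies on systemd's usec_sub_unsigned() helper to clamp the subtraction at zero instead of open-coding the underflow check. A sketch of what such a saturating helper does (illustration only, not systemd's implementation):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t usec_t;

/* Saturating subtraction: clamp at 0 instead of wrapping around, so
 * "now - retention" can never underflow into a huge bogus timestamp. */
static usec_t usec_sub_saturating(usec_t timestamp, usec_t delta) {
        return timestamp >= delta ? timestamp - delta : 0;
}

int main(void) {
        printf("%llu\n", (unsigned long long) usec_sub_saturating(100, 250)); /* prints 0 */
        return 0;
}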
@@ -272,13 +267,14 @@ int journal_directory_vacuum(
goto finish;
}

list[n_list].filename = TAKE_PTR(p);
list[n_list].usage = size;
list[n_list].seqnum = seqnum;
list[n_list].realtime = realtime;
list[n_list].seqnum_id = seqnum_id;
list[n_list].have_seqnum = have_seqnum;
n_list++;
list[n_list++] = (struct vacuum_info) {
.filename = TAKE_PTR(p),
.usage = size,
.seqnum = seqnum,
.realtime = realtime,
.seqnum_id = seqnum_id,
.have_seqnum = have_seqnum,
};

sum += size;
}
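Same modernisation as in journal_file_open(): the per-file bookkeeping entry is now appended as one compound literal instead of six separate assignments plus a counter bump. A generic sketch of that append idiom, with realloc() standing in for the growth helper used in the real code (types and names are illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct entry {
        char *filename;
        uint64_t usage;
        int have_seqnum;
};

/* Append one element by assigning a compound literal to the next free slot
 * and bumping the counter in the same statement, mirroring
 * list[n_list++] = (struct vacuum_info) { ... } above. */
static int append(struct entry **list, size_t *n, size_t *allocated, char *name, uint64_t usage) {
        if (*n >= *allocated) {
                size_t m = *allocated ? *allocated * 2 : 8;
                struct entry *t = realloc(*list, m * sizeof(struct entry));
                if (!t)
                        return -1;
                *list = t;
                *allocated = m;
        }

        (*list)[(*n)++] = (struct entry) {
                .filename = name,
                .usage = usage,
                /* .have_seqnum left at 0 */
        };

        return 0;
}

int main(void) {
        struct entry *list = NULL;
        size_t n = 0, allocated = 0;
        append(&list, &n, &allocated, strdup("system@0001.journal"), 8 * 1024 * 1024);
        free(list[0].filename);
        free(list);
        return 0;
}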
@@ -286,7 +282,7 @@ int journal_directory_vacuum(
typesafe_qsort(list, n_list, vacuum_compare);

for (i = 0; i < n_list; i++) {
unsigned left;
uint64_t left;

left = n_active_files + n_list - i;

@@ -167,6 +167,7 @@ static enum {
ACTION_SYNC,
ACTION_ROTATE,
ACTION_VACUUM,
ACTION_ROTATE_AND_VACUUM,
ACTION_LIST_FIELDS,
ACTION_LIST_FIELD_NAMES,
} arg_action = ACTION_SHOW;
@@ -687,7 +688,7 @@ static int parse_argv(int argc, char *argv[]) {
return r;
}

arg_action = ACTION_VACUUM;
arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
break;

case ARG_VACUUM_FILES:

@@ -697,7 +698,7 @@ static int parse_argv(int argc, char *argv[]) {
return r;
}

arg_action = ACTION_VACUUM;
arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
break;

case ARG_VACUUM_TIME:

@@ -707,7 +708,7 @@ static int parse_argv(int argc, char *argv[]) {
return r;
}

arg_action = ACTION_VACUUM;
arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
break;

#if HAVE_GCRYPT

@@ -896,7 +897,7 @@ static int parse_argv(int argc, char *argv[]) {
break;

case ARG_ROTATE:
arg_action = ACTION_ROTATE;
arg_action = arg_action == ACTION_VACUUM ? ACTION_ROTATE_AND_VACUUM : ACTION_ROTATE;
break;

case ARG_SYNC:
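The four parse_argv() hunks above all apply the same rule: whichever of --rotate and --vacuum-* is seen second upgrades the action to the combined one rather than overwriting it, so the options may be passed in either order. Roughly, with illustrative helper names (the enum constants mirror the diff, the rest is a sketch):

#include <stdio.h>

enum action { ACTION_SHOW, ACTION_ROTATE, ACTION_VACUUM, ACTION_ROTATE_AND_VACUUM };

static enum action apply_rotate(enum action a) {
        return a == ACTION_VACUUM ? ACTION_ROTATE_AND_VACUUM : ACTION_ROTATE;
}

static enum action apply_vacuum(enum action a) {
        return a == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
}

int main(void) {
        enum action a = ACTION_SHOW;
        a = apply_vacuum(a);   /* e.g. --vacuum-size=100M */
        a = apply_rotate(a);   /* e.g. --rotate */
        printf("%d\n", a == ACTION_ROTATE_AND_VACUUM); /* prints 1 */
        return 0;
}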
@@ -1970,7 +1971,7 @@ static int send_signal_and_wait(int sig, const char *watch_path) {
/* See if a sync happened by now. */
r = read_timestamp_file(watch_path, &tstamp);
if (r < 0 && r != -ENOENT)
return log_error_errno(errno, "Failed to read %s: %m", watch_path);
return log_error_errno(r, "Failed to read %s: %m", watch_path);
if (r >= 0 && tstamp >= start)
return 0;

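The one-line fix above is worth calling out: read_timestamp_file() reports failures through its negative return value, so logging errno would print whatever some earlier, unrelated call left behind. A minimal illustration of the corrected pattern (helper and values are made up):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Pretend helper that returns a negative errno value on failure. */
static int read_value(const char *path, int *ret) {
        (void) path;
        (void) ret;
        return -ENOENT;            /* pretend the file is missing */
}

int main(void) {
        int v, r = read_value("/nonexistent", &v);
        if (r < 0)
                fprintf(stderr, "Failed to read: %s\n", strerror(-r)); /* use -r, not errno */
        return 0;
}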
@@ -2120,6 +2121,7 @@ int main(int argc, char *argv[]) {
case ACTION_DISK_USAGE:
case ACTION_LIST_BOOTS:
case ACTION_VACUUM:
case ACTION_ROTATE_AND_VACUUM:
case ACTION_LIST_FIELDS:
case ACTION_LIST_FIELD_NAMES:
/* These ones require access to the journal files, continue below. */

@@ -2237,6 +2239,14 @@ int main(int argc, char *argv[]) {
r = list_boots(j);
goto finish;

case ACTION_ROTATE_AND_VACUUM:

r = rotate();
if (r < 0)
goto finish;

_fallthrough_;

case ACTION_VACUUM: {
Directory *d;
Iterator i;
@@ -74,6 +74,8 @@
* for a bit of additional metadata. */
#define DEFAULT_LINE_MAX (48*1024)

#define DEFERRED_CLOSES_MAX (4096)

static int determine_path_usage(Server *s, const char *path, uint64_t *ret_used, uint64_t *ret_free) {
_cleanup_closedir_ DIR *d = NULL;
struct dirent *de;

@@ -252,8 +254,9 @@ static int open_journal(
bool seal,
JournalMetrics *metrics,
JournalFile **ret) {
int r;

JournalFile *f;
int r;

assert(s);
assert(fname);
@@ -399,17 +402,21 @@ static JournalFile* find_journal(Server *s, uid_t uid) {
if (uid_for_system_journal(uid))
return s->system_journal;

r = sd_id128_get_machine(&machine);
if (r < 0)
return s->system_journal;

f = ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid));
if (f)
return f;

if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/user-"UID_FMT".journal",
SD_ID128_FORMAT_VAL(machine), uid) < 0)
r = sd_id128_get_machine(&machine);
if (r < 0) {
log_debug_errno(r, "Failed to determine machine ID, using system log: %m");
return s->system_journal;
}

if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/user-"UID_FMT".journal",
SD_ID128_FORMAT_VAL(machine), uid) < 0) {
log_oom();
return s->system_journal;
}

while (ordered_hashmap_size(s->user_journals) >= USER_JOURNALS_MAX) {
/* Too many open? Then let's close one */
@@ -459,17 +466,81 @@ static int do_rotate(
return r;
}

void server_rotate(Server *s) {
static void server_process_deferred_closes(Server *s) {
JournalFile *f;
void *k;
Iterator i;

/* Perform any deferred closes which aren't still offlining. */
SET_FOREACH(f, s->deferred_closes, i) {
if (journal_file_is_offlining(f))
continue;

(void) set_remove(s->deferred_closes, f);
(void) journal_file_close(f);
}
}

static void server_vacuum_deferred_closes(Server *s) {
assert(s);

/* Make some room in the deferred closes list, so that it doesn't grow without bounds */
if (set_size(s->deferred_closes) < DEFERRED_CLOSES_MAX)
return;

/* Let's first remove all journal files that might already have completed closing */
server_process_deferred_closes(s);

/* And now, let's close some more until we reach the limit again. */
while (set_size(s->deferred_closes) >= DEFERRED_CLOSES_MAX) {
JournalFile *f;

assert_se(f = set_steal_first(s->deferred_closes));
journal_file_close(f);
}
}

static int open_user_journal_directory(Server *s, DIR **ret_dir, char **ret_path) {
_cleanup_closedir_ DIR *dir = NULL;
_cleanup_free_ char *path = NULL;
sd_id128_t machine;
int r;

assert(s);

r = sd_id128_get_machine(&machine);
if (r < 0)
return log_error_errno(r, "Failed to determine machine ID, ignoring: %m");

if (asprintf(&path, "/var/log/journal/" SD_ID128_FORMAT_STR "/", SD_ID128_FORMAT_VAL(machine)) < 0)
return log_oom();

dir = opendir(path);
if (!dir)
return log_error_errno(errno, "Failed to open user journal directory '%s': %m", path);

if (ret_dir)
*ret_dir = TAKE_PTR(dir);
if (ret_path)
*ret_path = TAKE_PTR(path);

return 0;
}

void server_rotate(Server *s) {
_cleanup_free_ char *path = NULL;
_cleanup_closedir_ DIR *d = NULL;
JournalFile *f;
Iterator i;
void *k;
int r;

log_debug("Rotating...");

/* First, rotate the system journal (either in its runtime flavour or in its system flavour) */
(void) do_rotate(s, &s->runtime_journal, "runtime", false, 0);
(void) do_rotate(s, &s->system_journal, "system", s->seal, 0);

/* Then, rotate all user journals we have open (keeping them open) */
ORDERED_HASHMAP_FOREACH_KEY(f, k, s->user_journals, i) {
r = do_rotate(s, &f, "user", s->seal, PTR_TO_UID(k));
if (r >= 0)

@@ -479,12 +550,92 @@ void server_rotate(Server *s) {
ordered_hashmap_remove(s->user_journals, k);
}

/* Perform any deferred closes which aren't still offlining. */
SET_FOREACH(f, s->deferred_closes, i)
if (!journal_file_is_offlining(f)) {
(void) set_remove(s->deferred_closes, f);
(void) journal_file_close(f);
/* Finally, also rotate all user journals we currently do not have open. */
r = open_user_journal_directory(s, &d, &path);
if (r >= 0) {
struct dirent *de;

FOREACH_DIRENT(de, d, log_warning_errno(errno, "Failed to enumerate %s, ignoring: %m", path)) {
_cleanup_free_ char *u = NULL, *full = NULL;
_cleanup_close_ int fd = -1;
const char *a, *b;
uid_t uid;

a = startswith(de->d_name, "user-");
if (!a)
continue;
b = endswith(de->d_name, ".journal");
if (!b)
continue;

u = strndup(a, b-a);
if (!u) {
log_oom();
break;
}

r = parse_uid(u, &uid);
if (r < 0) {
log_debug_errno(r, "Failed to parse UID from file name '%s', ignoring: %m", de->d_name);
continue;
}

/* Already rotated in the above loop? i.e. is it an open user journal? */
if (ordered_hashmap_contains(s->user_journals, UID_TO_PTR(uid)))
continue;

full = strjoin(path, de->d_name);
if (!full) {
log_oom();
break;
}

fd = openat(dirfd(d), de->d_name, O_RDWR|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW|O_NONBLOCK);
if (fd < 0) {
log_full_errno(IN_SET(errno, ELOOP, ENOENT) ? LOG_DEBUG : LOG_WARNING, errno,
"Failed to open journal file '%s' for rotation: %m", full);
continue;
}

/* Make some room in the set of deferred close()s */
server_vacuum_deferred_closes(s);

/* Open the file briefly, so that we can archive it */
r = journal_file_open(fd,
full,
O_RDWR,
0640,
s->compress.enabled,
s->compress.threshold_bytes,
s->seal,
&s->system_storage.metrics,
s->mmap,
s->deferred_closes,
NULL,
&f);
if (r < 0) {
log_warning_errno(r, "Failed to read journal file %s for rotation, trying to move it out of the way: %m", full);

r = journal_file_dispose(dirfd(d), de->d_name);
if (r < 0)
log_warning_errno(r, "Failed to move %s out of the way, ignoring: %m", full);
else
log_debug("Successfully moved %s out of the way.", full);

continue;
}

TAKE_FD(fd); /* Donated to journal_file_open() */

r = journal_file_archive(f);
if (r < 0)
log_debug_errno(r, "Failed to archive journal file '%s', ignoring: %m", full);

f = journal_initiate_close(f, s->deferred_closes);
}
}

server_process_deferred_closes(s);
}

void server_sync(Server *s) {
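One detail in the new server_rotate() loop deserves a note: the descriptor is opened with openat(), handed to journal_file_open(), and only on success released from the _cleanup_close_ variable via TAKE_FD(), so exactly one owner closes it on every path. A stripped-down sketch of that "donate on success only" idiom; take_fd() here is a hand-rolled stand-in for systemd's macro and consume_fd() is invented for the example:

#include <fcntl.h>
#include <unistd.h>

/* Hand-rolled equivalent of TAKE_FD(): return the descriptor and reset the
 * variable to -1 so the cleanup path no longer considers itself the owner. */
static int take_fd(int *fd) {
        int r = *fd;
        *fd = -1;
        return r;
}

/* Pretend consumer that takes ownership of the descriptor on success only. */
static int consume_fd(int fd) {
        if (fd < 0)
                return -1;
        close(fd);      /* the consumer is now responsible for closing */
        return 0;
}

int main(void) {
        int fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
        if (fd < 0)
                return 1;

        if (consume_fd(fd) >= 0)
                take_fd(&fd);   /* donated: skip our own close() below */

        if (fd >= 0)
                close(fd);      /* error path: still ours, clean up */
        return 0;
}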